text
stringlengths
29
850k
# -*- coding: utf-8 -*-
"""Build per-user GitHub pull-request review reports.

Walks every PR the user is involved in (via ``GithubClient``) and yields
report entries describing review states plus comments/commits that appeared
since the user's last activity on each PR.
"""
import dateutil.parser
import datetime
import os
import logging
import sys

from dateutil.tz import tzutc

from github_client import GithubClient

# Root logger is configured to stream DEBUG output to stdout so the module
# is usable both as a library and as a standalone diagnostic tool.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(name)-25s: %(filename)s:%(lineno)-3d: %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)

logger = logging.getLogger('github_reviews')

# Sentinel "no activity" timestamp; timezone-aware so it compares safely
# against dates parsed from GitHub API payloads (which carry a tz).
NEVER = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())

# Personal access token is read from the environment at import time.
TOKEN = None
if 'TOKEN' in os.environ:
    TOKEN = os.environ['TOKEN']
else:
    print("Auth token not found, "
          "please create a new token at Settings - 'Personal access tokens' "
          "and set TOKEN env var")


def get_prs(client, user):
    """Yield ``(pr_link, owner, repo, number, pr_reviews_raw)`` tuples for
    every pull request *user* is involved in, newest first."""
    logger.debug("get_prs for user {}".format(user))
    raw_prs = client.get_involved_pull_requests(user)
    # Sort PRs by date - most likely the newest were not reviewed
    sorted_prs = sorted(raw_prs,
                        key=lambda x: dateutil.parser.parse(x['updated_at']),
                        reverse=True)
    pr_links = [x['html_url'] for x in sorted_prs]
    logger.debug("pr_links: {}".format(pr_links))
    for pr_link in pr_links:
        owner, repo, number = GithubClient.get_pr_info_from_link(pr_link)
        logger.debug("pr_links {}, owner {}, repo {}, number {}".format(
            pr_link, owner, repo, number))
        pr_reviews_raw = client.get_pr_reviews(owner, repo, number)
        yield (pr_link, owner, repo, number, pr_reviews_raw)


def get_pr_reviews(pr_reviews_raw):
    """Collapse raw review events into the latest state per reviewer.

    Returns a dict ``{login: {'state': ..., 'date': ...}}``. Events are
    processed in chronological order so later events win, except that a
    plain COMMENTED event never downgrades an APPROVED/CHANGES_REQUESTED
    verdict.
    """
    logger.debug("get_pr_reviews")
    review_results = {}
    pr_reviews_sorted = sorted(
        pr_reviews_raw,
        key=lambda x: dateutil.parser.parse(x['submitted_at']))
    for pr_review in pr_reviews_sorted:
        user = pr_review['user']['login']
        logger.debug("pr for user {} with state {}".format(
            user, pr_review['state']))
        # Don't replace approved/changes_required with 'commented'
        # Github API quirk probably
        existing_review = review_results.get(user, {}).get('state')
        logger.debug("pr state {}".format(existing_review))
        if existing_review in ['APPROVED', 'CHANGES_REQUESTED'] and \
                pr_review['state'] == 'COMMENTED':
            continue
        review_results[user] = {
            'state': pr_review['state'],
            'date': dateutil.parser.parse(pr_review['submitted_at'])
        }
    logger.debug(review_results)
    return review_results


def get_pr_review_requests(client, owner, repo, number):
    """Return the logins of users whose review was requested on the PR."""
    requests_raw = client.get_pr_review_requests(owner, repo, number)
    return [x['login'] for x in requests_raw]


def get_pr_comments(client, owner, repo, number):
    """Return PR comments as dicts with parsed ``date`` and flattened user."""
    comments = []
    comments_raw = client.get_pr_comments(owner, repo, number)
    for comment in comments_raw:
        comments.append({
            'user': comment['user']['login'],
            'text': comment['body'],
            'date': dateutil.parser.parse(comment['created_at'])
        })
    return comments


def get_pr_commits(client, owner, repo, number):
    """Return PR commits with short hash, first message line and author info.

    NOTE(review): the author email is taken from the git commit metadata,
    not the GitHub account — matching against the account email later is
    best-effort.
    """
    commits = []
    commits_raw = client.get_pr_commits(owner, repo, number)
    for commit in commits_raw:
        commits.append({
            'hash': commit['sha'][:8],
            'message': commit['commit']['message'].split('\n')[0],
            'user': commit['commit']['author']['name'],
            'user_email': commit['commit']['author']['email'],
            'date': dateutil.parser.parse(commit['commit']['author']['date'])
        })
    return commits


def prepare_report(user):
    """Create a client and the lazy PR stream used by :func:`make_report`."""
    client = GithubClient(token=TOKEN)
    return (client, get_prs(client, user))


def make_report(user, client, prs_with_reviews):
    """Yield progress dicts and report entries for PRs needing attention.

    For each PR yields ``{'progress': <0-100>}`` first, then a report entry
    dict — but only when the user was asked to review it or there are new
    comments/commits since the user's last recorded activity.
    """
    total_prs = None
    for i, pr_data in enumerate(prs_with_reviews):
        if not total_prs:
            # client.total_count is populated lazily by the first API call
            # made while consuming the generator, hence read inside the loop.
            total_prs = client.total_count
        progress = int(((i+1) / total_prs) * 100)
        yield {'progress': progress}
        pr_link, owner, repo, number, pr_reviews_raw = pr_data
        logger.debug("PR {}".format(pr_link))
        pr_info_raw = client.get_pr(owner, repo, number)
        review_requested_from_users = get_pr_review_requests(client, owner,
                                                             repo, number)
        review_results = get_pr_reviews(pr_reviews_raw)
        comments = get_pr_comments(client, owner, repo, number)
        commits = get_pr_commits(client, owner, repo, number)
        report_entry = {
            'pr_link': pr_link,
            'owner': owner,
            'repo': repo,
            'pr_number': number,
            'pr_title': pr_info_raw['title'],
            'pr_owner': pr_info_raw['user']['login'],
            'pr_reviews': {},
            'new_comments': [],
            'new_commits': []
        }
        # If user was explicitely requested to review it - show it
        user_was_requested_to_review = user in review_requested_from_users
        # Print others review state
        for pr_reviewer in review_results:
            pr_review_result = review_results[pr_reviewer]['state']
            report_entry['pr_reviews'][pr_reviewer] = pr_review_result
        # Add requests from other users unless there is a review set by them there already
        for pr_review_request in review_requested_from_users:
            if pr_review_request not in report_entry['pr_reviews'].keys():
                report_entry['pr_reviews'][pr_review_request] = 'REVIEW_REQUESTED'
        last_user_review_date = review_results.get(user, {}).get('date') or NEVER
        # Find last user comment
        user_comments = filter(lambda x: x['user'] == user, comments)
        sorted_user_comments = sorted(user_comments, key=lambda x: x['date'])
        last_user_comment_date = sorted_user_comments[-1]['date'] if sorted_user_comments else NEVER
        logger.debug("last_user_comment_date {}".format(last_user_comment_date))
        # Get user email so we could filter out new commits by this user
        user_info_raw = client.get_user_info(user)
        user_email = user_info_raw['email']
        user_commits = filter(lambda x: x['user_email'] == user_email, commits)
        sorted_user_commits = sorted(user_commits, key=lambda x: x['date'])
        last_user_commit_date = sorted_user_commits[-1]['date'] if sorted_user_commits else NEVER
        logger.debug("last_user_commit_date {}".format(last_user_commit_date))
        # If last activity date cannot be found the PR should be skipped
        if not user_was_requested_to_review and \
                last_user_comment_date == NEVER and \
                last_user_review_date == NEVER and \
                last_user_commit_date == NEVER:
            continue
        last_user_activity = max([
            last_user_comment_date,
            last_user_review_date,
            last_user_commit_date
        ])
        logger.debug("last_user_activity {}".format(last_user_activity))
        # Collect new comments since last user activity
        new_comments = [x for x in comments if x['date'] > last_user_activity]
        for comment in new_comments:
            report_entry['new_comments'].append({
                'date': comment['date'],
                'user': comment['user'],
                'text': comment['text']
            })
        logger.debug("new_comments {}".format(new_comments))
        # Collect new commits since last activity
        new_commits = [x for x in commits if x['date'] > last_user_activity]
        for commit in new_commits:
            report_entry['new_commits'].append({
                'hash': commit['hash'],
                'user': commit['user'],
                'message': commit['message'],
                'date': commit['date']
            })
        logger.debug("new_commits {}".format(new_commits))
        # Skip PR if no new comments/commits available
        if user_was_requested_to_review or \
                report_entry['new_comments'] or \
                report_entry['new_commits']:
            yield report_entry
How Much Does Home Security Cost in Clinton, NC? There are a variety of home security companies in Clinton, NC, each with their own pricing structure, so you can be sure to find one that fits your requirements and your budget. Pricing depends on the type of installation (professional or DIY), ownership of equipment, and monitoring. Installation charges range from free (when you install yourself) up to about $200. When you sign up for monitoring, some companies give you a basic system free of charge, eliminating equipment costs. A basic system, which you own after purchasing, tends to run about $230, though it may vary in price from company to company and based on your specific needs. Monitoring fees also vary, ranging in price from $29.99 a month to $200 a month. Wireless home security systems come with the added advantage of never needing to drill holes to run cables. The individual pieces of security equipment communicate wirelessly, similar to tablets and smartphones. This allows you to get your system set up quickly and easily. Wireless technology also makes it simple to move and reconfigure components whenever necessary. Communications from your wireless home security equipment to the monitoring center are handled via cellular transmissions, providing you with the ultimate protection. You won’t have to worry about burglars disabling your system from the outside of your home by cutting phone or cable wires. That’s a benefit you only get with wireless technology. It’s never been easier to keep an eye on everything at home. Many homeowners in Clinton, NC use security cameras to be able to check in from anywhere. For example, motion-activated and night vision cameras give you two different ways to monitor what’s going on in and around your home. Remote access to your home security system allows you to receive updates via text or email, and you can even view real-time footage from your mobile device or computer. 
Whatever your needs, you can feel confident knowing you can add security cameras to your home security system. How many times have you left the house and wondered if you’d locked the doors or armed your alarm system? By using your smartphone, you’ll never have to worry again. You can check on both thanks to smart home automation in Clinton, NC. Smart home technology places you in full control of the electronics in your home. You can control anything you connect wirelessly to your central control hub, such as lights, your furnace or a/c, and of course your alarm system. You’ll be able to set up text or email alerts to track anything in your home from package deliveries to kids returning from the mall.
#!/usr/bin/env python3
"""Assemble the SCM Workbench web site into ./tmp and optionally
preview it (--test) or publish it via ssh/scp (--install)."""
import sys
import os
import pathlib
import shutil


def copyFile( src, dst_dir ):
    """Copy file *src* into directory *dst_dir*, keeping its name."""
    dst = dst_dir / src.name
    shutil.copy( str( src ), str( dst ) )

if len(sys.argv) < 3:
    print( 'Usage: %s <version> <kits-folder> [--test] [--install]' % (sys.argv[0],) )
    print( ' %s 0.9.3 /shared/Downloads/ScmWorkbench/0.9.3' % (sys.argv[0],) )
    # BUG FIX: the original fell through after printing usage and then
    # crashed with IndexError on sys.argv[1]; exit explicitly instead.
    sys.exit( 1 )

version = sys.argv[1]
built_kits_dir = pathlib.Path( sys.argv[2] )

testing = '--test' in sys.argv[3:]
install = '--install' in sys.argv[3:]

# source paths
builder_top_dir = pathlib.Path( os.environ['BUILDER_TOP_DIR'] )
src_dir = builder_top_dir / 'Source'
docs_dir = builder_top_dir / 'Docs'
web_site_dir = builder_top_dir / 'WebSite'
root_dir = web_site_dir / 'root'
docs_files_dir = docs_dir / 'scm-workbench_files'

# output paths
output_dir = pathlib.Path( 'tmp' )
output_kits_dir = output_dir / 'kits'
output_user_guide_dir = output_dir / 'user-guide'
output_user_guide_files_dir = output_dir / 'user-guide' / 'scm-workbench_files'

# BUG FIX: on a fresh checkout 'tmp' does not exist and rmtree raised
# FileNotFoundError; ignore_errors makes the cleanup idempotent.
shutil.rmtree( str( output_dir ), ignore_errors=True )
output_dir.mkdir( parents=True, exist_ok=True )
output_kits_dir.mkdir( parents=True, exist_ok=True )

# top-level pages come from the web site root, styled like the user guide
for src in root_dir.glob( '*.html' ):
    copyFile( src, output_dir )

# use the user guide's CSS
copyFile( docs_dir / 'scm-workbench.css', output_dir )

rc = os.system( '"%s/build-docs.py" "%s"' % (docs_dir, output_user_guide_dir) )
if rc != 0:
    print( 'build docs failed' )
    sys.exit( 1 )

# copy doc images
output_user_guide_files_dir.mkdir( parents=True, exist_ok=True )
for src in docs_files_dir.glob( '*.png' ):
    copyFile( src, output_user_guide_files_dir )

kit_values = {
    'VERSION': version,
    }

# copy the Windows installer and the macOS disk image for this version
for kit_fmt in ('SCM-Workbench-%(VERSION)s-setup.exe',
                'SCM-Workbench-%(VERSION)s.dmg'):
    copyFile( built_kits_dir / (kit_fmt % kit_values), output_kits_dir )

# substitute the version number into the index page in place
with open( str( output_dir / 'index.html' ), encoding='utf-8' ) as f:
    index = f.read()

with open( str( output_dir / 'index.html' ), 'w', encoding='utf-8' ) as f:
    f.write( index % kit_values )

if testing:
    # open the generated index.html in the platform's default browser
    index = output_dir / 'index.html'
    if sys.platform == 'win32':
        import ctypes
        SW_SHOWNORMAL = 1
        ShellExecuteW = ctypes.windll.shell32.ShellExecuteW
        rc = ShellExecuteW( None, 'open', str(index), None, None, SW_SHOWNORMAL )

    elif sys.platform == 'darwin':
        cmd = '/usr/bin/open'
        os.spawnvp( os.P_NOWAIT, cmd, [cmd, str(index)] )

    else:
        cmd = '/usr/bin/xdg-open'
        os.spawnvp( os.P_NOWAIT, cmd, [cmd, str(index)] )

print( 'Web Site created in %s for version %s' % (output_dir, version) )

if install:
    web_root = '/var/www/scm-workbench.barrys-emacs.org'

    os.system( 'ssh root@qrm.org.uk mkdir -p %s/kits' % (web_root,) )
    os.system( 'ssh root@qrm.org.uk mkdir -p %s/user-guide/scm-workbench_files' % (web_root,) )
    os.system( 'scp tmp/index.html tmp/scm-workbench.css root@qrm.org.uk:%s/' % (web_root,) )
    os.system( 'scp tmp/kits/* root@qrm.org.uk:%s/kits/' % (web_root,) )
    os.system( 'scp tmp/user-guide/* root@qrm.org.uk:%s/user-guide/' % (web_root,) )
    os.system( 'scp tmp/user-guide/scm-workbench_files/* root@qrm.org.uk:%s/user-guide/scm-workbench_files' % (web_root,) )
    os.system( 'ssh root@qrm.org.uk chmod -R -v a+r %s' % (web_root,) )

sys.exit( 0 )
Join us for brunch on Easter Sunday, April 21, between our 9am and 11 am services. To subscribe to updates, scroll to bottom of page. Join us as we pray for members and community. Pastor Clint is hosting a brown bag lunch Bible study in the Fellowship Hall about the Bible passage for the coming Sunday. Please come and be a part of it!
# NOTE: Python 2 module ('print' statements below).
import os
import json
import socket


class Settings(object):
    """Server configuration and media-library persistence.

    Settings and the library index are stored as JSON files under
    ``~/.MediaServer``; missing files are created with defaults on first
    access. NOTE(review): these are class attributes shared by all
    instances, not per-instance state.
    """

    protocol = "http"
    host = "0.0.0.0" #socket.gethostbyname(socket.gethostname())
    port = "1345"                           # kept as a string for URL building
    user_settings_directory = "~/.MediaServer"
    settings_file = "settings"              # JSON settings file name
    library_file = "library"                # JSON library index file name
    file_paths = []                         # media search roots, loaded from settings
    library = {}                            # media library index, loaded from disk
    settings = {}                           # raw parsed settings dict
    # Default icon location: one directory above this module.
    icon_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "..", "icon.png"))

    def __init__(self):
        # Probe fallback icon locations (two levels up, then alongside the
        # module) since the install layout varies between dev and packaged runs.
        if not os.path.exists(self.icon_path):
            self.icon_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "../..", "icon.png"))
        if not os.path.exists(self.icon_path):
            self.icon_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "icon.png"))
        self.create_user_dirs()
        self.get_media_library()
        self.get_config_from_files()

    def get_server_address(self):
        # e.g. "http://0.0.0.0:1345"
        return self.protocol + "://" + self.host + ":" + self.port

    def create_user_dirs(self):
        # Ensure ~/.MediaServer exists before any file reads/writes.
        if not os.path.exists(self.get_settings_directory()):
            os.makedirs(self.get_settings_directory())

    def get_settings_directory(self):
        return os.path.abspath(os.path.expanduser(self.user_settings_directory))

    def get_settings_file_path(self):
        return os.path.abspath(os.path.expanduser(os.path.join(self.user_settings_directory, self.settings_file)))

    def get_library_file_path(self):
        return os.path.abspath(os.path.expanduser(os.path.join(self.user_settings_directory, self.library_file)))

    def get_config_from_files(self):
        # Load settings JSON; on any failure (missing/corrupt file) fall back
        # to defaults and write them out so the next run succeeds.
        try:
            f = open(self.get_settings_file_path(), 'r')
            self.settings = json.loads(f.read())
            f.close()
        except Exception as e:
            print e
            self.settings = {"file_paths": [os.path.expanduser("~")]}
            f = open(self.get_settings_file_path(), 'w')
            f.write(json.dumps(self.settings))
            f.close()
        self.file_paths = self.settings["file_paths"]

    def get_placeholder_image(self):
        # Same multi-location probing strategy as for icon_path in __init__.
        placeholder = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "..", "static", "images", "lorempixel.jpg"))
        if not os.path.exists(placeholder):
            placeholder = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "../..", "static", "images", "lorempixel.jpg"))
        if not os.path.exists(placeholder):
            placeholder = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "static", "images", "lorempixel.jpg"))
        return placeholder

    def write_config_settings(self):
        # Persist only the file_paths key; other settings keys are dropped
        # deliberately (file_paths is the sole supported setting).
        try:
            print self.settings["file_paths"]
            self.settings = {"file_paths": self.settings["file_paths"]}
            f = open(self.get_settings_file_path(), 'w')
            f.write(json.dumps(self.settings))
            f.close()
        except Exception as e:
            print e

    def get_media_library(self):
        # Load the library index; on failure start empty and create the file.
        try:
            f = open(self.get_library_file_path(), 'r')
            self.library = json.loads(f.read())
            f.close()
        except Exception as e:
            print e
            self.library = {}
            f = open(self.get_library_file_path(), 'w')
            f.write(json.dumps(self.library))
            f.close()

    def write_library(self, library, index):
        """Store *library* under key *index* and rewrite the library file."""
        print "Writing to: " + self.get_library_file_path()
        print index
        self.library[index] = {"library": library}
        f = open(self.get_library_file_path(), 'w')
        f.write(json.dumps(self.library))
        f.close()
"Dr. Carri's approach to overall health benefited my family and me not only with adjustments. Her many resources helped diagnose my daughter's spine curvature when her school exam missed it. My many adjustments have released tension in my upper neck and back which allowed me to sleep better and be rested. Her services over the past 10 years provided improvement in my overall health!" "Dr. Carri has worked with my wife & me, who both have chronic back pain/injuries, and has helped keep us almost pain free & always gives us tips on healthy lifestyle habits. She has helped our 3 kids maintain healthy spines & bodies also. I trust her more than other doctors we have used, as they seem to want to treat with surgery first. We love Dr. Carri!" "Dr. Carri totally got my lower back in shape. My regular visits to her help to keep me feeling great. Her natural healthy healing approach is so beneficial to mind and body. I feel like Dr. Carri is a member of our family! Not only does she treat several family members, we see her around town and at church. We truly enjoy all the benefits of her healing touch, supplements, and wisdom. Thank you!" "Due to my Rheumatoid Arthritis, I have seen many chiropractors trying to get some relief from the pain. Dr. Carri is by far the best. I have also received care from her through a couple of pregnancies. I wish I had known about her with my four earlier pregnancies! She is a gift to the community and is highly recommended."
# NOTE: Python 2 Kodi (xbmc) helper module.
"""Helpers for the script.media.aggregator Kodi plugin: plugin-URL building,
argv parsing, and video-library scan orchestration."""
import urllib, sys
import log


def make_url(params):
    """Build a plugin:// URL for script.media.aggregator from dict *params*."""
    url = 'plugin://script.media.aggregator/?' + urllib.urlencode(params)
    return url


def get_params():
    """Parse the plugin query string in ``sys.argv[2]`` into a dict.

    Returns None when Kodi supplied no query argument, otherwise a
    (possibly empty) dict of key/value strings.
    """
    if len(sys.argv) < 3:
        return None

    param = dict()
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        params = sys.argv[2]
        cleanedparams = params.replace('?', '')
        # NOTE(review): this trailing-slash trim is dead code — 'params' is
        # never read again (cleanedparams was computed above) — kept as-is.
        if (params[len(params) - 1] == '/'):
            params = params[0:len(params) - 2]
        pairsofparams = cleanedparams.split('&')
        param = {}
        for i in range(len(pairsofparams)):
            splitparams = {}
            splitparams = pairsofparams[i].split('=')
            if (len(splitparams)) == 2:
                param[splitparams[0]] = splitparams[1]
    # debug(param)
    return param


def ScanMonitor():
    """Create an xbmc.Monitor that tracks video-library scan start/finish."""
    import xbmc

    class _ScanMonitor(xbmc.Monitor):
        def __init__(self):
            log.debug('ScanMonitor - __init__')
            xbmc.Monitor.__init__(self)
            self.do_exit = False
            # True if a scan is already running when the monitor is created.
            self.do_start = xbmc.getCondVisibility('Library.IsScanningVideo')

        def onScanStarted(self, library):
            # BUG FIX: this callback previously logged 'onScanFinished'
            # (copy-paste), making scan-start and scan-end indistinguishable
            # in the log.
            log.debug('ScanMonitor - onScanStarted')
            if library == 'video':
                self.do_start = True

        def onScanFinished(self, library):
            log.debug('ScanMonitor - onScanFinished')
            if library == 'video':
                self.do_exit = True

    return _ScanMonitor()


def wait_for_update(timeout=1000, monitor=None):
    """Block until the running video-library scan finishes (or *timeout*
    tenths-of-seconds elapse); returns immediately if no scan is active."""
    try:
        import xbmc
        log.debug('wait_for_update')

        count = timeout
        if not monitor:
            monitor = ScanMonitor()

        if not monitor.do_start:
            log.debug('wait_for_update: no scan now')
            del monitor
            return

        while not monitor.abortRequested() and count:
            for i in xrange(10):
                if monitor.waitForAbort(0.1) or monitor.do_exit:
                    log.debug('wait_for_update - Stop scan detected')
                    del monitor
                    return
            if count % 10 == 1:
                # Double-check via the info label in case the callback was missed.
                if not xbmc.getCondVisibility('Library.IsScanningVideo'):
                    log.debug('wait_for_update - Library.IsScanningVideo is False')
                    break
            count -= 1
            log.debug('wait_for_update - Library Scanning Video - wait ({}s)'.format(timeout-count))

        del monitor
    except BaseException:
        log.print_tb()
        import time
        time.sleep(1)


def UpdateVideoLibrary(path=None, wait=False):
    """Kick off a Kodi video-library update, optionally for just *path*,
    and optionally block until the resulting scan completes."""
    import xbmc, log
    if path:
        # executebuiltin needs bytes on Kodi's Python 2 runtime.
        if isinstance(path,unicode):
            path = path.encode('utf-8')
        log.debug('UpdateLibrary: {}'.format(path))
        command = 'UpdateLibrary(video, {})'.format(path)
    else:
        command = 'UpdateLibrary(video)'

    if wait:
        monitor = ScanMonitor()
        # Let any scan already in flight drain first.
        while not monitor.abortRequested():
            if not monitor.do_start or monitor.do_exit:
                break
            xbmc.sleep(100)
        monitor.do_start = False

    xbmc.executebuiltin(command, wait)

    if wait:
        log.debug('UpdateLibrary: wait for start')
        while not monitor.abortRequested():
            if monitor.do_start:
                break
            xbmc.sleep(100)
        wait_for_update(monitor=monitor)


def string_to_ver(s):
    """Extract ('major', 'minor') digit strings from a version string,
    or None if no 'N.N' pattern is found."""
    import re
    m = re.search(r'(\d+)\.(\d+)', s)
    if m:
        return ( m.group(1), m.group(2) )


def kodi_ver():
    """Return {'major': int, 'minor': int} of the running Kodi build."""
    import xbmc
    bv = xbmc.getInfoLabel("System.BuildVersion")
    BuildVersions = string_to_ver(bv)
    # import log
    # log.debug(BuildVersions)
    res = {}
    res['major'] = int(BuildVersions[0])
    res['minor'] = int(BuildVersions[1])
    return res


def RunPlugin(**kwargs):
    """Fire-and-forget invocation of this plugin with kwargs as URL params."""
    import xbmc
    url = make_url(kwargs)
    xbmc.executebuiltin('RunPlugin("%s")' % url)


def RunPluginSync(**kwargs):
    """As RunPlugin, but block until the built-in finishes."""
    import xbmc
    url = make_url(kwargs)
    xbmc.executebuiltin('RunPlugin("%s")' % url, wait=True)


if __name__ == "__main__":
    r = string_to_ver('18.0 Git:20190128-d81c34c465')
These airlines usually offer the cheapest flights from Leeds Bradford to Jakarta. These airlines fly from Leeds Bradford to Jakarta, but you’ll have at least one layover. This is the shortest flight time from Leeds Bradford to Jakarta. Today alone, Netflights has searched over 38 flights. 19 / Aug / 2019 is currently the lowest priced day of the year to fly the Leeds Bradford/Jakarta route. There are around 32 flights on any given week from the UK to Jakarta and the cheapest month to fly right now is August. You’ve searched for flights from Leeds Bradford to Jakarta. 32 flights go via Jakarta every week, and we've searched them all to make it easy for you to compare your options. On average, Qatar Airways has the cheapest flights, so look out for them in your search results. When you fly 11762 km from Leeds Bradford to Jakarta, you can choose from three airlines. Flights are most expensive during high season, and you can expect accommodation prices to be higher then too. Last-minute deals do crop up, but it's best to book in advance to avoid disappointment. Qatar Airways, British Airways and KLM are the most popular airlines for flying from Leeds Bradford to Jakarta, so look out for their deals and offers. To find the best price, try searching by month. Or, if you want to bag a last-minute offer, keep an eye on Netflights to monitor the prices. Flights from Leeds Bradford to Jakarta normally take 20 hrs 50 mins. Make sure you check what refreshments or meals are included, so you can plan your journey. Before you book, why not check out our hotel and car hire options? We have some hotels close to the airport – handy if you're arriving late at night, or flying back very early.
# -*- coding: utf-8 -*-
# South schema migration: replaces the 'Task' model with 'JobTask'.
# NOTE: implemented as delete-table + create-table (not a rename), so any
# existing rows in porkweb_task are dropped by forwards() and any rows in
# porkweb_jobtask are dropped by backwards().
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        """Drop porkweb_task and create porkweb_jobtask."""
        # Deleting model 'Task'
        db.delete_table(u'porkweb_task')

        # Adding model 'JobTask'
        db.create_table(u'porkweb_jobtask', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.Job'])),
            ('taskid', self.gf('django.db.models.fields.CharField')(max_length=36)),
            ('taskstatus', self.gf('django.db.models.fields.CharField')(default='New', max_length=16)),
            ('taskresults', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'porkweb', ['JobTask'])

    def backwards(self, orm):
        """Recreate porkweb_task (same columns) and drop porkweb_jobtask."""
        # Adding model 'Task'
        db.create_table(u'porkweb_task', (
            ('taskresults', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('taskstatus', self.gf('django.db.models.fields.CharField')(default='New', max_length=16)),
            ('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.Job'])),
            ('taskid', self.gf('django.db.models.fields.CharField')(max_length=36)),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal(u'porkweb', ['Task'])

        # Deleting model 'JobTask'
        db.delete_table(u'porkweb_jobtask')

    # Frozen ORM snapshot used by South to build the 'orm' argument above;
    # auto-generated — do not edit by hand.
    models = {
        u'porkweb.attackparam': {
            'Meta': {'object_name': 'AttackParam', '_ormbases': [u'porkweb.Param']},
            'attack': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.AttackType']"}),
            u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'porkweb.attacktype': {
            'Meta': {'object_name': 'AttackType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'porkweb.cracked': {
            'Meta': {'object_name': 'Cracked'},
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.Job']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'porkweb.hashtype': {
            'Meta': {'object_name': 'HashType'},
            'hashcat': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hashcatType': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'ocllite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'oclplus': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'porkweb.job': {
            'Meta': {'object_name': 'Job'},
            'attackType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.AttackType']"}),
            'eta': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'hashType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.HashType']"}),
            'hashes': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jobServer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.JobServer']", 'null': 'True', 'blank': 'True'}),
            'progress': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'results': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'speed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'})
        },
        u'porkweb.jobparam': {
            'Meta': {'object_name': 'JobParam', '_ormbases': [u'porkweb.Param']},
            'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.Job']"}),
            u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'porkweb.jobserver': {
            'Meta': {'object_name': 'JobServer'},
            'details': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ipaddr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'os': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'port': ('django.db.models.fields.IntegerField', [], {'default': '8117'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Offline'", 'max_length': '16'})
        },
        u'porkweb.jobtask': {
            'Meta': {'object_name': 'JobTask'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.Job']"}),
            'taskid': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
            'taskresults': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'taskstatus': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'})
        },
        u'porkweb.log': {
            'Meta': {'object_name': 'Log'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.TextField', [], {}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'porkweb.param': {
            'Meta': {'object_name': 'Param'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        }
    }

    complete_apps = ['porkweb']
Edit: Sorry guys. Some stuff came up, and I’m not feeling too hot. WaM’ll be up late Friday night.
############################################### ## Local Outlier Factor (LOF) Implementation ## ############################################### ### Import Python Libraries ### import pandas as pd from pandas import DataFrame from numpy import array, matrix ### Import R Libraries ### import rpy2.robjects as R from rpy2.robjects.packages import importr from rpy2.robjects import pandas2ri pandas2ri.activate() base = importr("base") utils = importr("utils") odtpackage = importr("dbscan") ###################### ## Global LOF Class ## ###################### class DBSCAN(object): ### DBSCAN Class Constructor ### def __init__(self, xdf, epsilon, minPts): self.xdf = xdf self.epsilon = epsilon self.minPts = minPts self.label = [] self.cluster = [] ### [TODO:] Implement Nromalization functionality ### def normalizeData(self): pass ### DBSCAN clustering estimation Function ### def DBSCAN(self, xdf): if len(xdf) > 100000: print "Warning! DBSCAN might fail for large dataset." rdf = pandas2ri.py2ri(xdf) return odtpackage.dbscan(base.as_matrix(rdf), self.epsilon, self.minPts) ### DBSCAN Execution Function ### def getOutlier(self): cls = self.DBSCAN(self.xdf) print cls for i in array(cls.rx2('cluster')): self.cluster.append(i) if i == 0: self.label.append('outlier') else: self.label.append('normal') return DataFrame({'Cluster': self.cluster, 'Label': self.label}) if __name__ == "__main__": url = '/Users/warchief/Documents/Projects/DataRepository/AnomalyDetection/test.csv' df = DataFrame.from_csv(path=url, header=0, sep=',', index_col=False) X = df['SL_RRC_CONN_AVG_PER_CELL'].values Y = df['I_DL_DRB_CELL_TPUT_MBPS'].values d = {'x': X, 'y': Y} pdf = DataFrame(data=d) db = DBSCAN(pdf, 0.5, 50) print db.getOutlier()
Hopkinson House is on the south side of Washington Square between Walnut, Spruce, 6th, and 7th Streets. In the front driveway, 10-minute parking is allowed for pick-up and drop-off. On the street directly in front of the building, 20-minute parking is available. Nearby streets are zoned for metered (pay at kiosks) or free parking. Underground valet parking is available in the Hopkinson House garage via the 6th Street entry/exit ramp. For information about public transportation and driving instructions options, click on other helpful links. On I-95 N, follow the signs for Exit 22 Central Phila./I-676 W (left lanes). Bear right and merge onto Callowhill Street. Turn left at 6th Street toward Independence Hall and proceed to Walnut Street. At the next stop sign, turn right onto S Washington Square. Hopkinson House is on the left. On I-76 E (PA Turnpike), take Exit 24 Valley Forge. After the tolls, proceed on I-76 E (Schuylkill Expressway). Merge left onto I-676 E/US 30 E (Vine Street Expressway). Follow the signs to the exit for the Ben Franklin Bridge/I-676/NJ-30. At the traffic light at the bottom of the ramp, turn right onto 6th Street. Continue to Walnut Street and at the next stop sign, turn right onto S Washington Square. Hopkinson House is on the left. On I-95 S, take Exit 22, Historic Area/Independence Hall. At the bottom of the ramp, bear right onto Callowhill Street. Turn left at S 6th Street toward Independence Hall and proceed to Walnut Street. At the next stop sign, turn right onto S Washington Square. Hopkinson House is on the left. On the NJ Turnpike S, take Exit 4 to Philadelphia/Camden. After the tolls, bear right onto NJ-73 N. Take the NJ-38 W exit toward NJ-41/Ben Franklin Bridge/Haddonfield. Follow the signs to US-30 W /Camden/Ben Franklin Bridge, taking a slight right. Take US-30 W across the bridge, exit at 5th Street, and keep right at the fork onto N Marginal Road. Turn right onto N 4th Street and right again at Walnut Street. 
At 6th Street, turn left. At the next stop sign, turn right onto S Washington Square. Hopkinson House is on the left.
'''Import tasks for NED-D, the galactic distances catalog.
'''
import csv
import os
from collections import OrderedDict
from html import unescape

from astrocats.catalog.utils import (get_sig_digits, is_number, pbar,
                                     pretty_num, uniq_cdl)
from astropy import units as un
from astropy.cosmology import Planck15 as cosmo
from astropy.cosmology import z_at_value
from decimal import Decimal

from ..supernova import SUPERNOVA
from ..utils import host_clean, name_clean


def do_nedd(catalog):
    """Import host-galaxy distances (and derived redshifts) from the NED-D
    CSV compilation into *catalog*.

    For each NED-D row: records the distance against the cleaned host name
    in ``catalog.nedd_dict``, and, when the row is tied to a supernova,
    adds redshift / comoving-distance / host quantities to that entry.
    """
    task_str = catalog.get_current_task_str()
    nedd_path = os.path.join(
        catalog.get_current_task_repo(), 'NED26.10.1-D-13.1.0-20160930.csv')
    f = open(nedd_path, 'r')
    # Skip the 13-line CSV preamble, then sort by (SN name, object name).
    data = sorted(list(csv.reader(f, delimiter=',', quotechar='"'))[
        13:], key=lambda x: (x[9], x[3]))
    reference = "NED-D v" + nedd_path.split('-')[-2]
    refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
    nedbib = "1991ASSL..171...89H"
    olddistname = ''
    loopcnt = 0
    for r, row in enumerate(pbar(data, task_str)):
        # NOTE(review): the preamble was already sliced off above, so this
        # additionally drops the first 13 *data* rows — possibly redundant;
        # kept as-is.
        if r <= 12:
            continue
        distname = row[3]
        name = name_clean(distname)
        # distmod = row[4]
        # moderr = row[5]
        dist = row[6]
        bibcode = unescape(row[8])
        snname = name_clean(row[9])
        redshift = row[10]
        cleanhost = ''
        # Derive a host-galaxy name when the distance object is not the SN
        # itself (nor its 'NAME HOST' alias).
        if name != snname and (name + ' HOST' != snname):
            cleanhost = host_clean(distname)
            if cleanhost.endswith(' HOST') or cleanhost.startswith('SN'):
                cleanhost = ''
            if not is_number(dist):
                print(dist)
            if dist and cleanhost:
                catalog.nedd_dict.setdefault(
                    cleanhost, []).append(Decimal(dist))
        if snname and 'HOST' not in snname:
            # Create/fetch the SN entry with NED-D as a secondary source.
            snname, secondarysource = catalog.new_entry(
                snname, srcname=reference, bibcode=nedbib, url=refurl,
                secondary=True)
            if bibcode:
                source = catalog.entries[snname].add_source(bibcode=bibcode)
                sources = uniq_cdl([source, secondarysource])
            else:
                sources = secondarysource
            if name == snname:
                if redshift:
                    catalog.entries[snname].add_quantity(
                        SUPERNOVA.REDSHIFT, redshift, sources)
                if dist:
                    catalog.entries[snname].add_quantity(
                        SUPERNOVA.COMOVING_DIST, dist, sources)
                    if not redshift:
                        # No catalogued redshift: invert the Planck15
                        # comoving-distance relation to derive one.
                        try:
                            zatval = z_at_value(cosmo.comoving_distance,
                                                float(dist) * un.Mpc,
                                                zmax=5.0)
                            sigd = get_sig_digits(str(dist))
                            redshift = pretty_num(zatval, sig=sigd)
                        except (KeyboardInterrupt, SystemExit):
                            raise
                        except Exception:
                            # Inversion is best-effort; skip on any failure.
                            pass
                        else:
                            # Credit the Planck 2015 cosmology paper for the
                            # derived value.
                            cosmosource = catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                            combsources = uniq_cdl(sources.split(',') +
                                                   [cosmosource])
                            catalog.entries[snname].add_quantity(
                                SUPERNOVA.REDSHIFT, redshift, combsources,
                                derived=True)
            if cleanhost:
                catalog.entries[snname].add_quantity(
                    SUPERNOVA.HOST, cleanhost, sources)
        # Journal once per distance-object group when running incrementally.
        if catalog.args.update and olddistname != distname:
            catalog.journal_entries()
        olddistname = distname
        loopcnt = loopcnt + 1
        if (catalog.args.travis and
                loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
            break
    catalog.journal_entries()
    f.close()
    return
A great giveaway for sports fanatics everywhere. The arm rests are fitted with built in cup holders. This portable chair is made of polyester and PVC and comes with matching carrying case with shoulder strap. A generous imprint area is offered on the back of the chair for you to add your company name and logo.
import re

from ffc.backends.ufc import *

# TODO: Make cell_orientation a double +1.0|-1.0 instead of the int flag in ffc/ufc/dolfin
# TODO: Simplify ufc templates by introducing 'preamble' keyword in place of members, constructor, destructor

domain_background = """
/// This is just here to document the memory layout of the geometry data arrays
struct geometry_data
{
  // Example dimensions
  std::size_t gdim = 3;
  std::size_t tdim = 2;
  std::size_t num_points = 1;

  // Memory layout of geometry data arrays
  double x[num_points * gdim];         // x[i]   -> x[ip*gdim + i]
  double X[num_points * tdim];         // X[j]   -> X[ip*tdim + j]
  double J[num_points * gdim * tdim];  // J[i,j] -> J[ip*gdim*tdim + i*tdim + j]
  double detJ[num_points];             // detJ   -> detJ[ip]
  double K[num_points * tdim * gdim];  // K[j,i] -> K[ip*tdim*gdim + j*gdim + i]
  double n[num_points * gdim];         // n[i]   -> n[ip*gdim + i]

  // In the affine case we have the relation:
  // x[i] = x0[i] + sum_j J[i,j] X[j]
  // X[j] = sum_i K[j,i] (x[i] - x0[i])
};
"""


def extract_keywords(template):
    """Return the set of ``%(name)`` substitution keywords used in *template*."""
    keyword_pattern = re.compile(r"%\(([a-zA-Z0-9_]*)\)")
    return {keyword for keyword in keyword_pattern.findall(template)}
Our spring Pull Clinic is on for April 21! Whether you are looking to take up skijoring, sledding, dryland carting or just want some quality time with your dog, this is the perfect opportunity to learn how to harness your dog's natural pulling talents! Learn the basics of direction commands, as well as safe ways to train your best friend to be the athlete he or she really is! For more information and an entry form, click here!
#! /usr/bin/env python
# Copyright (c) 2016 Gemini Lasswell
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" chatbot_reply.script, defines decorators and superclass for chatbot
scripts.

Provides the @rule decorator, the ScriptRegistrar metaclass that tracks
imported Script subclasses, the Script base class itself, the per-user
UserInfo container, and two tokenization helpers for substitute methods.
"""
from __future__ import unicode_literals
import collections
from functools import wraps
import random
import re

from chatbot_reply.six import with_metaclass
from chatbot_reply.constants import _HISTORY, _PREFIX


def rule(pattern_text, previous_reply="", weight=1):
    """ decorator for rules in subclasses of Script.

    The pattern, previous-reply pattern and weight are stashed as default
    arguments on the wrapper so the engine can read them off the function.
    The rule's return value is passed through self.choose and then
    self.process_reply; any exception raised there is re-raised with the
    rule's dotted name appended to its message for easier debugging.
    """
    def rule_decorator(func):
        @wraps(func)
        def func_wrapper(self, pattern=pattern_text,
                         previous_reply=previous_reply, weight=weight):
            result = func(self)
            try:
                return self.process_reply(self.choose(result))
            except Exception as e:
                # Build "module.Class.method" (minus the loader prefix)
                # to identify which rule produced the bad value.
                name = (func.__module__[len(_PREFIX):] + "." +
                        self.__class__.__name__ + "." +
                        func.__name__)
                msg = (" in @rule while processing return value "
                       "from {0}".format(name))
                e.args = (e.args[0] + msg,) + e.args[1:]
                raise
        return func_wrapper
    return rule_decorator


class ScriptRegistrar(type):
    """ Metaclass of Script which keeps track of newly imported Script
    subclasses in a list.
    Public class attribute:
        registry - a list of classes
    Public class method:
        clear - empty the registry list
    """
    registry = []

    def __new__(cls, name, bases, attributes):
        new_cls = type.__new__(cls, name, bases, attributes)
        # Skip Script itself (defined in this module); record only
        # subclasses defined elsewhere.
        if new_cls.__module__ != cls.__module__:
            cls.registry.append(new_cls)
        return new_cls

    @classmethod
    def clear(cls):
        cls.registry = []


class Script(with_metaclass(ScriptRegistrar, object)):
    """Base class for Chatbot Engine Scripts

    Classes derived from this one can be loaded by the ChatbotEngine.

    Subclasses may define:

    topic - This must be a class attribute, not an instance variable.
        Contains a string that is inspected when the class is imported.
        All rules and substitution functions in a class are associated
        with a topic, and will only be used to process a user's message
        when the user's topic is equal to the class topic. Changing this
        value for a class after import will have no effect. If topic is
        set to None, the class will not be instantiated, so __init__,
        setup and setup_user will not be run. If you want to share a lot
        of rules between two Script subclasses with different topics,
        have them inherit them from a base class with its topic set to
        None.

    setup(self) - a method that may be used to define alternates (see
        below) and to initialize bot variables. It will be called after
        each instance of a script object is created, and may be called
        again if the engine wants to reset bot variables.

    setup_user(self, user) - a method that is called the first time the
        engine is processing a message from a given user. This is a good
        place to initialize user variables used by a script.

    alternates - a dictionary of patterns. Key names must be alphanumeric
        and may not begin with an underscore or number. The patterns must
        be simple in that they can't contain references to variables or
        wildcards or the memorization character _. When the patterns for
        the rules are imported the alternates will be substituted in at
        import time, as opposed to user and bot variables, which are
        substituted in every time a rule is matched with a new message.
        If you have 20,000 rules, this might make a performance
        difference, but if you have 20, it won't. Changing
        self.alternates after import will have no effect on pattern
        matching.

    substitute(self, text, list_of_lists) - Any method name defined by a
        subclass that begins with substitute will be called with the raw
        text of every message (within its topic) and a list of list of
        words that have been split on whitespace. It must return a list
        of lists of words where the outer list is the same length. Use
        this to do things like expand contractions, interpret ascii
        smileys such as >:| and otherwise mess with the tokenizations.
        If there is more than one substitute method for a topic, they
        will all be called in an unpredictable order, each passed the
        output of the one before.

    @rule(pattern, previous="", weight=1)
    rule(self) - Methods decorated by @rule and beginning with "rule" are
        the gears of the script engine. The engine will select one rule
        method that matches a message and call it. The @rule decorator
        will run the method's return value through first self.choose then
        self.process_reply. Child classes may redefine self.choose and
        self.process_reply if they would like different behavior.

    choose(self, retval) - A method that returns a string. The @rule
        decorator will call self.choose on the return values of all
        rules.

    process_reply(self, string) - A method that takes a string and
        returns a string. The @rule decorator will call this on the
        return value it gets from self.choose.

    Public instance variables that are meant to be used by child classes,
    but not modified (with the exception that it's ok to change, add and
    delete things in the variable dictionaries, just not the dictionary
    objects themselves):

    botvars - dictionary of variable names and values that are global
        for all users of the chatbot engine
    uservars - dictionary of variable names and values for the current
        user
    userinfo - UserInfo object containing info about the sender
    match - a Match object (see rules.py) representing the relationship
        between the matched user input (and previous reply, if
        applicable) and the rule's patterns

    Public instance variable, ok to change in child classes:

    current_topic - string giving current conversation topic, which will
        limit the rule search for the next message
    """
    topic = "all"

    def __init__(self):
        # All three are populated by the engine before rules run.
        self.botvars = None
        self.userinfo = None
        self.match = None

    @property
    def uservars(self):
        """Variable dictionary of the user whose message is being processed."""
        return self.userinfo.vars

    def ct_get(self):
        # Getter half of the current_topic property.
        return self.userinfo.topic_name

    def ct_set(self, newtopic):
        # Setter half of the current_topic property.
        self.userinfo.topic_name = newtopic
    current_topic = property(ct_get, ct_set)

    def setup(self):
        """ placeholder """
        pass

    def setup_user(self, user):
        """ placeholder """
        pass

    def choose(self, args):
        """ Select a response from a list of possible responses.

        For increased flexibility, since this is used to process all
        return values from all rules, this can also be passed None or an
        empty string or list, in which case it will return the empty
        string, or it may be passed a string, which it will simply
        return.

        If the argument is a list of strings, select one randomly and
        return it. If the argument is a list of tuples containing a
        string and an integer weight, select a string randomly with the
        probability of its selection being proportional to the weight.
        """
        if args is None or not args:
            reply = ""
        else:
            reply = args
            if isinstance(args, list) and args:
                # Default: uniform random choice (also covers the
                # weighted case below until a weighted pick is made).
                reply = random.choice(args)
                if isinstance(args[0], tuple):
                    # Clamp weights to >= 1 so every option is possible,
                    # then walk the cumulative distribution.
                    args = [(string, max(1, weight))
                            for string, weight in args]
                    total = sum([weight for string, weight in args])
                    choice = random.randrange(total)
                    for string, weight in args:
                        if choice < abs(weight):
                            reply = string
                            break
                        else:
                            choice -= abs(weight)
        return reply

    def process_reply(self, string):
        """ Process a reply before returning it to the chatbot engine.
        The only thing this does is use built-in string formatting to
        substitute in the match results.
        """
        return string.format(*[], **self.match)


class UserInfo(object):
    """ A class for stashing per-user information.
    Public instance variables:
    vars: a dictionary of variable names and values
    info: a dictionary of information about the user
    topic_name: the name of the topic the user is currently in
    msg_history: a deque containing Targets for a few recent messages
    repl_history: a deque containing Targets for a few recent replies
    """
    def __init__(self, info):
        self.vars = {}
        self.info = info
        self.topic_name = "all"
        # Bounded histories; _HISTORY caps how many Targets are kept.
        self.msg_history = collections.deque(maxlen=_HISTORY)
        self.repl_history = collections.deque(maxlen=_HISTORY)


# ----- a couple of useful utility functions for writers of substitute methods

def split_on_whitespace(text):
    """ Because this has to work in Py 2.6, and re.split doesn't do
    UNICODE in 2.6. Return text broken into words by whitespace.
    """
    # NOTE: "[\S]+" is deliberately not a raw string here; the escape
    # happens to survive, but the pattern matches runs of non-whitespace.
    matches = [m for m in re.finditer("[\S]+", text, flags=re.UNICODE)]
    results = [text[m.span()[0]:m.span()[1]] for m in matches]
    return results


def kill_non_alphanumerics(text):
    """remove any non-alphanumeric characters from a string and return
    the result. re.sub doesn't do UNICODE in python 2.6.
    """
    matches = [m for m in re.finditer("[\w]+", text, flags=re.UNICODE)]
    result = "".join([text[m.span()[0]:m.span()[1]] for m in matches])
    return result
The purpose of the Account Manager is to meet and exceed sales objectives in the assigned territory by promoting and selling B2B (Business to Business) interstate logo signs and/or TODS (Tourist-oriented directional signing). An Account Manager is expected to use professional sales techniques, build relationships, and develop long term advertising relationships that grow Interstate Logos sales. o Meet and exceed sales targets by targeting every eligible business within the assigned territory/account list, and identify potential growth areas. o Responsible for tracking annual renewal numbers and making calls to obtain renewal contracts and payments. o Assist customers with the design of their business logo, drawing on experience to assist customers in developing a logo that is legible and utilizes colors that have proven the most effective for visibility. o Become proficient in the use of available computer tools and asset management system (database). o Continually develop product knowledge and acquire better selling skills. o Actively participate in location's sales meetings and territory seminars/trade shows when needed. o Assist in monitoring customer payments and collections. o Review site plans, as-builts (check to ensure correct location and specifications), and work orders. o Respond to and provide resolution for any questions or concerns by Program participants (customers). o Plan each day, week, and month before the month starts; plan each sales call. o Maintain organized, up-to-date records of clients and sales activity. o Ensure your automobile has a neat and professional appearance. o Maintain a professional sales appearance. o Work a minimum 40 hours, five days a week. o Travel overnight when necessary. o Cluster your field work geographically, and manage your time effectively. o Analyze and monitor personal sales data and reports. o Be prepared to submit daily planners/call reports, sales plans, and sales forecasts on a timely basis. 
o Follow-up on all client production orders, and ensure timely signs installations. o Communicate with clients and managers to resolve any customer issues or concerns in real time. o Check signs, participant eligibility, etc. on a regular basis. Ability to explain the advertising business to customers from installation and product standpoints. Skill in selling or promoting advertisements. PHYSICAL DEMANDS AND WORK ENVIRONMENT The primary work environment is an office. The physical demands for this position include light lifting (20 to 30 pounds), pushing, reaching, seeing (with focus on reading, color distinction, acuity, peripheral vision, and depth perception), some sitting, standing, stooping, walking, talking, and turning. Nights spent traveling are 25 to 50%. ADDITIONAL INFORMATION The first year earning potential for this position is around $40,000 / year.
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
# Flask views for creating, showing and running inference on
# GenericImageModelJobs in DIGITS.
# NOTE(review): this module uses Python-2-only constructs
# (dict.iteritems, sorted(cmp=...), dict(s.items() + ...)) and will not
# run unmodified on Python 3.
from __future__ import absolute_import

import os
import re
import tempfile

import flask
import werkzeug.exceptions

from .forms import GenericImageModelForm
from .job import GenericImageModelJob
from digits import frameworks
from digits import utils
from digits.config import config_value
from digits.dataset import GenericImageDatasetJob
from digits.inference import ImageInferenceJob
from digits.status import Status
from digits.utils import filesystem as fs
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import app, scheduler

blueprint = flask.Blueprint(__name__, __name__)


@blueprint.route('/new', methods=['GET'])
@utils.auth.requires_login
def new():
    """
    Return a form for a new GenericImageModelJob
    """
    form = GenericImageModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = []
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    ## Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    return flask.render_template('models/images/generic/new.html',
            form = form,
            frameworks = frameworks.get_frameworks(),
            previous_network_snapshots = prev_network_snapshots,
            previous_networks_fullinfo = get_previous_networks_fulldetails(),
            multi_gpu = config_value('caffe_root')['multi_gpu'],
            )


@blueprint.route('.json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create():
    """
    Create a new GenericImageModelJob

    Returns JSON when requested: {job_id,name,status} or {errors:[]}

    Supports hyperparameter sweeps: if multiple learning rates or batch
    sizes are submitted, one training job is launched per combination.
    """
    form = GenericImageModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = []
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    ## Is there a request to clone a job with ?clone=<job_id>
    fill_form_if_cloned(form)

    if not form.validate_on_submit():
        if request_wants_json():
            return flask.jsonify({'errors': form.errors}), 400
        else:
            return flask.render_template('models/images/generic/new.html',
                    form = form,
                    frameworks = frameworks.get_frameworks(),
                    previous_network_snapshots = prev_network_snapshots,
                    previous_networks_fullinfo = get_previous_networks_fulldetails(),
                    multi_gpu = config_value('caffe_root')['multi_gpu'],
                    ), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        raise werkzeug.exceptions.BadRequest(
                'Unknown dataset job_id "%s"' % form.dataset.data)

    # sweeps will be a list of the permutations of swept fields
    # Get swept learning_rate
    sweeps = [{'learning_rate': v} for v in form.learning_rate.data]
    add_learning_rate = len(form.learning_rate.data) > 1

    # Add swept batch_size
    # (Py2-only: dict(s.items() + [...]) concatenates lists of pairs.)
    sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]
    add_batch_size = len(form.batch_size.data) > 1

    n_jobs = len(sweeps)

    jobs = []
    for sweep in sweeps:
        # Populate the form with swept data to be used in saving and
        # launching jobs.
        form.learning_rate.data = sweep['learning_rate']
        form.batch_size.data = sweep['batch_size']

        # Augment Job Name
        extra = ''
        if add_learning_rate:
            extra += ' learning_rate:%s' % str(form.learning_rate.data[0])
        if add_batch_size:
            extra += ' batch_size:%d' % form.batch_size.data[0]

        job = None
        try:
            job = GenericImageModelJob(
                    username    = utils.auth.get_username(),
                    name        = form.model_name.data + extra,
                    dataset_id  = datasetJob.id(),
                    )

            # get framework (hard-coded to caffe for now)
            fw = frameworks.get_framework_by_id(form.framework.data)

            pretrained_model = None
            #if form.method.data == 'standard':
            if form.method.data == 'previous':
                old_job = scheduler.get_job(form.previous_networks.data)
                if not old_job:
                    raise werkzeug.exceptions.BadRequest(
                            'Job not found: %s' % form.previous_networks.data)

                use_same_dataset = (old_job.dataset_id == job.dataset_id)
                network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)

                for choice in form.previous_networks.choices:
                    if choice[0] == form.previous_networks.data:
                        # epoch 0 = no pretrained weights,
                        # -1 = the previous job's own pretrained model,
                        # otherwise a specific snapshot epoch.
                        epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
                        if epoch == 0:
                            pass
                        elif epoch == -1:
                            pretrained_model = old_job.train_task().pretrained_model
                        else:
                            for filename, e in old_job.train_task().snapshots:
                                if e == epoch:
                                    pretrained_model = filename
                                    break

                            if pretrained_model is None:
                                raise werkzeug.exceptions.BadRequest(
                                        "For the job %s, selected pretrained_model for epoch %d is invalid!"
                                        % (form.previous_networks.data, epoch))
                            if not (os.path.exists(pretrained_model)):
                                raise werkzeug.exceptions.BadRequest(
                                        "Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
                        break

            elif form.method.data == 'custom':
                network = fw.get_network_from_desc(form.custom_network.data)
                pretrained_model = form.custom_network_snapshot.data.strip()
            else:
                raise werkzeug.exceptions.BadRequest(
                        'Unrecognized method: "%s"' % form.method.data)

            # Translate the form's learning-rate-policy fields into the
            # framework's policy dict.
            policy = {'policy': form.lr_policy.data}
            if form.lr_policy.data == 'fixed':
                pass
            elif form.lr_policy.data == 'step':
                policy['stepsize'] = form.lr_step_size.data
                policy['gamma'] = form.lr_step_gamma.data
            elif form.lr_policy.data == 'multistep':
                policy['stepvalue'] = form.lr_multistep_values.data
                policy['gamma'] = form.lr_multistep_gamma.data
            elif form.lr_policy.data == 'exp':
                policy['gamma'] = form.lr_exp_gamma.data
            elif form.lr_policy.data == 'inv':
                policy['gamma'] = form.lr_inv_gamma.data
                policy['power'] = form.lr_inv_power.data
            elif form.lr_policy.data == 'poly':
                policy['power'] = form.lr_poly_power.data
            elif form.lr_policy.data == 'sigmoid':
                policy['stepsize'] = form.lr_sigmoid_step.data
                policy['gamma'] = form.lr_sigmoid_gamma.data
            else:
                raise werkzeug.exceptions.BadRequest(
                        'Invalid learning rate policy')

            # GPU selection: either a count (scheduler picks which) or an
            # explicit list of device ids, never both.
            if config_value('caffe_root')['multi_gpu']:
                if form.select_gpu_count.data:
                    gpu_count = form.select_gpu_count.data
                    selected_gpus = None
                else:
                    selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
                    gpu_count = None
            else:
                if form.select_gpu.data == 'next':
                    gpu_count = 1
                    selected_gpus = None
                else:
                    selected_gpus = [str(form.select_gpu.data)]
                    gpu_count = None

            # Set up augmentation structure
            data_aug = {}
            data_aug['flip'] = form.aug_flip.data
            data_aug['quad_rot'] = form.aug_quad_rot.data
            data_aug['rot_use'] = form.aug_rot_use.data
            data_aug['rot'] = form.aug_rot.data
            data_aug['scale_use'] = form.aug_scale_use.data
            data_aug['scale'] = form.aug_scale.data
            data_aug['hsv_use'] = form.aug_hsv_use.data
            data_aug['hsv_h'] = form.aug_hsv_h.data
            data_aug['hsv_s'] = form.aug_hsv_s.data
            data_aug['hsv_v'] = form.aug_hsv_v.data
            data_aug['conv_color'] = form.aug_conv_color.data

            # Python Layer File may be on the server or copied from the client.
            fs.copy_python_layer_file(
                bool(form.python_layer_from_client.data),
                job.dir(),
                (flask.request.files[form.python_layer_client_file.name]
                 if form.python_layer_client_file.name in flask.request.files
                 else ''), form.python_layer_server_file.data)

            job.tasks.append(fw.create_train_task(
                        job_dir         = job.dir(),
                        dataset         = datasetJob,
                        train_epochs    = form.train_epochs.data,
                        snapshot_interval   = form.snapshot_interval.data,
                        learning_rate   = form.learning_rate.data[0],
                        lr_policy       = policy,
                        gpu_count       = gpu_count,
                        selected_gpus   = selected_gpus,
                        batch_size      = form.batch_size.data[0],
                        val_interval    = form.val_interval.data,
                        pretrained_model= pretrained_model,
                        crop_size       = form.crop_size.data,
                        use_mean        = form.use_mean.data,
                        network         = network,
                        random_seed     = form.random_seed.data,
                        solver_type     = form.solver_type.data,
                        shuffle         = form.shuffle.data,
                        data_aug        = data_aug,
                        )
                    )

            ## Save form data with the job so we can easily clone it later.
            save_form_to_job(job, form)

            jobs.append(job)
            scheduler.add_job(job)
            if n_jobs == 1:
                if request_wants_json():
                    return flask.jsonify(job.json_dict())
                else:
                    return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))

        except:
            # Intentionally broad: undo the half-created job, then
            # re-raise so Flask reports the original error.
            if job:
                scheduler.delete_job(job)
            raise

    if request_wants_json():
        return flask.jsonify(jobs=[job.json_dict() for job in jobs])

    # If there are multiple jobs launched, go to the home page.
    return flask.redirect('/')


def show(job, related_jobs=None):
    """
    Called from digits.model.views.models_show()
    """
    return flask.render_template('models/images/generic/show.html', job=job, related_jobs=related_jobs)


@blueprint.route('/large_graph', methods=['GET'])
def large_graph():
    """
    Show the loss/accuracy graph, but bigger
    """
    job = job_from_request()

    return flask.render_template('models/images/generic/large_graph.html', job=job)


@blueprint.route('/infer_one.json', methods=['POST'])
@blueprint.route('/infer_one', methods=['POST', 'GET'])
def infer_one():
    """
    Infer one image
    """
    model_job = job_from_request()

    remove_image_path = False
    if 'image_path' in flask.request.form and flask.request.form['image_path']:
        image_path = flask.request.form['image_path']
    elif 'image_file' in flask.request.files and flask.request.files['image_file']:
        outfile = tempfile.mkstemp(suffix='.bin')
        flask.request.files['image_file'].save(outfile[1])
        image_path = outfile[1]
        os.close(outfile[0])
        # NOTE(review): remove_image_path is never set to True here, so
        # the uploaded temp file is never deleted below — confirm whether
        # that is intentional.
    else:
        raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    layers = 'none'
    if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
        layers = 'all'

    resize_override = ''
    if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:
        resize_override = 'none'

    # create inference job
    inference_job = ImageInferenceJob(
                username    = utils.auth.get_username(),
                name        = "Infer One Image",
                model       = model_job,
                images      = [image_path],
                epoch       = epoch,
                layers      = layers,
                resize_override = resize_override
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, visualizations = inference_job.get_data()

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if remove_image_path:
        os.remove(image_path)

    image = None
    if inputs is not None and len(inputs['data']) == 1:
        image = utils.image.embed_image_html(inputs['data'][0])

    # print_image = 0
    # if print_image == 1:
    #     import numpy as np
    #     import PIL.Image
    #     file_directory = '/home/brainstorm/srodrigues/nnworker/datasets/text/'
    #     # file_name = '/home/brainstorm/srodrigues/nnworker/datasets/text/' + 'output_predictions.txt'
    #     # f = open(file_name, 'w')
    #     res = dict((name, blob.tolist()) for name,blob in outputs.iteritems())['output']
    #     image = (np.array(res).reshape((32, 32, 3))).astype(np.uint8)  # it should be already uint8, but...
    #     result = PIL.Image.fromarray(image)
    #     result.save(file_directory+'image.jpg')
    #     # f.write(str(res))
    #     # f.close()

    if request_wants_json():
        return flask.jsonify({'outputs': dict((name, blob.tolist()) for name,blob in outputs.iteritems())})
    else:
        return flask.render_template('models/images/generic/infer_one.html',
                model_job       = model_job,
                job             = inference_job,
                image_src       = image,
                network_outputs = outputs,
                visualizations  = visualizations,
                total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
                )


@blueprint.route('/infer_db.json', methods=['POST'])
@blueprint.route('/infer_db', methods=['POST', 'GET'])
def infer_db():
    """
    Infer a database
    """
    model_job = job_from_request()

    if not 'db_path' in flask.request.form or flask.request.form['db_path'] is None:
        raise werkzeug.exceptions.BadRequest('db_path is a required field')

    db_path = flask.request.form['db_path']

    if not os.path.exists(db_path):
        raise werkzeug.exceptions.BadRequest('DB "%s" does not exit' % db_path)

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    # create inference job
    inference_job = ImageInferenceJob(
                username    = utils.auth.get_username(),
                name        = "Infer Many Images",
                model       = model_job,
                images      = db_path,
                epoch       = epoch,
                layers      = 'none',
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        keys = [str(idx) for idx in inputs['ids']]
    else:
        keys = None

    if request_wants_json():
        result = {}
        for i, key in enumerate(keys):
            result[key] = dict((name, blob[i].tolist()) for name,blob in outputs.iteritems())
        return flask.jsonify({'outputs': result})
    else:
        return flask.render_template('models/images/generic/infer_db.html',
                model_job       = model_job,
                job             = inference_job,
                keys            = keys,
                network_outputs = outputs,
                )


@blueprint.route('/infer_many.json', methods=['POST'])
@blueprint.route('/infer_many', methods=['POST', 'GET'])
def infer_many():
    """
    Infer many images
    """
    model_job = job_from_request()

    image_list = flask.request.files.get('image_list')
    if not image_list:
        raise werkzeug.exceptions.BadRequest('image_list is a required field')

    if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
        image_folder = flask.request.form['image_folder']
        if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exit' % image_folder)
    else:
        image_folder = None

    if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
        num_test_images = int(flask.request.form['num_test_images'])
    else:
        num_test_images = None

    epoch = None
    if 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    paths = []

    for line in image_list.readlines():
        line = line.strip()
        if not line:
            continue

        path = None
        # might contain a numerical label at the end
        match = re.match(r'(.*\S)\s+\d+$', line)
        if match:
            path = match.group(1)
        else:
            path = line

        # Resolve relative paths against image_folder (URLs and absolute
        # paths are left untouched).
        if not utils.is_url(path) and image_folder and not os.path.isabs(path):
            path = os.path.join(image_folder, path)
        paths.append(path)

        if num_test_images is not None and len(paths) >= num_test_images:
            break

    # create inference job
    inference_job = ImageInferenceJob(
                username    = utils.auth.get_username(),
                name        = "Infer Many Images",
                model       = model_job,
                images      = paths,
                epoch       = epoch,
                layers      = 'none',
                resize_override = ''
                )

    # schedule tasks
    scheduler.add_job(inference_job)

    # wait for job to complete
    inference_job.wait_completion()

    # retrieve inference data
    inputs, outputs, _ = inference_job.get_data()

    # delete job folder and remove from scheduler list
    scheduler.delete_job(inference_job)

    if outputs is not None and len(outputs) < 1:
        # an error occurred
        outputs = None

    if inputs is not None:
        # Keep only the paths the inference task actually processed,
        # in the task's order.
        paths = [paths[idx] for idx in inputs['ids']]

    if request_wants_json():
        result = {}
        for i, path in enumerate(paths):
            result[path] = dict((name, blob[i].tolist()) for name,blob in outputs.iteritems())
        return flask.jsonify({'outputs': result})
    else:
        return flask.render_template('models/images/generic/infer_many.html',
                model_job       = model_job,
                job             = inference_job,
                paths           = paths,
                network_outputs = outputs,
                )


def get_datasets():
    # Completed or still-running generic image datasets, newest first
    # (Py2-only: sorted(cmp=...) and the cmp builtin).
    return [(j.id(), j.name()) for j in sorted(
        [j for j in scheduler.jobs.values() if isinstance(j, GenericImageDatasetJob) and (j.status.is_running() or j.status == Status.DONE)],
        cmp=lambda x,y: cmp(y.id(), x.id())
        )
        ]


def get_previous_networks():
    # All generic image model jobs as (id, name) pairs, newest first.
    return [(j.id(), j.name()) for j in sorted(
        [j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],
        cmp=lambda x,y: cmp(y.id(), x.id())
        )
        ]


def get_previous_networks_fulldetails():
    # Same jobs as get_previous_networks(), but the job objects themselves.
    return [(j) for j in sorted(
        [j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],
        cmp=lambda x,y: cmp(y.id(), x.id())
        )
        ]


def get_previous_network_snapshots():
    # For each previous network: (epoch, label) choices for the snapshot
    # selector; 0 = none, -1 = that job's own pretrained model.
    prev_network_snapshots = []
    for job_id, _ in get_previous_networks():
        job = scheduler.get_job(job_id)
        e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
                for _, epoch in reversed(job.train_task().snapshots)]
        if job.train_task().pretrained_model:
            e.insert(0, (-1, 'Previous pretrained model'))
        prev_network_snapshots.append(e)
    return prev_network_snapshots
There are fears health services will suffer “serious staffing issues” as a growing number of NHS workers are able to consider retirement. The warning has been issued after latest figures revealed more than a fifth of consultants working for NHS Scotland are aged 55 or above, with more than 100 aged 65 or over. He said: “We already know the numbers of doctors in training have fallen to a five-year low. Now we learn that, at the other end of the spectrum, the consultant workforce is ageing at a considerable rate. The British Medical Association recently expressed fears the official vacancy figure for consultants in NHS Scotland – set at 6.8 per cent – is more likely to be around 14 per cent. However, a Scottish Government spokesman said consultant staffing numbers are at a record high level, up by more than 50 per cent during its time in power. He added: “More than 60 per cent of consultant staff in NHS Scotland are aged under 50. We continue to invest in the medical workforce and, by 2021, will have increased the number of medical places at Scotland’s universities to a record high of 1,038. “The Scottish Government has been working with the Royal College of Surgeons and others to create a Locum Medical Bank, using a pool of retired consultant staff who have expressed an interest in working in remote and rural areas.
#!/usr/bin/env python3
# -*- mode: python -*-
"""
Simple script for helping rename TV shows
"""
# TODO: Need support for custom in/out
# TODO: Need support for callable rename objects
# TODO: Use regex to get a list of all words in filename
#       then make sure only ours exist in it
#       If needed, use spellchecker or such to make sure extra words are valid
# TODO: Need way to specify alternatives for words, like USA or U.S.A. or the like.
#       May be easiest to just allow regex for match, like it's done for extraction.
# TODO: To fix duplicate names with appended extras (CSI, CSI Miami, CSI New York, etc.)
#       Search through all the regexes, keeping all the match results. Whichever match result
#       has the most words associated with it is the correct one. If there is more than one
#       of them with the same number of words and matches, print error and continue.
# TODO: A new idea just occurred to me ... If I made a dictionary or regex that contained
#       all the known words (all word: lists), then I could use it as a sort of sanity check
#       with which to make sure that a show is really the show if it matches its words: and
#       didn't contain any of the rest of them ...
# TODO: Cannot search for a word twice (e.g. Brooklyn Nine Nine).
# TODO: Investigate '\w+' as re.findall() as it may be better/faster/etc.
import glob, sys, os
import re
from pprint import pprint

# Project models: the DB tables the one-shot migration below populates.
from vidis.models import db, FeedModel, RegexModel

# str.format template used as the rename prefix for date-based
# (non-episodic) shows; filled from the named groups of a DATES regex.
DATEFMT = '{year:04d}-{month:02d}-{day:02d}'

# Date patterns (named groups: year/month/day) in the three orderings that
# show up in release file names. Separators may be '-', ' ', '/' or '.'.
DATES = [
    # [yy]yy_mm_dd
    '(?P<year>(?:19|20)[0-9]{2})[- /.](?P<month>(?:0[1-9]|1[012]))[- /.](?P<day>(?:0[1-9]|[12][0-9]|3[01]))',
    # dd_mm_[yy]yy
    '(?P<day>(?:0[1-9]|[12][0-9]|3[01]))[- /.](?P<month>(?:0[1-9]|1[012]))[- /.](?P<year>(?:19|20)[0-9]{2})',
    # mm_dd_[yy]yy
    '(?P<month>(?:0[1-9]|1[012]))[- /.](?P<day>(?:0[1-9]|[12][0-9]|3[01]))[- /.](?P<year>(?:19|20)[0-9]{2})'
]

# New (easier) way to add to RenameList.
#
# Maps a canonical show title (or, for date-based shows, a DATEFMT-based
# rename template) to its options:
#   'words'   - lowercase tokens that must all appear, in order, in a file
#               name for it to be identified as this show (tokens may be
#               regex fragments, e.g. 'r[ie]{2}ser', 'u?s?').
#   'last'    - evaluate this show only after all non-'last' shows, so a
#               more specific title ('CSI Miami') wins over a prefix ('CSI').
#   'extract' - custom extraction regexes; their named groups feed a
#               str.format rename instead of the default SxxEyy scheme.
# Entries with no options derive their identify words from the lower-cased
# title in the DB loop below.
#
# NOTE: insertion order matters — earlier entries are tried first.
SimpleRenameList = {
    'The Odd Couple': {}, 'Eye Candy': {}, 'Wolf Hall': {}, 'X Company': {},
    'Hindsight': {}, 'Girlfriends Guide To Divorce': {},
    'Catastrophe': {'words': ['catastrophe']},
    'Chasing Life': {'words': ['chasing', 'life']},
    'Better Call Saul': {'words': ['better', 'call', 'saul']},
    'Allegiance': {'words': ['allegiance']},
    'Fresh Off The Boat': {'words': ['fresh', 'off', 'the', 'boat']},
    'Schitts Creek': {'words': ['schitts', 'creek']},
    'Cockroaches': {'words': ['cockroaches']},
    'Backstrom': {'words': ['backstrom']},
    '12 Monkeys': {'words': ['12', 'monkeys']},
    'Man Seeking Woman': {'words': ['man', 'seeking', 'woman']},
    'State Of Affairs': {'words': ['state', 'of', 'affairs']},
    'Galavant': {'words': ['galavant']},
    'Spy World': {'words': ['spy', 'world']},
    'Togetherness': {'words': ['togetherness']},
    '1600 Penn': {'words': ['1600', 'penn']},
    '18 To Life': {'words': ['18', 'to', 'life']},
    '2 Broke Girls': {'words': ['2', 'broke', 'girls']},
    '24': {'last': True, 'words': ['24']},
    '30 Rock': {'words': ['30', 'rock']},
    '666 Park Avenue': {'words': ['666', 'park', 'avenue']},
    '90210': {'words': ['90210']},
    'A Gifted Man': {'words': ['a', 'gifted', 'man']},
    'A To Z': {'words': ['a', 'to', 'z']},
    'About A Boy': {'words': ['about', 'a', 'boy']},
    'Accidentally On Purpose': {'words': ['accidentally', 'on', 'purpose']},
    'Against The Wall': {'words': ['against', 'the', 'wall']},
    'Alaska State Troopers': {'words': ['alaska', 'state', 'troopers']},
    'Alcatraz': {'words': ['alcatraz']},
    'All About Aubrey': {'words': ['all', 'about', 'aubrey']},
    'All Worked Up': {'words': ['all', 'worked', 'up']},
    'Almost Human': {'words': ['almost', 'human']},
    'Almost Royal': {'words': ['almost', 'royal']},
    'Alphas': {'words': ['alphas']},
    'American Dad': {'words': ['american', 'dad']},
    'American Horror Story': {'words': ['american', 'horror', 'story']},
    'American Idol': {'words': ['american', 'idol']},
    'American Restoration': {'words': ['american', 'restoration']},
    'Animal Practice': {'words': ['animal', 'practice']},
    'Archer': {'words': ['archer']},
    'Are You There Chelsea': {'words': ['are', 'you', 'there', 'chelsea']},
    'Army Wives': {'words': ['army', 'wives']},
    'Arrow': {'words': ['arrow']},
    'Atlantis': {'words': ['atlantis']},
    'Awake': {'words': ['awake']},
    'Awkward': {'words': ['awkward']},
    'Ax Men': {'words': ['ax', 'men']},
    'Babylon': {'words': ['babylon']},
    'Back In The Game': {'words': ['back', 'in', 'the', 'game']},
    'Bad Judge': {'words': ['bad', 'judge']},
    'Bad Teacher': {'words': ['bad', 'teacher']},
    'Bang Goes The Theory': {'words': ['bang', 'goes', 'the', 'theory']},
    'Banshee': {'words': ['banshee']},
    'Bates Motel': {'words': ['bates', 'motel']},
    'Beauty And The Beast': {'words': ['beauty', 'and', 'the', 'beast']},
    'Becoming Human': {'words': ['becoming', 'human']},
    'Bedlam': {'words': ['bedlam']},
    'Being Eileen': {'words': ['being', 'eileen']},
    'Being Human (US)': {'words': ['being', 'human', 'us']},
    'Being Human': {'last': True, 'words': ['being', 'human']},
    'Being Mary Jane': {'words': ['being', 'mary', 'jane']},
    'Believe': {'words': ['believe']},
    'Ben And Kate': {'words': ['ben', 'and', 'kate']},
    'Bering Sea Gold Under The Ice': {'words': ['bering', 'sea', 'gold', 'under', 'the', 'ice']},
    'Bering Sea Gold': {'words': ['bering', 'sea', 'gold']},
    'Best Friends Forever': {'words': ['best', 'friends', 'forever']},
    'Betrayal': {'words': ['betrayal']},
    # FIX: title typo was 'Better Of Ted'; its own words already said 'off'.
    'Better Off Ted': {'words': ['better', 'off', 'ted']},
    'Better With You': {'words': ['better', 'with', 'you']},
    'Big Love': {'words': ['big', 'love']},
    'Big Tips Texas': {'words': ['big', 'tips', 'texas']},
    'Black Sails': {'words': ['black', 'sails']},
    'Black-ish': {'words': ['black', 'ish']},
    'Blue Bloods': {'words': ['blue', 'bloods']},
    'Boardwalk Empire': {'words': ['boardwalk', 'empire']},
    'Bobs Burgers': {'words': ['bob', 'burger']},
    'Body Of Proof': {'words': ['body']},
    'Bones': {'words': ['bones']},
    'Bored To Death': {'words': ['bored', 'to.death']},
    'Boss': {'words': ['boss']},
    'Boston Med': {'words': ['boston', 'med']},
    'Bostons Finest': {'words': ['boston', 'finest']},
    'Breaking Bad': {'words': ['breaking', 'bad']},
    'Breaking In': {'words': ['breaking', 'in']},
    'Breakout Kings': {'words': ['breakout', 'kings']},
    'Breakout': {'last': True, 'words': ['breakout']},
    'Britain\'s Really Disgusting Food': {'words': ['brdf']},
    'Broadchurch': {'words': ['broadchurch']},
    # TODO: No way to search for two of a single word
    'Brooklyn Nine Nine': {'words': ['brooklyn', 'nine']},
    'Brothers And Sisters': {'words': ['brothers', 'and', 'sisters']},
    'Brothers': {'last': True, 'words': ['brothers']},
    'Burn Notice': {'words': ['burn', 'notice']},
    'CSI Miami': {'words': ['csi', 'miami']},
    'CSI New York': {'words': ['csi', 'ny']},
    'CSI': {'last': True, 'words': ['csi']},
    'Californication': {'words': ['californication']},
    'Camelot': {'words': ['camelot']},
    'Camp': {'words': ['camp']},
    'Caprica': {'words': ['caprica']},
    'Castle': {'words': ['castl']},
    'Celebrity Rehab With Dr. Drew': {'words': ['celebrity', 'rehab', 'drew']},
    'Chaos': {'words': ['chaos']},
    'Charlies Angels': {'words': ['charlies', 'angels']},
    'Chase': {'words': ['chase']},
    'Chasing Mummies': {'words': ['chasing', 'mummies']},
    'Chicago Fire': {'words': ['chicago', 'fire']},
    'Chicago PD': {'last': True, 'words': ['chicago', 'p', 'd']},
    'Childrens Hospital': {'words': ['childrens', 'hospital']},
    'Chuck': {'words': ['chuck']},
    'City Homicide': {'words': ['city', 'homicide']},
    'Cold Case': {'words': ['cold', 'case']},
    'Collision': {'words': ['collision']},
    'Combat Hospital': {'words': ['combat', 'hospital']},
    'Community': {'words': ['community']},
    'Constantine': {'words': ['constantine']},
    'Continuum': {'words': ['continuum']},
    'Copper': {'words': ['copper']},
    'Cosmos - A Space Time Odyssey': {'words': ['cosmos', 'a', 'space', 'time', 'odyssey']},
    'Cougar Town': {'words': ['cougar', 'town']},
    'Covert Affairs': {'words': ['covert', 'affairs']},
    'Cracker': {'words': ['cracker']},
    'Creature Shop Challenge': {'words': ['creature', 'shop', 'challenge']},
    'Criminal Minds Suspect Behavior': {'words': ['criminal', 'minds', 'suspect', 'behav']},
    'Criminal Minds': {'last': True, 'words': ['criminal', 'minds']},
    'Crisis Control': {'words': ['crisis', 'control']},
    'Crisis': {'words': ['crisis']},
    'Cristela': {'words': ['cristela']},
    'Crossing Lines': {'words': ['crossing', 'lines']},
    'Cuckoo': {'words': ['cuckoo']},
    'Cult': {'words': ['cult']},
    'Curb Your Enthusiasm': {'words': ['curb', 'your', 'enthusiasm']},
    # FIX: title was 'Dada' while its words say 'dads' — assumed to be a typo
    # for the show "Dads"; TODO confirm.
    'Dads': {'last': True, 'words': ['dads']},
    'Damages': {'words': ['damages']},
    'Dancing On The Edge': {'words': ['dancing', 'on', 'the', 'edge']},
    'Dancing With The Stars': {'words': ['dancing', 'with', 'the', 'stars']},
    'Dark Blue': {'words': ['dark', 'blue']},
    'Deadliest Catch': {'words': ['deadliest', 'catch']},
    'Defiance': {'words': ['defiance']},
    'Desperate Housewives': {'words': ['desperate', 'housewives']},
    'Detroit 187': {'words': ['det']},
    'Devious Maids': {'words': ['devious', 'maids']},
    'Dexter': {'words': ['dexter']},
    'Do No Harm': {'words': ['do', 'no', 'harm']},
    'Doctor Who': {'words': ['doctor', 'who', '2005']},
    'Dollhouse': {'words': ['dollhouse']},
    'Dominion': {'words': ['dominion']},
    'Downfall': {'words': ['downfall']},
    'Dracula': {'words': ['dracula', '2013']},
    'Dream House': {'words': ['dream', 'house']},
    'Dream Machines': {'words': ['dream', 'machines']},
    'Drop Dead Diva': {'words': ['drop', 'dead', 'diva']},
    'Eagleheart': {'words': ['eagleheart']},
    'Eastwick': {'words': ['eastwick']},
    'Elementary': {'words': ['elementary']},
    'Emily Owens M.D.': {'words': ['emily', 'owens']},
    'Empire': {'last': True, 'words': ['empire']},
    'Enlightened': {'words': ['enlightened']},
    'Episodes': {'words': ['episodes']},
    'Eureka': {'words': ['eureka']},
    'Extant': {'words': ['extant']},
    'Fairly Legal': {'words': ['fairly', 'legal']},
    'Falling Skies': {'words': ['falling', 'skies']},
    'Family Guy': {'words': ['family', 'guy']},
    'Family Tools': {'words': ['family', 'tools']},
    'Family Tree': {'words': ['family', 'tree']},
    'Fargo': {'words': ['fargo']},
    'Fear Factor': {'words': ['fear', 'factor']},
    'Finding Bigfoot': {'words': ['finding', 'bigfoot']},
    'Finding Carter': {'words': ['finding', 'carter']},
    'Flash Forward': {'words': ['flash', 'forward']},
    'Flashpoint': {'words': ['flash', 'point']},
    'Forever': {'words': ['forever']},
    'Franklin And Bash': {'words': ['franklin', 'and', 'bash']},
    'Freakshow': {'words': ['freakshow']},
    'Free Agents': {'words': ['free', 'agents']},
    'Friends With Better Lives': {'words': ['friends', 'with', 'better', 'lives']},
    'Fringe': {'words': ['fringe']},
    'Funny As Hell': {'words': ['funny', 'as', 'hell']},
    'Futurama': {'words': ['futurama']},
    'Game Of Thrones': {'words': ['game', 'of', 'thrones']},
    'Gang World': {'words': ['gang', 'world']},
    'Gary Unmarried': {'words': ['gary', 'unmarried']},
    'Ghost Lab': {'words': ['ghost', 'lab']},
    'Ghost Whisperer': {'words': ['ghost', 'whisperer']},
    'Girl Meets World': {'words': ['girl', 'meets', 'world']},
    'Girls': {'last': True, 'words': ['girls']},
    'Glee': {'words': ['glee']},
    'Glue': {'words': ['glue']},
    'Good Vibes': {'words': ['good', 'vibes']},
    'Gossip Girl': {'words': ['gossip', 'girl']},
    'Gotham': {'words': ['gotham']},
    'Graceland': {'words': ['graceland']},
    'Gracepoint': {'words': ['gracepoint']},
    'Greek': {'words': ['greek']},
    'Greys Anatomy': {'words': ['greys', 'anatomy']},
    'Grimm': {'words': ['grimm']},
    'Ground Floor': {'words': ['ground', 'floor']},
    'Growing Up Fisher': {'words': ['growing', 'up', 'fisher']},
    'Hank': {'words': ['hank']},
    'Hannibal': {'words': ['hannibal']},
    'Happily Divorced': {'words': ['happily', 'divorced']},
    'Happy Endings': {'words': ['happy', 'endings']},
    'Happy Town': {'words': ['happy', 'town']},
    'Happy Valley': {'words': ['happy', 'valley']},
    'Hard Time': {'words': ['hard', 'time']},
    'Harrys Law': {'words': ['harrys', 'law']},
    'Haven': {'words': ['haven']},
    'Hawaii Five\'0': {'words': ['hawaii', 'five', '0']},
    'Hawthorne': {'words': ['hawthorne']},
    'Heavy': {'words': ['heavy']},
    'Helix': {'words': ['helix']},
    'Hellcats': {'words': ['hellcats']},
    'Hello Ladies': {'words': ['hello', 'ladies']},
    'Hello Ross': {'words': ['hello', 'ross']},
    'Hells Kitchen': {'words': ['hells', 'kitchen']},
    'Hens Behaving Badly': {'words': ['hens', 'behaving', 'badly']},
    'Heroes': {'words': ['heroes']},
    'High School USA': {'words': ['high', 'school', 'usa']},
    'Highway Patrol': {'words': ['highway', 'patrol']},
    'Hoarders': {'words': ['hoarders']},
    'Homeland': {'words': ['homeland']},
    'Hostages': {'words': ['hostages']},
    'Hot In Cleveland': {'words': ['hot', 'in', 'cleveland']},
    'Hotel Stephanie': {'words': ['hotel', 'stephanie']},
    'House Of Lies': {'words': ['house', 'of', 'lies']},
    'House': {'last': True, 'words': ['house']},
    'How I Met Your Mother': {'words': ['how', 'i', 'met', 'your', 'mother']},
    'How To Be A Gentleman': {'words': ['how', 'to', 'be', 'a', 'gentleman']},
    'How To Get Away With Murder': {'words': ['how', 'to', 'get', 'away', 'with', 'murder']},
    'How To Live With Your Parents': {'words': ['how', 'to', 'live', 'with', 'your', 'parents']},
    'Huge': {'words': ['huge']},
    'Human Target': {'words': ['human', 'targe']},
    'Hung': {'words': ['hung']},
    'Ice Road Truckers': {'words': ['ice', 'road', 'truckers']},
    'In Plain Sight': {'words': ['in', 'plain', 'sight']},
    'In The Flesh': {'words': ['in', 'the', 'flesh']},
    'In Treatment': {'words': ['in', 'treatment']},
    'Injustice': {'words': ['injustice']},
    'Inside Amy Schumer': {'words': ['inside', 'amy', 'schumer']},
    'Instant Mom': {'words': ['instant', 'mom']},
    'Intelligence': {'last': True, 'words': ['intelligence']},
    'Jane By Design': {'words': ['jane', 'by', 'design']},
    'Jane The Virgin': {'words': ['jane', 'the', 'virgin']},
    'Jennifer Falls': {'words': ['jennifer', 'falls']},
    'Justified': {'words': ['justified']},
    'Kingdom': {'words': ['kingdom']},
    'Kirstie': {'words': ['kirstie']},
    'Kitchen Nightmares': {'words': ['kitchen', 'nightmares']},
    'LA Ink': {'words': ['la', 'ink']},
    'Last Comic Standing': {'words': ['last', 'comic', 'standing']},
    'Last Man Standing': {'words': ['last', 'man', 'standing']},
    'Last Resort': {'words': ['last', 'resort']},
    'Law And Order CI': {'words': ['law', 'and', 'order', 'criminal', 'intent']},
    'Law And Order LA': {'words': ['law', 'and', 'order', 'la']},
    'Law And Order SVU': {'words': ['law', 'and', 'order', 's', 'v', 'u']},
    'Law And Order UK': {'words': ['law', 'and', 'order', 'uk']},
    'Law And Order': {'last': True, 'words': ['law', 'and', 'order']},
    'Legit': {'words': ['legit']},
    'Leverage': {'words': ['leverage']},
    'Lie To Me': {'words': ['lie', 'to', 'me']},
    'Life On A Wire': {'words': ['life', 'on', 'a', 'wire']},
    'Life Unexpected': {'words': ['life', 'unexpected']},
    'Lights Out': {'words': ['lights', 'out']},
    'Live To Dance': {'words': ['live', 'to', 'dance']},
    'Lone Star': {'words': ['lone', 'star']},
    'Long Island Medium': {'words': ['long', 'island', 'medium']},
    'Lost': {'words': ['lost']},
    'Love Bites': {'words': ['love', 'bites']},
    'Mad Dogs': {'words': ['mad', 'dogs']},
    'Mad Love': {'words': ['mad', 'love']},
    'Mad Men': {'words': ['mad', 'men']},
    'Madam Secretary': {'words': ['madam', 'secretary']},
    'Made In Jersey': {'words': ['made', 'in', 'jersey']},
    'Magic City': {'words': ['magic', 'city']},
    'Major Crimes': {'words': ['major', 'crimes']},
    'Man Up': {'words': ['man', 'up']},
    'Marry Me': {'words': ['marry', 'me']},
    'Marvels Agent Carter': {'words': ['marvels', 'agent', 'carter']},
    'Marvels Agents Of S.H.I.E.L.D.': {'words': ['marvels', 'agents', 'of', 's.h.i.e.l.d']},
    'Masters Of Sex': {'words': ['masters', 'of', 'sex']},
    'Matador (US)': {'words': ['matador']},
    'Medium': {'words': ['medium']},
    'Melissa And Joey': {'words': ['melissa', 'and', 'joey']},
    'Melrose Place': {'words': ['melrose', 'place']},
    'Memphis Beat': {'words': ['memphis', 'beat']},
    'Men Of A Certain Age': {'words': ['men', 'of', 'a', 'certain', 'age']},
    'Mercy': {'words': ['mercy']},
    'Merlin': {'words': ['merli']},
    'Miami Medical': {'words': ['miami', 'medical']},
    'Mike And Molly': {'words': ['mike', 'and', 'molly']},
    'Mind Games': {'words': ['mind', 'games']},
    'Miranda': {'words': ['miranda']},
    'Missing': {'words': ['missing', '2012']},
    'Mistresses': {'words': ['mistresses']},
    'Mixed Britannia': {'words': ['mixed', 'britannia']},
    'Mob City': {'words': ['mob', 'city']},
    'Mockingbird Lane': {'words': ['mockingbird', 'lane']},
    'Modern Family': {'words': ['modern', 'family']},
    'Mom': {'words': ['mom']},
    'Monday Mornings': {'words': ['monday', 'mornings']},
    'Mongrels': {'words': ['mongrels']},
    'Monk': {'words': ['monk']},
    'Motive': {'words': ['motive']},
    'Mr. Pickles': {'words': ['mr', 'pickles']},
    'Mr. Sunshine': {'words': ['mr', 'sunshine']},
    'Mulaney': {'words': ['mulaney']},
    'Murderland': {'words': ['murderland']},
    'My Babysitters A Vampire': {'words': ['my', 'babysitters', 'a', 'vampire']},
    'My Boys': {'words': ['my', 'boys']},
    'My Generation': {'words': ['my', 'generatio']},
    'NCIS Los Angeles': {'words': ['ncis', 'los', 'angeles']},
    'NCIS New Orleans': {'words': ['ncis', 'new', 'orleans']},
    'NCIS': {'last': True, 'words': ['ncis']},
    'NTSF-SD-SUV': {'words': ['ntsf', 'sd', 'suv']},
    'NYC 22': {'words': ['nyc', '22']},
    # FIX: was a set literal {'nashville'}; every other entry uses a list.
    'Nashville': {'words': ['nashville']},
    'Necessary Roughness': {'words': ['necessary', 'roughness']},
    'New Girl': {'words': ['new', 'girl']},
    'Nikita': {'words': ['nikita']},
    'Nip Tuck': {'words': ['nip', 'tuck']},
    'No Ordinary Family': {'words': ['no', 'ordinary', 'family']},
    'Numb3rs': {'words': ['numb3rs']},
    'Nurse Jackie': {'words': ['nurse', 'jackie']},
    'Off The Map': {'words': ['off', 'the', 'map']},
    'Offspring': {'words': ['offspring']},
    'Once Upon A Time In Wonderland': {'words': ['once', 'upon', 'a', 'time', 'in', 'wonderland']},
    'Once Upon A Time': {'last': True, 'words': ['once', 'upon', 'a', 'time']},
    'One Tree Hill': {'words': ['one', 'tree', 'hill']},
    'Orphan Black': {'words': ['orphan', 'black']},
    'Outlaw': {'words': ['outlaw']},
    'Outsourced': {'words': ['outsourced']},
    'Package Deal': {'words': ['package', 'deal']},
    'Pan Am': {'words': ['pan', 'am']},
    'Parenthood': {'words': ['parenthood']},
    'Parks And Recreation': {'words': ['parks', 'and', 'recreation']},
    'Party Down': {'words': ['party', 'down']},
    'Past Life': {'words': ['past', 'life']},
    'Perception': {'words': ['perception']},
    'Perfect Couples': {'words': ['perfect', 'couples']},
    'Person Of Interest': {'words': ['person', 'of', 'interest']},
    'Persons Unknown': {'words': ['person', 'unknown']},
    'Plain Jane': {'words': ['plain', 'jane']},
    'Political Animals': {'words': ['political', 'animals']},
    'Portlandia': {'words': ['portlandia']},
    'Pretty Little Liars': {'words': ['pretty', 'little', 'liars']},
    'Prey (UK)': {'words': ['prey', 'uk']},
    'Prime Suspect': {'words': ['prime', 'suspect']},
    'Primeval New World': {'words': ['primeval', 'new', 'world']},
    'Primeval': {'last': True, 'words': ['primeval']},
    'Private Practice': {'words': ['private', 'practice']},
    'Psych': {'words': ['psych']},
    'Raising Hope': {'words': ['raising', 'hope']},
    'Rake': {'words': ['rake']},
    'Ravenswood': {'words': ['ravenswood']},
    'Ray Donovan': {'words': ['ray', 'donovan']},
    # FIX: title typo was 'Recjless'; its own words already said 'reckless'.
    'Reckless': {'words': ['reckless']},
    'Red Band Society': {'words': ['red', 'band', 'society']},
    'Redwood Kings': {'words': ['redwood', 'kings']},
    'Reign': {'words': ['reign']},
    'Remember Me': {'words': ['remember', 'me']},
    'Rescue Me': {'words': ['rescue', 'me']},
    'Rescue Special Ops': {'words': ['rescue', 'special', 'ops']},
    'Resurrection': {'words': ['resurrection']},
    'Retired At 35': {'words': ['retired', 'at', '35']},
    'Revenge': {'words': ['revenge']},
    'Revolution': {'words': ['revolution']},
    'Ringer': {'words': ['ringer']},
    'Rizzoli And Isles': {'words': ['rizzoli', 'and', 'isles']},
    'Rob': {'words': ['rob']},
    'Romantically Challenged': {'words': ['romantically', 'challenged']},
    'Rookie Blue': {'words': ['rookie', 'blue']},
    'Royal Navy Caribbean Patrol': {'words': ['royal', 'navy', 'caribbean', 'patrol']},
    'Royal Pains': {'words': ['royal', 'pains']},
    'Rubicon': {'words': ['rubicon']},
    'Rules Of Engagement': {'words': ['rules', 'of', 'engagement']},
    'Running Wilde': {'words': ['running', 'wilde']},
    'Rush': {'last': True, 'words': ['rush']},
    'Salem': {'words': ['salem']},
    'Same Name': {'words': ['same', 'name']},
    'Sanctuary': {'words': ['sanctuary']},
    'Satisfaction': {'words': ['satisfaction']},
    'Saturday Night Live': {'words': ['saturday', 'night', 'live']},
    'Saving Grace': {'words': ['saving', 'grace']},
    'Saving Hope': {'words': ['saving', 'hope']},
    'Scandal': {'words': ['scandal']},
    'Scorpion': {'words': ['scorpion']},
    'Scoundrels': {'words': ['scoundrels']},
    'Scrubs': {'words': ['scrubs']},
    'Sea Patrol UK': {'words': ['sea', 'patrol', 'uk']},
    'Sea Patrol': {'words': ['sea', 'patrol']},
    'Sean Saves The World': {'words': ['sean', 'saves', 'the', 'world']},
    'Secret Diary Of A Call Girl': {'words': ['secret', 'diary', 'call', 'girl']},
    'Secret Girlfriend': {'words': ['secret', 'girl', 'friend']},
    'Secret State': {'words': ['secret', 'state']},
    'Seed': {'words': ['seed']},
    'Selfie': {'words': ['selfie']},
    'Sex Rehab With Dr. Drew': {'words': ['sex', 'rehab', 'drew']},
    'Shameless': {'words': ['shameless']},
    'Sherlock': {'words': ['sherlock']},
    'Sherri': {'words': ['sherri']},
    'Shit My Dad Says': {'words': ['shit', 'my', 'dad', 'says']},
    'Silicon Valley': {'words': ['silicon', 'valley']},
    'Sirens': {'words': ['sirens']},
    'Sister Wives': {'words': ['sister', 'wives']},
    'Skins (US)': {'words': ['skins', 'us']},
    'Sleepy Hollow': {'words': ['sleepy', 'hollow']},
    'Smash': {'words': ['smash']},
    'Sons Of Anarchy': {'words': ['sons', 'of', 'anarchy']},
    'Sons Of Tucson': {'words': ['sons', 'of', 't']},
    'Southland': {'words': ['southland']},
    'Spartacus Blood And Sand': {'words': ['spartacus', 'blood', 'sand']},
    'Spartacus Gods Of The Arena': {'words': ['spartacus', 'gods', 'arena']},
    'Spartacus': {'words': ['spartacus']},
    'Stalker': {'words': ['stalker']},
    'Star Wars Rebels': {'words': ['star', 'wars', 'rebels']},
    'Star Wars The Clone Wars': {'words': ['star', 'wars', 'clone', 'wars']},
    'Stargate Universe': {'words': ['stargate', 'universe']},
    'State Of Georgia': {'words': ['state', 'of', 'georgia']},
    'Steven Seagal Lawman': {'words': ['steven', 'seagal', 'law', 'man']},
    'Strange Empire': {'words': ['strange', 'empire']},
    'Suburgatory': {'words': ['suburgatory']},
    'Summer Camp': {'words': ['summer', 'camp']},
    'Super Fun Night': {'words': ['super', 'fun', 'night']},
    'Superjail': {'words': ['superjail']},
    'Supernanny': {'words': ['supernanny']},
    'Supernatural': {'words': ['super', 'natural']},
    'Surviving Jack': {'words': ['surviving', 'jack']},
    'Survivor': {'last': True, 'words': ['survivor']},
    'Survivors': {'words': ['survivors', '2008']},
    'Switched At Birth': {'words': ['switched', 'at', 'birth']},
    'Teen Wolf': {'words': ['teen', 'wolf']},
    'Terra Nova': {'words': ['terra', 'nova']},
    'Terriers': {'words': ['terriers']},
    'The 100': {'words': ['the', '100']},
    'The Affair': {'words': ['the', 'affair']},
    'The Americans': {'words': ['the', 'americans']},
    'The Big Bang Theory': {'words': ['the', 'big', 'bang', 'theory']},
    'The Big C': {'last': True, 'words': ['the', 'big', 'c']},
    'The Blacklist': {'words': ['the', 'blacklist']},
    'The Bomb Squad': {'words': ['the', 'bomb', 'squad']},
    'The Borgias': {'words': ['the', 'borgias']},
    'The Bridge': {'words': ['the', 'bridge']},
    'The Cape': {'words': ['the', 'cap']},
    'The Carrie Diaries': {'words': ['the', 'carrie', 'diaries']},
    'The Chicago Code': {'words': ['chicago', 'code']},
    'The Cleveland Show': {'words': ['the', 'cleveland', 'show']},
    'The Client List': {'words': ['the', 'client', 'list']},
    'The Closer': {'words': ['the', 'closer']},
    'The Crazy Ones': {'words': ['the', 'crazy', 'ones']},
    'The Cult': {'words': ['the', 'cult']},
    'The Deep End': {'words': ['the', 'deep', 'end']},
    'The Defenders': {'words': ['the', 'defender']},
    'The Event': {'words': ['the', 'event']},
    'The Exes': {'words': ['the', 'exes']},
    # NOTE(review): the trailing space keeps this key distinct from the next
    # entry (two word sets for what looks like the same show) — presumably
    # intentional; confirm before "fixing".
    'The F Word ': {'words': ['tfw']},
    'The F Word': {'words': ['the', 'f', 'word']},
    'The Finder': {'words': ['the', 'finder']},
    'The Firm': {'words': ['the', 'firm']},
    'The Flash': {'words': ['the', 'flash']},
    'The Following': {'words': ['the', 'following']},
    'The Forgotten': {'words': ['the', 'forgotten']},
    'The Fosters': {'words': ['the', 'fosters']},
    'The Gates': {'words': ['the', 'gates']},
    'The Glades': {'words': ['the', 'glades']},
    'The Goldbergs': {'words': ['the', 'goldbergs']},
    'The Good Guys': {'words': ['the', 'good', 'guys']},
    'The Good Wife': {'words': ['the', 'good', 'wife']},
    'The Hard Times Of RJ Berger': {'words': ['the', 'hard', 'times', 'of', 'r', 'j', 'berger']},
    'The Intern': {'words': ['the', 'intern']},
    'The Killing': {'words': ['the', 'killing']},
    'The Last Ship': {'words': ['the', 'last', 'ship']},
    'The Listener': {'words': ['the', 'listener']},
    'The Lottery': {'words': ['the', 'lottery']},
    'The Lying Game': {'words': ['the', 'lying', 'game']},
    'The Marriage Ref': {'words': ['the', 'marriage', 'ref']},
    'The McCarthy\'s': {'words': ['the', 'mccarthys']},
    'The Mentalist': {'words': ['the', 'mentalist']},
    'The Michael J Fox Show': {'words': ['the', 'michael', 'j', 'fox', 'show']},
    'The Middle': {'words': ['the', 'middle']},
    'The Millers': {'words': ['the', 'millers']},
    'The Mindy Project': {'words': ['the', 'mindy', 'project']},
    'The Missing': {'words': ['the', 'missing']},
    'The Mob Doctor': {'words': ['the', 'mob', 'doctor']},
    'The Mysteries Of Laura': {'words': ['the', 'mysteries', 'of', 'laura']},
    'The Neighbors': {'words': ['the', 'neighbors']},
    'The New Adventures Of Old Christine': {'words': ['new', 'adventures', 'old', 'christine']},
    'The New Normal': {'words': ['the', 'new', 'normal']},
    'The Newsroom': {'words': ['the', 'newsroom']},
    'The Nine Lives Of Chloe King': {'words': ['the', 'nine', 'lives', 'chloe', 'king']},
    'The Office': {'words': ['the', 'office', 'u?s?']},
    'The Originals': {'words': ['the', 'originals']},
    'The Paradise': {'words': ['the', 'paradise']},
    'The Paul Reiser Show': {'words': ['the', 'paul', 'r[ie]{2}ser', 'show']},
    # FIX: matching-word typo was 'playbody'.
    'The Playboy Club': {'words': ['the', 'playboy', 'club']},
    'The Protector': {'words': ['the', 'protector']},
    'The Real L Word': {'words': ['the', 'real', 'l', 'word']},
    'The River': {'last': True, 'words': ['the', 'river']},
    'The Sarah Jane Adventures': {'words': ['sarah', 'jane', 'adventures']},
    'The Secret Circle': {'words': ['the', 'secret', 'circle']},
    'The Secret Life Of The American Teenager': {'words': ['secret', 'life', 'american', 'teenager']},
    'The Simpsons': {'words': ['the', 'simpsons']},
    'The Strain': {'words': ['the', 'strain']},
    'The Tomorrow People': {'words': ['the', 'tomorrow', 'people']},
    'The Tudors': {'words': ['the', 'tudors']},
    'The Tunnel': {'words': ['the', 'tunnel']},
    'The Vampire Diaries': {'words': ['the', 'vampire', 'diaries']},
    'The Village': {'words': ['the', 'village']},
    'The Voice': {'words': ['the', 'voice']},
    'The Walking Dead': {'words': ['the', 'walking', 'dead']},
    'The Whole Truth': {'words': ['the', 'whole', 'truth']},
    'The X Factor': {'words': ['the', 'x', 'factor']},
    'This Is Not My Life': {'words': ['this', 'is', 'not', 'my', 'life']},
    'Three Rivers': {'words': ['three', 'rivers']},
    'Through The Wormhole': {'words': ['through', 'wormhole']},
    'Thundercats': {'words': ['thundercats']},
    'Til Death': {'words': ['til', 'death']},
    'Top Chef Masters': {'words': ['top', 'chef', 'masters']},
    'Top Chef': {'words': ['top', 'chef']},
    'Torchwood': {'words': ['torchwood']},
    'Total Divas': {'words': ['total', 'divas']},
    'Touch': {'words': ['touch']},
    'Traffic Light': {'words': ['traffic', 'light']},
    'Trauma': {'words': ['trauma']},
    'Treme': {'words': ['treme']},
    'Trophy Wife': {'words': ['trophy', 'wife']},
    'Truckers': {'words': ['truckers']},
    'True Blood': {'words': ['true', 'blood']},
    'True Detective': {'words': ['true', 'detective']},
    'True Justice': {'words': ['true', 'justice']},
    'True North': {'words': ['true', 'north']},
    'Turn': {'words': ['turn']},
    'Two And A Half Men': {'words': ['two', 'and', 'a', 'half', 'men']},
    'Tyrant': {'words': ['tyrant']},
    'Ugly Americans': {'words': ['ugly', 'americans']},
    'Ugly Betty': {'words': ['ugly', 'betty']},
    'Uncle': {'last': True, 'words': ['uncle']},
    'Under The Dome': {'words': ['under', 'the', 'dome']},
    'Undercover Boss': {'words': ['undercover', 'boss']},
    'Undercovers': {'words': ['undercovers']},
    'Underemployed': {'words': ['underemployed']},
    'Unforgettable': {'words': ['unforgettable']},
    'United States Of Tara': {'words': ['united', 'states', 'of', 'tara']},
    'Unsupervised': {'words': ['unsupervised']},
    'Up All Night': {'words': ['up', 'all', 'night']},
    'Utopia': {'words': ['utopia']},
    'V': {'words': ['v', '2009']},
    'Veep': {'words': ['veep']},
    'Vegas': {'words': ['vegas']},
    'Vikings': {'words': ['vikings']},
    'Warehouse 13': {'words': ['warehouse', '13']},
    'We Are Men': {'words': ['we', 'are', 'men']},
    'Web Therapy': {'words': ['web', 'therapy']},
    'Weeds': {'words': ['weeds']},
    'Welcome To The Family': {'words': ['welcome', 'to', 'the', 'family']},
    'White Collar': {'words': ['white', 'collar']},
    'Whitney': {'words': ['whitney']},
    'Wilfred': {'words': ['wilfred']},
    'Witches Of East End': {'words': ['witches', 'of', 'east', 'end']},
    'Working Class': {'words': ['working', 'class']},
    'You\'re The Worst': {'words': ['youre', 'the', 'worst']},
    'Z Nation': {'words': ['z', 'nation']},
    'Zero Hour': {'words': ['zero', 'hour']},
    # Date-based shows: custom extraction regexes (one per date ordering)
    # whose named groups fill the DATEFMT-based rename template.
    DATEFMT + ' - WWE Friday Night Smackdown.{ext}': {
        'words': ['wwe', 'smackdown'],
        'extract': [d + r'.*\.(?P<ext>(?:avi|mkv|mp4))$' for d in DATES]},
    DATEFMT + ' - WWE Saturday Morning Slam.{ext}': {
        'words': ['wwe', 'saturday', 'morning', 'slam'],
        'extract': [d + r'.*\.(?P<ext>(?:avi|mkv|mp4))$' for d in DATES]},
    DATEFMT + ' - WWE Monday Night Raw.{ext}': {
        'words': ['wwe', 'raw'],
        'extract': [d + r'.*\.(?P<ext>(?:avi|mkv|mp4))$' for d in DATES]},
    DATEFMT + ' - WWE Main Event.{ext}': {
        'words': ['wwe', 'main', 'event'],
        'extract': [d + r'.*\.(?P<ext>(?:avi|mkv|mp4))$' for d in DATES]},
    DATEFMT + ' - iMPACT Wrestling.{ext}': {
        'words': ['impact', 'wrestling'],
        'extract': [d + r'.*\.(?P<ext>(?:avi|mkv|mp4))$' for d in DATES]},
}

# --- One-shot migration: rebuild the DB from the table above. ---
# NOTE(review): sys.exit(0) below means everything after it (RenameShow,
# RenameList) is currently dead code, kept for reference.
db.drop_all()
db.create_all()
session = db.session()
for name, items in SimpleRenameList.items():
    if 'extract' in items:
        # Date-based show: custom extraction regexes, format-style rename.
        r = RegexModel(title=name,
                       extractDefault=False, identifyDefault=True,
                       renameDefault=False,
                       # TODO: This field should be converted into MULTIPLE regexes ...
                       extract=items['extract'],
                       identify=','.join(items['words']),
                       rename=name)
    else:
        # Episodic show: default extract/rename; only identify words vary.
        if 'words' in items:
            identify = ','.join(items['words'])
        else:
            # No explicit words: derive them from the lower-cased title.
            identify = ','.join(word.lower() for word in name.split(' '))
        r = RegexModel(title=name,
                       extractDefault=True, identifyDefault=True,
                       renameDefault=True,
                       extract=None, identify=identify, rename=name)
    feed = FeedModel(title=name, regexes=[r])
    session.add(feed)
    session.commit()
sys.exit(0)


class RenameShow:
    """Matcher/renamer for a single show.

    identify    -- lowercase word fragments that must all appear, in order,
                   in a file name for it to belong to this show.
    replacement -- the canonical show title, or (when custom extraction
                   regexes are supplied) a str.format rename template.
    extract     -- None: use the default SxxEyy extractors below;
                   list of regex strings: format-style rename via named groups;
                   single regex string: positional (%-style) rename.
    """

    # Default extractors: season + episode (split) or season + episode span
    # (split2), each also capturing the container extension.
    split = re.compile(r".*\b[s]?(\d{1,2})[ex]?(\d{2})\b.*(mp4|mkv|avi)$", re.IGNORECASE)
    split2 = re.compile(r".*\b[s]?(\d{1,2})[ex]?(\d{2})e?(\d{2})\b.*(mp4|mkv|avi)$", re.IGNORECASE)

    # Rename styles: positional %-formatting vs named str.format fields.
    _PERCENT = 0
    _FORMAT = 1

    def __init__(self, identify, replacement, extract=None):
        # Words must appear in order, anywhere in the name, case-insensitive.
        expr = '^.*' + '.*'.join(identify) + '.*$'
        self.identify = re.compile(expr, re.IGNORECASE)
        self.replacement = replacement
        if extract is None:
            self.extract = [self.split, self.split2]
            self.rfmt = self._PERCENT
        elif isinstance(extract, list):
            self.extract = [re.compile('^.*' + e) for e in extract]
            self.rfmt = self._FORMAT
        else:
            # TODO: accept an already-compiled pattern here?
            # FIX: stored as a one-element list so RenameIf/__repr__ can treat
            # self.extract uniformly — the old code kept a bare pattern here
            # and both methods crashed iterating it.
            self.extract = [re.compile(extract)]
            self.rfmt = self._PERCENT

    def RenameIf(self, filename):
        """Return the new name for *filename*, or None if it doesn't match."""
        if not self.identify.match(filename):
            return None
        if self.rfmt == self._PERCENT:
            for pattern in self.extract:
                m = pattern.search(filename)
                if not m:
                    continue
                fields = tuple(self._normalizeExtracted(m.groups()))
                if len(m.groups()) > 3:
                    # season, first episode, second episode, extension
                    return self.replacement + ' - S%02dE%02d-E%02d.%s' % fields
                # season, episode, extension
                return self.replacement + ' - S%02dE%02d.%s' % fields
            print("echo [INT] Show known, but cannot detect season and episode: " + filename)
        else:
            for pattern in self.extract:
                m = pattern.match(filename)
                if m:
                    return self.replacement.format(**self._normalizeExtracted(m.groupdict()))
            print("echo [EXT] Show known, but cannot detect season and episode: " + filename)
        return None

    def _normalizeExtracted(self, fields):
        """Convert all-digit captures to ints; dict in -> dict out, otherwise list."""
        if isinstance(fields, dict):
            return {k: int(v) if v.isdigit() else v for k, v in fields.items()}
        return [int(g) if g.isdigit() else g for g in fields]

    def __repr__(self):
        # FIX: the old code assumed a bare compiled pattern whenever
        # rfmt == _PERCENT and crashed on the default extractor *list*;
        # self.extract is now always a list, rendered the same way the old
        # _FORMAT branch rendered it.
        if isinstance(self.extract, list):
            e = '[ ' + ',\n\t\t'.join(p.pattern for p in self.extract) + ' ]'
        else:
            e = self.extract.pattern
        return "{\n\t'extract': %s,\n\t'identify': %s,\n\t'replacement': %s,\n\t'rfmt': %s\n}" % \
            (e, self.identify.pattern, self.replacement, self.rfmt)


RenameList = []

# Create the simple rename regexes (everything not marked 'last').
for name, items in SimpleRenameList.items():
    if not items.get('last'):
        # FIX: option-less entries have no 'words' key and used to KeyError
        # here; fall back to the lower-cased title words, matching the
        # derivation used by the DB loop above.
        words = items.get('words', [w.lower() for w in name.split(' ')])
        if 'extract' in items:
            obj = RenameShow(words, name, items['extract'])
        else:
            obj = RenameShow(words, name)
        RenameList.append(obj)

# Create the simple rename regexes that are required to run after the others.
for name, items in SimpleRenameList.items():
    if items.get('last'):
        if 'extract' in items:
            obj = RenameShow(items['words'], name, items['extract'])
else: obj = RenameShow(items['words'], name) RenameList.append(obj) # Execute the renames testing = len(sys.argv) > 1 for e in ('*.mp4', '*.avi', '*.mkv'): for f in glob.glob(e): found = False for r in RenameList: o = r.RenameIf(f) if o: found = True if f != o: if testing: break print('mv "%s" "%s"' % (f, o)) break if not found: if testing: print('Unknown: {0}'.format(f)) else: print('echo "Unknown: {0}"'.format(f)) TODO=""" Table Shows: id (unique), title (final name of converted show title, overridable) Table Identify: show_id (from Shows), sequence, regex?, criteria Table Extract: show_id, sequence Table Rename: show_id, sequence """
Vintage Weiss Red Pink AB Crystal Rhinestone Butterfly Brooch Pin is simply stunning. This gorgeous butterfly is set with faceted marquise and round prong set ruby red and pink colored rhinestones. Exceptional condition, all stones have superb color and make this a magnificent piece. Signed Weiss on a cartouche. Brooch measures 2 1/4" tall and 2 1/2" wide and weighs 23 grams.
"""Test max queue size limits."""
from multiprocessing import Process
import datetime
import os
import signal
import time

import pytest

from tasktiger import Task, Worker
from tasktiger.exceptions import QueueFullException

from .config import DELAY
from .tasks import decorated_task_max_queue_size, simple_task, sleep_task
from .test_base import BaseTestCase
from .utils import external_worker


class TestMaxQueue(BaseTestCase):
    """TaskTiger test max queue size."""

    def test_task_simple_delay(self):
        """Test enforcing max queue size using delay function."""
        self.tiger.delay(simple_task, queue='a', max_queue_size=1)
        self._ensure_queues(queued={'a': 1})

        # Queue size would be 2 so it should fail
        with pytest.raises(QueueFullException):
            self.tiger.delay(simple_task, queue='a', max_queue_size=1)

        # Process first task and then queuing a second should succeed
        Worker(self.tiger).run(once=True, force_once=True)
        self.tiger.delay(simple_task, queue='a', max_queue_size=1)
        self._ensure_queues(queued={'a': 1})

    def test_task_decorated(self):
        """Test max queue size with decorator."""
        # First delay fills the queue to its configured maximum.
        decorated_task_max_queue_size.delay()
        self._ensure_queues(queued={'default': 1})

        # Second delay must be rejected by the decorator-level limit.
        with pytest.raises(QueueFullException):
            decorated_task_max_queue_size.delay()

    def test_task_all_states(self):
        """Test max queue size with tasks in all three states.

        Arranges one task in each of the active, queued and scheduled
        states, then verifies that max_queue_size=3 rejects a fourth task
        whether it is queued immediately or scheduled for the future.
        """
        # Active
        task = Task(self.tiger, sleep_task, queue='a')
        task.delay()
        self._ensure_queues(queued={'a': 1})

        # Start a worker and wait until it starts processing.
        # NOTE(review): DELAY-based synchronization assumes the worker picks
        # the task up within DELAY seconds -- flaky on a loaded machine.
        worker = Process(target=external_worker)
        worker.start()
        time.sleep(DELAY)

        # Kill the worker while it's still processing the task.
        # NOTE(review): the killed process is never join()ed; it may linger
        # as a zombie until the test process exits -- confirm acceptable.
        os.kill(worker.pid, signal.SIGKILL)
        self._ensure_queues(active={'a': 1})

        # Scheduled
        self.tiger.delay(
            simple_task,
            queue='a',
            max_queue_size=3,
            when=datetime.timedelta(seconds=10),
        )

        # Queued
        self.tiger.delay(simple_task, queue='a', max_queue_size=3)
        self._ensure_queues(
            active={'a': 1}, queued={'a': 1}, scheduled={'a': 1}
        )

        # Should fail to queue task to run immediately
        with pytest.raises(QueueFullException):
            self.tiger.delay(simple_task, queue='a', max_queue_size=3)

        # Should fail to queue task to run in the future
        with pytest.raises(QueueFullException):
            self.tiger.delay(
                simple_task,
                queue='a',
                max_queue_size=3,
                when=datetime.timedelta(seconds=10),
            )
Home >> Awards >> Past >> Think! This Web site has logic test questions, links to opinion and editorial news pages, Letters to the Editors from English newspapers around the world, and resources to discussion groups and pages. The main page uses a blue background, minimal black & white graphics, and dividers for its layout. Subsequent pages use lighter backgrounds and are mainly text-based. The links are generally found as hypertext in the middle of paragraphs and are easy to locate. Each page has a link back to the home page. This site is for high school or older students though some of it could be used in middle school provided the teacher has reviewed the material beforehand. The author provides links to a variety of editorial and opinion news columns and original material. Much of the politics discussed concerns the Canadian government but the editorials come from newspapers in Canada and from around the world. The Letters to the Editor cover Canada and the United States. The additional outside resources include politics, social issues, religion, and technology. This Web site has material that may be considered controversial by some readers (but that is rather the point of the site) and so all material should be viewed by teachers first. This would be a good site for philosophy, current events, ethics, and journalism classes. A very interesting site that will make its readers "Think".
from copy import deepcopy

from nose.tools import assert_raises, eq_, ok_
from six import b

from lxmlbind.api import List, of, tag
from lxmlbind.tests.test_person import Person


@tag("person-list")
@of(Person)
class PersonList(List):
    """
    Example using typed list.
    """
    pass


def test_person_list():
    """
    Verify list operations: append, __len__, ordering, __getitem__,
    __iter__, __delitem__ and __setitem__ on a typed List.
    """
    person1 = Person()
    person1.first = "John"
    person2 = Person()
    person2.first = "Jane"

    # test append and __len__
    person_list = PersonList()
    eq_(len(person_list), 0)
    person_list.append(person1)
    eq_(len(person_list), 1)
    eq_(person1._parent, person_list)
    person_list.append(person2)
    eq_(len(person_list), 2)
    eq_(person2._parent, person_list)
    eq_(person_list.to_xml(), b("""<person-list><person type="object"><first>John</first></person><person type="object"><first>Jane</first></person></person-list>"""))  # noqa

    # test that append is preserving order
    # FIX: this block appeared twice verbatim; the duplicate was removed.
    person1_copy = deepcopy(person1)
    person2_copy = deepcopy(person2)
    person_list_reverse = PersonList()
    person_list_reverse.append(person2_copy)
    person_list_reverse.append(person1_copy)
    eq_(person_list[0], person_list_reverse[1])
    eq_(person_list[1], person_list_reverse[0])
    eq_(person_list_reverse.to_xml(), b"""<person-list><person type="object"><first>Jane</first></person><person type="object"><first>John</first></person></person-list>""")  # noqa

    # test __getitem__
    eq_(person_list[0].first, "John")
    eq_(person_list[0]._parent, person_list)
    eq_(person_list[1].first, "Jane")
    # FIX: second parent check used index 0 again; index 1 was clearly intended.
    eq_(person_list[1]._parent, person_list)

    # test __iter__
    eq_([person.first for person in person_list], ["John", "Jane"])
    ok_(all([person._parent == person_list for person in person_list]))

    # test __delitem__
    with assert_raises(IndexError):
        del person_list[2]
    del person_list[1]
    eq_(len(person_list), 1)
    eq_(person_list.to_xml(), b("""<person-list><person type="object"><first>John</first></person></person-list>"""))

    # test __setitem__
    person_list[0] = person2
    eq_(person_list.to_xml(), b("""<person-list><person type="object"><first>Jane</first></person></person-list>"""))
You see Pareto’s Principle applied to sales all the time — the top 20% of a sales force produces 80% of a company’s revenues and margins — and it’s applicable in a variety of sectors. In B2B contexts, for example, rep performance in similar territories often varies by 300% between top and bottom quintiles, and in retail stores selling productivity typically varies by a factor of three to four. So it’s no surprise that a company’s usual response to stalled growth is to hire more stars. There are a few problems with the hire-stars approach, however. First, there are only so many stars to go around since everyone is fighting over the same candidates. Second, even if you do manage to hire stars, their unique skill sets may not be easily portable. Research indicates that there’s a good chance that a star at Company A won’t be a star — or even productively relevant — at Company B. The third reason is simple math. Even though 80% of sales may currently come from 20% of reps, incremental improvements in the majority’s performance will have, in the aggregate, a much bigger impact on growth than stars do. We’re not arguing that stars don’t matter, because they definitely do. But, at the same time, companies must do more than rely on stars if they want to improve their overall sales performance. Companies that have adopted a subscription-as-service model (SaaS) are a great example. In the early years of a SaaS venture, stars typically generate the bulk of revenues, and they are often revered and feared internally for the relationships and power they wield. But, as the venture matures, and they continue to close a few big annual deals, they can limit growth since the SaaS model requires higher volumes. Although it’s easier than ever to create a SaaS business, it’s also harder to scale one. There’s a lot of competition, which keeps a lid on prices and increases customer acquisition costs.
A recent survey of 159 SaaS firms with at least $2.5 million in revenues found that almost 55% were spending more than a dollar to get a dollar in annual contract value. It’s also a tough talent market, especially in sales. If companies want to scale, they need to improve their sales processes, and this is especially true of SaaS businesses. It is as important as the products and services they sell and the customers they sell them to, and it’s a key to competitive advantage. Understand the sales tasks. When it comes to sales effectiveness, managers need to consider the tasks that reps must perform, not just their personalities and generic selling skills. These tasks will depend on your company’s strategy, the customers targeted by that strategy, and the business model you’ve put in place to acquire and retain those customers. Consider a SaaS service such as file sharing or various communications tools such as collaboration or meeting software. These applications aren’t typically mission-critical for customers, and are sold at relatively low monthly subscription prices. Buyers can gather a lot of pre-sale information via an online search, which allows them to act more quickly and decisively. On the seller’s end, “dialing for dollars” is paramount. They conduct online demos and provide prospects with a semi-customized proposal with a few clicks on the website to make the initial sale. But scaling this type of business typically requires “land and expand” sales tasks such as up-sells (getting the customer to purchase a premium version of the product) and cross-sells (getting the initial customer(s) to provide positive referrals to others in that organization). For example, ScriptLogic, which sold simple IT diagnostic tools to system administrators in the IT departments of small and mid-sized companies, built a good business with this sales approach and a “Point, Click, Done” value proposition. 
A SaaS platform service such as CRM or MAS (Marketing Automation Service), on the other hand, requires sophisticated integration to install annual or multi-year contracts. This is a complex initial sale with a longer selling cycle that is harder to do online or by phone. To add to that, reps often have to involve the vendor’s engineers in the selling process. Reps selling CRM services have vastly different tasks than reps selling communication tools. They must focus on landing renewals, increasing price through new functionalities and premium packages sold to different decision makers, and minimizing customer churn. They must also deal with a different decision-making process and budgeting procedures at accounts. The same approach which served ScriptLogic so well in SMB was not effective in selling its products to enterprise accounts, and ScriptLogic was eventually acquired by Quest Software, which employed a very different sales approach in the enterprise segment. It’s also important to keep in mind that sales tasks typically change over the course of a product-market life cycle. Generally, customer education and applications development are often key tasks in early stages. But as the market develops and standards emerge, sales people spend more time selling against functionally-equivalent brands or developing third-party relationships. If your sales process doesn’t keep pace with these changes, strategy execution and growth will falter. Match your sales process and resources to the buying process. Most sales organizations spend a lot of time and money tracking progress (or not) through their sales “funnels.” But selling is always more about the buyer than the seller, and most customer buying journeys resemble a meandering path rather than a progressively tapering funnel. B2B buyers, for example, tend to work through four parallel streams to make a purchase decision. 
So it’s important to understand where customers are in their journeys and how to interact with them appropriately at a given stage. With SaaS, the initial stage usually starts when the potential customer recognizes a fixable problem or opportunity. The seller can help to trigger that recognition in any of a number of ways, including starting a content marketing or SEO campaign to generate inbound leads, cultivating referrals from existing customers, making sales calls, planning conferences, sending emails to build awareness, and using social media to generate word of mouth. Subsequent stages again depend on buyer behavior and strategy. Most SaaS businesses have three tiers: a self-serve tier that allows for trial evaluation, a second tier that allows a single or departmental decision maker to engage and experience, and a third tier that requires selling to multiple stakeholders at the customer. Although some stars can navigate across all tiers, most reps can’t. So, in order to optimize the productivity of your sales force, you must determine where in the process different reps should get involved. Often, high-velocity inside sales reps are productive at the lower tier but counter-productive at higher tiers, which involve a more complex, cross-functional decision-making unit. Use tools to turn data into information. Considering the average U.S. company already has more data in its CRM system than in the entire Library of Congress, you probably feel overwhelmed by Big Data. That’s why it’s important to keep in mind that the role of data is to help you make better decisions, and in order to separate signal from noise, you need to know what you are measuring and use the right tools to measure it. Think about forecasting. Most firms put their pipeline information into a CRM either weekly or monthly and then review the volume and value of leads in that pipeline. 
In order to forecast for the following month or quarter, they typically extrapolate future performance from that snapshot: “Bob did $200,000 in sales last quarter, so let’s budget him for $250,000 next quarter,” and so on. But buying streams, especially for SaaS, are more like a motion picture than a snapshot, which means you should be measuring flows such as “what is Bob’s ratio of Monthly Recurring Revenue to Sales Qualified Leads (SQL)?” or “what is Sally’s ratio of Commits vs. SQLs?” These questions will inform a big decision: hire more people like Bob or find out what Sally is doing right. These categories will help you separate signal from noise. Volume Data: metrics that track volume by tracking Wins from number of Marketing Qualified Leads (MQLs) and SQLs. Conversion Data: ratios that track, for instance, how many MQLs result in SQLs. Opportunity Costs: extrapolations across multiple metrics. For example, you may compare the cost and Monthly Recurring Revenue generated by a marketing campaign in 30 days versus alternative uses of that money, the customer acquisition costs of your online versus field-sales team, up-sell and churn percentages, and so on. Many sales efforts need this process because business now changes often and fast. It’s true that any process is only as good as the people managing that process. But hiring Sales Operations or other “data analysts” without an iterative process in place is a recipe for frustration and expensive failure. And failure doesn’t scale. Conversely, make sure not to follow your process myopically or in a rote manner. Remember to look up: there are stars. But also remember that you don’t need to move everyone to the 90th percentile. Moving up a quartile would be a big deal, and that’s the role of a relevant sales process. Jacco van der Kooij is the founder and CEO of Winning By Design, a firm that works with early-stage companies on sales and other go-to-market issues. 
He is also the co-author, with Fernando Pizarro, of Blueprints for a SaaS Sales Organization.
'''
Support for a tag that allows skipping over functions while debugging.
'''
import linecache
import re

try:
    from _pydevd_bundle.pydevd_constants import dict_contains
except ImportError:
    # Fallback so the module also works standalone: dict_contains is a
    # Py2/3 compatibility shim equivalent to `key in d`.
    def dict_contains(d, key):
        return key in d

# To suppress tracing a method, add the tag @DontTrace
# to a comment either preceding or on the same line as
# the method definition
#
# E.g.:
# #@DontTrace
# def test1():
#     pass
#
#  ... or ...
#
# def test2(): #@DontTrace
#     pass
DONT_TRACE_TAG = '@DontTrace'

# Regular expression to match a decorator (at the beginning
# of a line).
RE_DECORATOR = re.compile(r'^\s*@')

# Cache mapping filename -> {0-based line number: 1} for lines carrying the
# @DontTrace tag (and their adjacent decorator lines).
# (The old comment claimed "code object to bool", which was wrong.)
_filename_to_ignored_lines = {}


def default_should_trace_hook(frame, filename):
    '''
    Return True if this frame should be traced, False if tracing should be blocked.
    '''
    # First, check whether this code object has a cached value
    ignored_lines = _filename_to_ignored_lines.get(filename)
    if ignored_lines is None:
        # Now, look up that line of code and check for a @DontTrace
        # preceding or on the same line as the method.
        # E.g.:
        # #@DontTrace
        # def test():
        #     pass
        #  ... or ...
        # def test(): #@DontTrace
        #     pass
        ignored_lines = {}
        lines = linecache.getlines(filename)
        i_line = 0  # Could use enumerate, but not there on all versions...
        for line in lines:
            j = line.find('#')
            if j >= 0:
                comment = line[j:]
                if DONT_TRACE_TAG in comment:
                    ignored_lines[i_line] = 1

                    # Note: when it's found in the comment, mark it up and
                    # down for the decorator lines found.
                    k = i_line - 1
                    while k >= 0:
                        if RE_DECORATOR.match(lines[k]):
                            ignored_lines[k] = 1
                            k -= 1
                        else:
                            break

                    k = i_line + 1
                    # FIX: was `k <= len(lines)`, which raised IndexError on
                    # `lines[k]` when the tag appeared on the last line of a
                    # file.
                    while k < len(lines):
                        if RE_DECORATOR.match(lines[k]):
                            ignored_lines[k] = 1
                            k += 1
                        else:
                            break

            i_line += 1

        _filename_to_ignored_lines[filename] = ignored_lines

    func_line = frame.f_code.co_firstlineno - 1  # co_firstlineno is 1-based, so -1 is needed
    return not (
        dict_contains(ignored_lines, func_line - 1) or  # -1 to get line before method
        dict_contains(ignored_lines, func_line))  # method line


should_trace_hook = None


def clear_trace_filter_cache():
    '''
    Clear the trace filter cache.
    Call this after reloading.
    '''
    global should_trace_hook
    try:
        # Need to temporarily disable a hook because otherwise
        # _filename_to_ignored_lines.clear() will never complete.
        old_hook = should_trace_hook
        should_trace_hook = None

        # Clear the linecache
        linecache.clearcache()
        _filename_to_ignored_lines.clear()
    finally:
        should_trace_hook = old_hook


def trace_filter(mode):
    '''
    Set the trace filter mode.

    mode: Whether to enable the trace hook.
      True: Trace filtering on (skipping methods tagged @DontTrace)
      False: Trace filtering off (trace methods tagged @DontTrace)
      None/default: Toggle trace filtering.
    '''
    global should_trace_hook
    if mode is None:
        mode = should_trace_hook is None

    if mode:
        should_trace_hook = default_should_trace_hook
    else:
        should_trace_hook = None

    return mode
Virgil van Dijk and Georginio Wijnaldum have played prominent roles for Liverpool this season. "Thursday night I had a tablet from the doctor for my knee", said Wijnaldum, quoted by the Liverpool Echo. Fabinho felt [something] a little bit; it will not be a problem, but there was no reason for him to be out today. "The manager called me and said 'Do you think you can play?' I said I was as positive as I can". In the morning I was still weak and had diarrhoea. "But even at half-time I had to run off to get to the toilet". I was like 'Oh no!'. "I think that City dominated the game", Kluivert continued. "It's not so much about how well you play, it's getting the three points". "You can see the improvement". It is hard [competing with City] but we already knew from the beginning that it would be hard. "But we're pleased to get a clean sheet and to get the three points was the main thing, obviously". "We have to see how it is going to work". In that time Neville has won eight Premier League titles for Manchester United and knows what it takes to come out on top in a tense title race. The 26-year-old said that Liverpool, who were nine points ahead of City at one stage in December, were not short of belief going into the home stretch of the season.
#! /usr/bin/env python3
"""Benchmark of an if/else-based periodic-boundary-condition scheme for the
2D Ising model (Metropolis Monte Carlo)."""

import textwrap
import argparse
import numpy as np
import random
import math
import time

proginfo = textwrap.dedent('''\
    This python script compares the efficiencies of different schemes
    implementing the periodic boundary conditions for Ising model problem.

    Author: JQ
    e-mail: gohjingqiang [at] gmail.com
    Date: 29-10-2014
    ''')


def simulate(L, num, T=300, seed=10):
    """Run `num` Metropolis sweeps on an L x L all-up lattice and return it.

    Parameters:
      L:    lattice edge length (spins).
      num:  number of full-lattice sweeps.
      T:    temperature used in the acceptance exponent.
            NOTE(review): eflip/T mixes units unless J = k_B = 1; with
            T=300 nearly every unfavorable flip is accepted -- confirm
            this is the intended benchmark regime.
      seed: RNG seed; fixed default keeps runs reproducible.
    """
    random.seed(seed)
    spin = np.ones((L, L))  # 2D square lattice, all spins up

    # "Method 3": periodic boundaries via explicit if/else conditions.
    # Deliberately NOT rewritten with modular arithmetic -- benchmarking
    # this exact scheme is the point of the script.
    for _ in range(num):
        for i in range(L):
            for j in range(L):
                # Determine the neighbor indices along the i-dimension
                if i == 0:
                    di1 = L - 1
                    di2 = 1
                elif i == L - 1:
                    di1 = L - 2
                    di2 = 0
                else:
                    di1 = i - 1
                    di2 = i + 1

                # Determine the neighbor indices along the j-dimension
                if j == 0:
                    dj1 = L - 1
                    dj2 = 1
                elif j == L - 1:
                    dj1 = L - 2
                    dj2 = 0
                else:
                    dj1 = j - 1
                    dj2 = j + 1

                # eflip: energy change if spin[i, j] is flipped; depends on
                # the four nearest neighbors.
                eflip = 2*spin[i, j]*(
                    spin[di1, j] +   # -1 in i-dimension
                    spin[di2, j] +   # +1 in i-dimension
                    spin[i, dj1] +   # -1 in j-dimension
                    spin[i, dj2]     # +1 in j-dimension
                )

                # Metropolis acceptance rule
                if eflip <= 0.0:
                    spin[i, j] = -1.0*spin[i, j]
                else:
                    if (random.random() < math.exp(-1.0*eflip/T)):
                        spin[i, j] = -1.0*spin[i, j]
    return spin


def _parse_args():
    """Build and run the command-line parser."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=proginfo)
    parser.add_argument('-l', '--L', type=int, default=10,
                        help='L, the number of spin along the edges of a \
                        2D square lattice. Default (10)')
    parser.add_argument('-n', '--num', type=int, default=100,
                        help='The total number of Monte Carlo sweeps. \
                        Default (100)')
    return parser.parse_args()


def main():
    """Script entry point: run the benchmark and print lattice + timing."""
    args = _parse_args()
    start = time.time()
    L = args.L
    print(L)
    spin = simulate(L, args.num)
    end = time.time()
    print(spin)
    print(end - start)


if __name__ == '__main__':
    main()
A warranty can provide important peace of mind for vehicle owners by covering the cost of the maintenance and repairs required to keep the vehicle on the road. At SMC Cars UK we can help provide a range of comprehensive vehicle warranty options. Please contact us today for more information.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
#    Copyright (C) 2016 Zomboided
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#    This module allows some limited interaction with the service via
#    a set of commands

import sys  # FIX: sys.argv is used below but sys was never imported
import string
import xbmcaddon
import xbmcvfs
from libs.common import setAPICommand, clearAPICommand, getAPICommand
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID

# Get the first argument which will indicate the connection that's being dealt with
command = sys.argv[1]
lcommand = command.lower()
debugTrace("Entered api.py with parameter " + command)

if not getID() == "":
    # Simple one-word commands map straight onto API commands.
    if lcommand == "disconnect":
        setAPICommand("Disconnect")
    elif lcommand == "cycle":
        setAPICommand("Cycle")
    elif lcommand == "fake":
        setAPICommand("Fake")
    elif lcommand == "real":
        setAPICommand("Real")
    elif lcommand == "pause":
        setAPICommand("Pause")
    elif lcommand == "restart":
        setAPICommand("Restart")
    elif lcommand == "reconnect":
        setAPICommand("Reconnect")
    elif lcommand == "getip":
        setAPICommand("GetIP")
    elif lcommand.startswith("connect"):
        # Expected form "connect <n>" or "connect <path>"; [8:] skips the
        # 8 characters of "connect ".
        connection = command[8:].strip(' \t\n\r')
        if connection.isdigit():
            # Numeric argument selects one of the validated connections.
            c = int(connection)
            addon = xbmcaddon.Addon(getID())
            # Adjust the 11 below to change conn_max
            if c > 0 and c < 11:
                connection = addon.getSetting(str(c) + "_vpn_validated")
                if not connection == "":
                    setAPICommand(connection)
                else:
                    errorTrace("api.py", "Connection requested, " + str(c) + " has not been validated")
            else:
                errorTrace("api.py", "Invalid connection, " + str(c) + " requested")
        else:
            # Otherwise treat the argument as a path to an ovpn file.
            if xbmcvfs.exists(connection):
                setAPICommand(connection)
            else:
                errorTrace("api.py", "Requested connection, " + connection + " does not exist")
    else:
        errorTrace("api.py", "Unrecognised command: " + command)
else:
    errorTrace("api.py", "VPN service is not ready")

debugTrace("-- Exit api.py --")
And it’s goddamn delicious. All us diabetics out there are foaming at the mouth. Bravo. SRQNews.com investigators have uncovered what may be the biggest revelation and human satisfaction since the lawnmower. According to nearly a decade of research, if you are attractive, fit, have a solid personality and look like Channing Tatum it’s not that hard to get laid for free. This is life changing for the blessed, good looking fuckers out there.
# (Commented-out countdown/adder/biggest/smallest recursion exercises
# removed as dead code; retrieve them from version control if needed.)

#Power
def pow_(x, n):
    """Return x raised to the integer power n, computed recursively.

    Generalized to negative exponents: pow_(x, -n) returns the
    reciprocal 1 / x**n.  The original recursed forever for n < 0.
    """
    if n < 0:
        # Reduce to the positive-exponent case, then invert.
        return 1.0 / pow_(x, -n)
    if n == 0:
        return 1
    return x * pow_(x, n - 1)


def main():
    # Parentheses keep this a valid call under both Python 2 and 3.
    print(pow_(2, 4))


main()
The shortest of our films and the first one we ever made, but by far our most dramatic. With powerful imagery and a superb orchestral music track, this film is a really engaging watch, and, to be honest, just a little bit scary! Watch in horror as dead and dying cars are torn apart like paper, and tossed high in the air by mechanical grabbers. Their cruel and powerful claws show no more mercy to their victims than a cat would show to a hapless mouse. Alongside the cars in this graveyard of metalwork you will also see fridges, radiators, pipes, and washing machines - anything that can be melted down and recycled into something new. With surrounding materials like woods and plastics stripped away, the old metal is dropped onto a conveyor belt. At its end, a powerful grinding and shredding machine tears the metal into thousands of shards. Hot and steaming from their experience in the shredder, the fragments emerge to be magnetically sorted. As an iron-based metal, steel is known as 'ferrous', which means it can be picked up by magnets. This leaves behind the more precious non-magnetic, non-ferrous metals such as copper and aluminium. And so it is that the cars and fridges that we started with go towards building two great metal mountains - one ferrous, and the other non-ferrous. Massive digging machines cut the mountains down, and carry their loads to waiting trains; these will take the metal to foundries for further sorting and melting - maybe to make a coke can, a new car or washing machine. Click to add 'Scrap Metal' to your download list.
#!/usr/bin/env python3
from os import walk
import os, re, sys, subprocess
from datetime import datetime
import argparse

###
## Generates a HTML table of eXist-db dist artifacts
###

tmp_dir = "/tmp/exist-nightly-build/dist"
default_build_dir = tmp_dir + "/source"
default_output_dir = tmp_dir + "/target"

# parse command line arguments
parser = argparse.ArgumentParser(description="Generate an index.html table of nightly builds")
parser.add_argument("-b", "--git-branch", default="develop", dest="git_branch", help="The git branch to use")
parser.add_argument("-u", "--github-repo-url", default="https://github.com/eXist-db/exist", dest="github_repo_url", help="Public URL of the GitHub repo")
parser.add_argument("-d", "--build-dir", default=default_build_dir, dest="build_dir", help="The directory containing the eXist-db build")
parser.add_argument("-o", "--output-dir", default=default_output_dir, dest="output_dir", help="The directory containing the built eXist-db artifacts")
parser.add_argument("-f", "--file-name", default="table.html", dest="filename", help="The name for the generated HTML file")
args = parser.parse_args()

print(f"""Generating {args.output_dir}/{args.filename}...""")

# find all snapshot artifacts, skipping the .sha256 checksum side-car files
existFiles = []
for (dirpath, dirnames, filenames) in walk(args.output_dir):
    for filename in filenames:
        if "exist" in filename and "SNAPSHOT" in filename and ".sha256" not in filename:
            existFiles.append(filename)

# extract the set of build labels (the 12-14 digit timestamp after '+').
# FIX: raw string — the pattern contains \. and \- escapes which are
# invalid escape sequences in a plain string literal.
buildLabelPattern = re.compile(r"exist-(?:distribution|installer)-[0-9]+\.[0-9]+\.[0-9]+(?:-RC[\-0-9]+)?-SNAPSHOT(?:-(?:win|unix))?\+([0-9]{12,14})\.(?:jar|dmg|tar\.bz2|zip)")
buildLabels = set()
for name in existFiles:
    m = buildLabelPattern.match(name)
    # FIX: match() returns None for files that pass the crude substring
    # filter above but don't fit the naming scheme; previously that
    # crashed with AttributeError on .groups()
    if m is not None:
        buildLabels.add(m.groups()[0])

# start writing table (closed in the row-emission section below)
f = open(args.output_dir + "/" + args.filename, "w")
f.write("""<div>
<table id="myTable" class="tablesorter">
  <thead>
    <tr>
      <th>Date</th>
      <th>Build Label</th>
      <th>Git Hash</th>
      <th>Downloads</th>
    </tr>
  </thead>
  <tbody>
""")

# iterate over hashes
# FIX: raw strings — both patterns contain \. / \- escapes that are
# invalid escape sequences in plain string literals
fileExtPattern = re.compile(r".+\.(jar|dmg|tar\.bz2|zip)$")
labelPattern = re.compile(r"exist-(?:distribution|installer)-[0-9]+\.[0-9]+\.[0-9]+(?:-RC[\-0-9]+)?-SNAPSHOT(?:-(?:win|unix))?\+([0-9]{12,14})\.(?:jar|dmg|tar\.bz2|zip)$")
for buildLabel in buildLabels:
    # group this build's files per download type (extension -> filename)
    types = {}
    recentDate = ""
    for file in existFiles:
        if buildLabel in file:
            groups = fileExtPattern.match(file).groups()
            types[groups[0]] = file
            changeDate = datetime.strptime(buildLabel, "%Y%m%d%H%M%S").strftime("%Y-%m-%d")
            if changeDate > recentDate:
                recentDate = changeDate
    # resolve the git commit closest before the build timestamp
    gitBeforeDate = datetime.strptime(buildLabel, "%Y%m%d%H%M%S").strftime("%Y-%m-%d %H:%M:%S")
    gitProcess = subprocess.run(["git", "rev-list", "-1", "--before=\"" + gitBeforeDate + "\"", args.git_branch], cwd=args.build_dir, stdout=subprocess.PIPE, encoding='utf-8', check=True)
    gitHash = gitProcess.stdout.strip()[:7]
    # FIX: the label IS the build timestamp; the old code re-extracted it
    # from the leftover loop variable `file`, which after the inner loop
    # is simply the last artifact overall and can belong to another build
    label = buildLabel
    f.write(f"""    <tr>
      <td>{changeDate}</td>
      <td>{label}</td>
      <td><a href="{args.github_repo_url}/commit/{gitHash}">{gitHash}</a></td>
      <td>
        <ul>
""")
    for type in types.keys():
        f.write(f"""          <li><a href="{str(types.get(type))}">{type}</a> ({('%.1f' % (float(os.path.getsize(args.output_dir + "/" + types.get(type))) / (1024 * 1024)))} MB) <a href="{str(types.get(type))}.sha256">SHA256</a></li>
""")
        print(f"""Added {str(types.get(type))}""")
    # FIX: close only this row's cell and row here; previously </tbody>
    # and </table> were emitted once PER build, producing invalid HTML
    # whenever more than one build label exists (the </td> was missing too)
    f.write("""        </ul>
      </td>
    </tr>
""")
# FIX: close the table exactly once, after all rows
f.write("""  </tbody>
</table>
""")
f.write("""<script>$(function(){$("#myTable").tablesorter({sortList : [[0,1]]}); });</script>
</div>""")
f.close()

print("Done.")
witheskin . LeylaBest. ShineBlondie. MaryTheNight. MaxStevensonDirtyxSlavex4uartistdanceHotNikki69 .LovePinkLily69VeronicaSweetXXXShineBlondieartistdance .SileStiaLeylaBestRoseMilleramazinghotassxx .KateAndSellyxxxLeylaBestSquirtyGirlssssbIgbOObtiGhtBody .SunnyElliKimbi4uLunaBBWxoxoBritniLoer .derekfoxTwoSexyGirls4UCassandraAngelLittleSarra .SlimQuentinSelimaKhanDominaXLamazinghotassxx .foxyAMORtsxTHAYLORxMistressLaylaKateAndSellyxxx .ExcellentCpl4UHotNikki69imyourSEXprophetDevilsFuckersx .JonahMercuryyLittleCuteJanenaughtynastycandyts4u .hotchocolategirlMaxStevensonDiamond1AssLittleSarra . dirtyextremefetiZDragZderekfoxKristinaMurr .JASMINsecret4UGentleJasmineMoonBlossomDiamond1Ass .JASMINsecret4UStudBlueEyes4uviphotshowcutebabies2 .VeronicaSweetXXXXXEroticbehaviorLittleCuteJaneUrOnlyLover .naughtynastyArdentJohn1DirtyxSlavex4uLissaRoss .lina12inchxTHAYLORxRebeccaLustnaughtynasty .HotAndHorny4youHotNikki69SunnyElliShineBlondie .BreeBlondecandyllizetxxxBreeBlondeZaraJonez .StudBlueEyes4uAmie20JoyofLoveAmie20 .LexiKaneBreeBlondeTavsanJoyofLove .lina12inch00AmberXartistdanceExoticBarbaraxX .
#!/usr/bin/python
# Author: Porter Liu
#
# NOTE(review): this is a Python 2 script (urllib2, `except Exception, e`,
# `string.atoi`, print statement) and depends on the third-party `poster`
# package.  It polls a build server directory listing for the newest iOS
# build, downloads the .ipa, uploads it to a "TOT" service, and records
# the last processed build number in a side-car text file.

import re
import sys
import json
import string
import urllib2
import os.path
from poster.encode import MultipartParam
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers

# Example layout of the configuration this script expects
# (note: "smaple" is a long-standing typo for "sample"):
# http://foo.com/allbuilds/iOS-client.123/InHouse_123.ipa
configuration_smaple = {
    "base_url" : "http://foo.com/allbuilds/",
    "build_pattern" : "iOS-client.(\\d+)",
    "build_path" : "iOS-client.{bn}/InHouse_{bn}.ipa",
    "tot_url" : "http://bar.com/tot/",
}

def show_configuration_file_syntax():
    # Print the sample configuration so the user can see the expected keys.
    print( configuration_smaple )

# we need one argument for configuration file
if len( sys.argv ) != 2:
    print( 'Usage: ' + sys.argv[0] + ' configuration_file' )
    exit( 1 )

# generate build number filename from configuration filename
# for instance: foo.json => foo.buildnumber.txt
temp1, temp2 = os.path.splitext( os.path.basename( sys.argv[1] ) )
buildnumber_filename = temp1 + '.buildnumber.txt';

# open & load configuration in JSON format
try:
    configurationFile = open( sys.argv[1] )
    try:
        config = json.load( configurationFile )
    except Exception, e:
        print( e )
        exit( 1 )
    finally:
        configurationFile.close()
except Exception, e:
    print( e )
    exit( 1 )

# verify configuration file: every key of the sample must be present
for key in configuration_smaple.keys():
    if key not in config:
        print( 'Failed to find "' + key + '" in ' + sys.argv[1] )
        show_configuration_file_syntax()
        exit( 1 )

#
# load the last processed build number
#
# Starts at 0 so the first run processes whatever build it finds.
build_number = 0
if os.path.exists( buildnumber_filename ):
    temp = open( buildnumber_filename, 'r' )
    build_number = string.atoi( temp.read() )
    temp.close()
print( 'old build number = ' + str( build_number ) )

#
# find out the latest build number
#
# Fetch the directory listing page and scan it for build directory names.
try:
    remotefile = urllib2.urlopen( config['base_url'] )
    data = remotefile.read()
    remotefile.close()
except Exception, e:
    print( 'failed to access "' + config['base_url'] + '", ' + str( e ) )
    exit( 1 )

# Keep the maximum build number seen in the listing (falls back to the
# previously recorded number if nothing newer is found).
temp_build_number = build_number
pattern = config['build_pattern']
po = re.compile( pattern )
mo = po.findall( data )
if mo:
    for item in mo:
        n = string.atoi( item )
        if n > temp_build_number:
            temp_build_number = n
print( 'current max build number = ' + str( temp_build_number ) )

if temp_build_number <= build_number:
    # Nothing newer than the last processed build: exit successfully.
    print( 'no new build' )
    sys.exit( 0 )
else:
    build_number = temp_build_number
    print( 'will use ' + str( build_number ) + ' as build number' )

#
# generate package url and download
#
url = ( config['base_url'] + config['build_path'] ).format( bn = build_number )
print( 'package URL = ' + url )
package_filename = os.path.basename( url )
print( 'package filename = ' + package_filename )

# Download the whole package into memory, then write it to a local file.
data = None
try:
    remotefile = urllib2.urlopen( url )
    data = remotefile.read()
    remotefile.close()
except Exception, e:
    print( 'failed to access package URL, ' + str( e ) )
    sys.exit( 1 )

try:
    localFile = open( package_filename, 'wb' )
    localFile.write( data )
    localFile.close()
except Exception, e:
    print( 'failed to create local file, ' + str( e ) )
    sys.exit( 1 )

#
# upload package file onto TOT
#
# poster's streaming handlers let urllib2 send multipart/form-data.
register_openers()
#datagen, headers = multipart_encode( { 'file' : open( ipa_filename, 'rb' ), 'changelog' : build_name + '.' + str( build_number ), 'submit' : 'Submit' } )
ipa = MultipartParam.from_file( 'file', package_filename )
ipa.filetype = 'application/octet-stream'
changelog = MultipartParam( 'changelog', str( build_number ) )
submit = MultipartParam( 'submit', 'Submit' )
datagen, headers = multipart_encode( [ ipa, changelog, submit ] )
request = urllib2.Request( config['tot_url'] + '/upload.php', datagen, headers )
print urllib2.urlopen( request ).read()

# delete the package
os.remove( package_filename )

#
# save the current build number
#
# Only reached after a successful upload, so a failed run will retry
# the same build next time.
temp = open( buildnumber_filename, 'w' )
temp.write( str( build_number ) )
temp.close()
I had soooo much fun photographing baby Zachary at his home in Gravette. He was such a pleasure to photograph and made my job sooo easy. Here he is a chunky little guy at 10 days old, weighing 9.5lbs when born.
# -*- coding: utf-8 -*-
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code in this file is dual licensed under the MIT license or
# the GPLv3 or (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.

from mediadrop.lib.filetypes import VIDEO
from mediadrop.lib.players import FileSupportMixin, RTMP
from mediadrop.lib.test.pythonic_testcase import *
from mediadrop.lib.uri import StorageURI
from mediadrop.model import MediaFile


class FileSupportMixinTest(PythonicTestCase):
    def test_can_play_ignores_empty_container(self):
        """A player restricted to mp4 containers must still accept an RTMP
        URI whose media file carries an empty container string."""
        class FakePlayer(FileSupportMixin):
            supported_containers = {'mp4'}
            supported_schemes = {RTMP}
        player = FakePlayer()
        video_file = MediaFile()
        video_file.container = ''
        video_file.type = VIDEO
        rtmp_uri = StorageURI(video_file, 'rtmp', 'test',
            server_uri='rtmp://stream.host.example/play')
        assert_equals('', rtmp_uri.file.container,
            message='It is important that the server uri has no container.')
        assert_equals((True, ), player.can_play([rtmp_uri]))


import unittest

def suite():
    """Collect this module's test case into a unittest suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(FileSupportMixinTest))
    return tests
OECD-FAO Agricultural Outlook 2018-2027 – Thrakika Ekkokistiria S.A. This chapter describes the market situation and highlights the latest set of quantitative medium-term projections for world and national cotton markets for the ten-year period 2018-27. World cotton production is expected to grow at a slower pace than consumption during the first few years of the outlook period, reflecting lower prices and releases of global stocks accumulated between 2010 and 2014. India will remain the world’s largest country for cotton production, while the global area devoted to cotton is projected to recover slightly despite a decrease of 3% in China. Processing of raw cotton in China is expected to continue its long-term downward trend, while India will become the world’s largest country for cotton mill consumption. In 2027, the United States remains the world’s main exporter, accounting for 36% of global exports. Cotton prices are expected to be lower than in the base period (2015-17) in both real and nominal terms, as the world cotton price is continuously under pressure due to high stock levels and competition from synthetic fibres. The recovery in the world cotton market continued during the 2017 marketing year following the slight increase in production in 2016, with production reaching 25.6 Mt. Global cotton production recovered by about 11.1% in 2017 due to improved yields and recovered areas. In addition, on-going stock releases helped to stabilise world consumption, although total world stocks remain at a very high level (at 19.2 Mt, still about eight months of world consumption). Production increased in almost all major cotton producing countries, including the People’s Republic of China (hereafter “China”) which recovered by 7% in 2017. Pakistan, the United States, Turkey and India increased production by 24%, 24%, 18% and 9%, respectively due to increases in yields and in the area planted. 
Global cotton demand increased slightly during the 2017 marketing year to 25.0 Mt. Mill consumption estimates show an increase of 3% (to 5.3 Mt) in India and in a stable 8.0 Mt in China. Mill consumption increased in Viet Nam by 12% and in Bangladesh by 6.9% as Chinese direct investment in mills continued. The increase in Pakistan was 4%. Global cotton trade recovered by 1.0% in 2017 to 8 Mt. Increases in imports by Bangladesh, Pakistan and Viet Nam were insufficient to offset the decline in many countries’ import demand from 2016. China’s cotton support policy has continually narrowed the price gap between domestic and imported cotton, and both cotton prices were moving almost in parallel in 2017. In addition, US exports remained stable at 3.1 Mt from 2016, and Australia’s exports continued to increase by 3% in 2017 due to a recovery in production from 2014. Although the world cotton price is continuously under pressure due to high stock levels and strong competition from synthetic fibres, cotton prices are expected to be relatively stable in nominal terms during the outlook period. This makes cotton less competitive because prices for polyester are significantly lower than both international and domestic cotton prices. During 2018-27, relative stability is expected as government support policies continue to stabilise markets in major cotton-producing countries. However, world cotton prices are expected to be lower than the average in the base period (2015-17) in both real and nominal terms. World production is expected to grow at a slower pace than consumption during the first few years of the outlook period, reflecting the anticipated lower price levels and projected releases of global stocks accumulated between 2010 and 2014. The stock-to-use ratio is expected to be 39% in 2027, which is well below the average of the 2000s of 46%. The global land use devoted to cotton is projected to remain slightly lower than the average in the base period. 
Global cotton yields will grow slowly as production gradually shifts from relatively high yielding countries, notably China, to relatively low-yielding ones in South Asia and West Africa. World cotton use is expected to grow at 0.9% p.a. as a result of slower economic and population growth in comparison with 2000s, reaching 28.7 Mt in 2027. Consumption in China is expected to fall by 12.5% from the base period to 6.9 Mt in 2027, continuing its downward trend, while India will become the world’s largest country for cotton mill consumption with an increase by 42.2% to 7.5 Mt in 2027. Higher cotton mill consumption by 2027 is also foreseen for Viet Nam, Indonesia, Bangladesh, and Turkey, with consumption increasing by 74%, 45%, 34% and 17% from the base period respectively. It is expected that global cotton trade will grow more slowly compared to previous years. Trade in 2027 is expected, however, to exceed the average of the 2000s. To obtain value-added in the textile industry, there has been a shift in the past several years towards trading cotton yarn and man-made fibres rather than raw cotton, and this is expected to continue. Global raw cotton trade will nevertheless reach 9.4 Mt by 2027, 19% higher than the average of the base period 2015-17. In 2027 the United States remains the world’s largest exporter, accounting for 36% of global exports, 1% point higher in the base period. Brazil’s exports are projected to reach 1.2 Mt in 2027, 0.5 Mt more than in the base period. This makes Brazil the second largest exporter overtaking India. The third largest exporter will be Australia with exports increasing from 0.7 Mt in the base period to 1.0 Mt. Cotton producing countries in Sub-Saharan Africa will increase their exports to 1.6 Mt by 2027. On the import side, China’s imports are expected to slightly grow to 1.2 Mt in 2027 which is still a low level in comparison to those reached during the last decade. 
Suppressed low domestic consumption and releases of stocks, as well as reduced producer support are behind this development. China’s dominant role in the world cotton market will be significantly challenged as other importing countries emerge. It is projected that imports in Viet Nam and Bangladesh will increase respectively by 0.8 Mt and 0.5 Mt, and Indonesia and Turkey will import 1.0 Mt and 0.8 Mt by 2027 respectively. While continuing increases in farm labour costs and competition for land and other natural resources from alternative crops place significant constraints on growth, higher productivity driven by technological progress and the adoption of better cotton practices, including the use of certified seeds, high density planting systems and short duration varieties. Altogether, this creates significant potential for cotton production to expand in the next decade. While the medium-term prospects are for sustained growth, there may be potential short-term uncertainties in the current outlook period which may result in short-term volatility in demand, supply and prices. A sudden slow-down in the global economy, a sharp drop in trade of global textiles and clothing, competitive prices and quality from synthetic fibres, and changes in government policies are important factors that can affect the cotton market. Cotton prices are expected to be relatively stable in nominal terms especially in the latter half of the projection period, although the world cotton price is continuously under pressure due to high stock levels and competition from synthetic fibres. Cotton markets are expected to stabilise as government support policies continue in major cotton-producing countries during 2018-27. Global cotton stocks grew slightly in 2017, but are expected to decrease to 11 Mt by 2027, which corresponds to five months of world consumption. The stock-to-use ratio is expected to drop to around 40% in 2027; substantially below the 80% observed in the base period. 
Relative stability is expected in China’s cotton market after the government has been shifting its cotton policy resulting in reduced stock accumulation during the projection period. Note: Cotlook A Index, Middling 1-3/32", c.f.r. Far Eastern ports (August/July). World production is expected to reach 27.7 Mt in 2027, mainly sustained by yield growth, with an average increase of 1.6% p.a. over the projection period. However, world production is expected to grow at a slower pace than consumption during the first years of the outlook period, reflecting the anticipated lower price levels and projected releases of stocks that were accumulated between 2010 and 2014. Additionally, the Outlook foresees a slight decline in world cotton area in the first two years of the projection period, which is followed by a gradual increase thereafter. The global cotton area is projected to recover throughout the outlook period, despite a 1% decrease in China. The average global cotton yield will grow slowly, as production shares gradually shift from relatively high yielding countries, notably China, to relatively low-yielding areas in South Asia and West Africa. Yield growth in China is expected to slow down from over 3% p.a. over the past decade to 1% p.a. for the next ten years. Cotton producers in China still have high per hectare yields (about twice the world average), but they are realised with relatively labour-intensive technologies. Due to small plots with limited water resource and low mechanization, cotton farmers especially in the eastern provinces face high and rising production costs. The Outlook projections foresee that India will produce 7.9 Mt of cotton by 2027, which is approximately one third of the projected world output. Indian farmers continue to apply new technologies to improve their yield potential.
The adoption of genetically modified (GM) cotton in India is part of a shift in practices and technology-use that resulted in more than doubling cotton production between 2003 and the base period. Yields are expected to grow by 1.9% p.a. during 2018-2027, which is above the annual growth rate during 2008-17, due to improved management practices. On the other hand, it is important to note that India’s variability in cotton yield is determined by the monsoon pattern in rain-fed regions. Climate change could affect this pattern and impact cotton yields in the future. Pakistan accounts for the fourth largest share of global production. Projections indicate that Pakistan will produce 2.4 Mt of cotton by 2027. Production will increase by about 1.4% annually, as a result of area expansions and yield improvements. Similarly to Pakistan, India is expected to realise faster growth in the cotton area than in other crops. Production is projected to increase with annual growth rates of about 2.3%. However, in absolute terms, production in Pakistan is lower than in India as it lags considerably behind India in the adoption of GM cotton. African countries – mainly Benin, Mali, Burkina Faso, Côte d’Ivoire and Cameroon – are expected to contribute 2 Mt to world production by 2027, 33% above the base period. It is worth noting that the growth reported in Burkina Faso is taking place simultaneously with a move from GM cotton back to non-GM. GM cotton yielded shorter fibres than the conventional variety thus not allowing for smooth and stable thread that is essential for textile production. Total demand for cotton, which amounted to 24.5 Mt in the base period, is expected to reach 28.7 Mt in 2027. This figure exceeds the 2007 historical consumption record and corresponds to 0.9% p.a. growth over the next ten years. However, this increase is not uniform across the period of analysis. 
While consumption grows faster than population in the next ten years, consumption on a per capita basis in 2027 is expected to remain below the peaks reached during 2005-07 and 2010 (Figure 10.5). Asia is confirmed as the number one area of the world for cotton consumption, mainly due to cheaper labour, lower electricity costs and weaker environmental regulations. One of the main factors weakening the cotton consumption recovery is severe competition from synthetic fibres. Based on the assumption of relatively low oil prices, polyester prices are projected to remain significantly lower than cotton, which puts downward pressure on cotton markets throughout the projection period. In addition, cotton consumption will be influenced not only by macroeconomic trends but also by evolving tastes and preferences, including the increasing awareness with respect to marine plastic pollution. Scientific studies have demonstrated how a single synthetic garment can shed thousands of synthetic microfibers in a single wash and these microfibers get past the filter systems in treatment plants and end up in rivers and the ocean. Consumption in China is expected to fall by 13% from the base period to 6.9 Mt following the downward trend that started in 2009. China’s share of world cotton consumption is projected to fall to 24% in 2027, from 32% in the base period. As a consequence China loses its position as the largest cotton mill consumer – a position it has maintained since the 1960s – to India. India is expected to consume 7.5 Mt in 2027, increasing its share in total world consumption from 21% in the base period to 26% in 2027. Mill consumption in Pakistan is estimated to increase by 18% over the projection period, while Viet Nam is projected to keep its consumption at high levels. 
Chinese direct investment in mills might not continue in these countries because local prices are slowly moving closer to global levels behind gradually increasing farm labour costs in these countries for the next decade. Higher cotton mill consumption by 2027 is also foreseen for Bangladesh, Indonesia, Turkey and other Asian countries (mainly Turkmenistan and Uzbekistan). The fastest growth among major consumers is expected in Bangladesh, Viet Nam and Indonesia, where consumption is expected to grow at 3.5%, 2.9% and 2.1% p.a. respectively, as their textile industries are expected to continue the rapid expansion that began in 2010. While Bangladesh had been widely expected to reduce its textile exports after the phase-out of the Multi-Fibre Arrangement (MFA) in 2005, its garment exports and cotton spinning have still flourished. Global cotton trade is expected to follow the ongoing transformation of the world textile industry which began several years ago, mainly driven by rising labour costs, cotton support prices, and incentives to obtain added value in the cotton supply chain. There has been a tendency in recent years to gradually replace raw cotton trade with trade of cotton yarn and man-made fibres. However, global raw cotton trade is expected to recover to 9.4 Mt in 2027, about 19% higher than during the base period, even though this would be still below 10.0 Mt, the average level for 2011-12. The world largest exporter throughout the outlook period is the United States, accounting for 36% of global exports in 2027 (35% in the base period) followed by Brazil and Australia (Figure 10.6). Exports from Brazil will reach 1.2 Mt from 0.8 Mt in the base period. Australia is expected to increase exports by over 2.8% annually to reach 1.0 Mt by 2027. Over the past few years, given its surge in productivity and production, India has become a major player on the world cotton market. 
However, India’s exports are expected to fall to 0.9 Mt in 2027 and the country is expected to account for 9% of the world’s cotton exports while this share was 14% in the base period due to growing domestic uses. Note: Top 5 importers (2007-2016): Bangladesh, China, India, Turkey, Viet Nam. Top 5 exporters (2007-2016): Australia, Brazil, European Union, India, and the United States. Sub-Saharan African countries continue to play a major role as cotton exporters. It is expected that their share in world trade will grow to 18% with exports reaching 1.6 Mt by 2027. However, trade in the region has been volatile in the past few decades. Cotton mill consumption is limited throughout Sub-Saharan Africa and many countries export virtually all their production. With the increases in productivity, in particular through the adoption of bio-tech cotton in this region, production and exports are expected to be 25% and 26% higher respectively in 2027 compared to the base period. The transition in trade also induces changes in the composition of importers in the world cotton economy. Although China lost its position as the world’s largest importer in 2015, over the outlook period its share of world cotton imports will remain stable at about 13%. The projected 1.2 Mt of cotton imports entering China in 2027 would be far smaller than the peak imports of about 5 Mt in 2011. In contrast, Bangladesh and Viet Nam are projected to be the leading importers. By 2027, they are expected to increase their imports by 41% and 69%, accounting for over 40% of world trade. While the medium-term prospects for the world cotton market are stable, there will be potential short-term volatilities in demand, supply and prices that may result in significant short-term uncertainties in the projection period. The demand for raw cotton is derived from the demand for textiles and clothing, which is very sensitive to changes in economic conditions.
In the scenario of a sudden slow-down in the global economy, global consumption of textiles and clothing would experience a sharp drop, which would also impact the raw cotton market. As an example, the 2008-09 financial crisis, which caused average global consumption to fall by over 10%, resulted in a 40% reduction of cotton prices. Despite the intention of the governments of Viet Nam, Bangladesh and India to promote and increase production, factors such as limited area, water scarcity and climate change constrain their efforts. Malaysia is actively pursuing a Free Trade Agreement with the European Union. This should increase Malaysia’s textile export to the European Union and subsequently increase domestic consumption of cotton. China’s cotton policies are one of the main sources of uncertainty in the global cotton sector. In particular its stock holdings have an important impact on the world market. Building on the reforms of 2014, China may take further steps to modify its policies in the next decade. This would have important implications for the world market in general, and possibly impact specific industries in partner countries, such as the cotton spinning sector in Viet Nam. Global cotton yields will grow slowly, as production gradually shifts from relatively high yielding countries, notably China leading to significantly higher yields, to relatively lower yielding ones in India and South Asian countries. GM adoption in the United States has reduced the cost of growing cotton, and the adoption of GM varieties specifically targeted to local production conditions in Australia has also increased productivity. In India producers adopted GM crops and updated their management practices. However, average yields remain far below those of many other cotton producers and the GM varieties are very vulnerable against adverse weather conditions, causing other countries to take a more conservative approach to GM adoption. 
No trade restrictions have yet been applied to cotton fibre, yarn, or other textile products made with GM cotton, but GM adoption has nonetheless been slow in many countries. The recent example of Burkina Faso — where farmers realised that the adopted GM varieties had shorter fibres and yielded reduced market revenues, leading them to return to GM-free varieties — however, illustrates another level of uncertainty regarding GM adoption. Future productivity growth in countries with low yields will in general be determined by their adoption of new technologies, including mechanisation and increased input use.
# HedgehogHD - Vector Graphics Platform Game Engine
# Copyright (C) 2010 Andrew Clunis <andrew@orospakr.ca>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import logging


class Tile(object):
    '''16x16 Tile instance in a Chunk

    These are never shared between chunks; they are used only once each.
    Contains the parameters describing the Collision, graphics Block, and
    flip properties of a given 16x16 tile in a Chunk.  64 of them exist
    in a Chunk, arranged in 8x8.

    They contain:

    * horizontal and vertical flip bits
    * a reference to an artwork tile and collision block (through the
      index) by Tile ID
    * collision solidity control bits, for the primary and alternate
      layers (aka, paths)

    Bit layout of the 16-bit tile word::

        SSTT YXII IIII IIII
    '''

    def __init__(self, chunk, tile_word):
        # chunk: owning Chunk (gives access back to the chunk array and
        # the loaded collision indices); tile_word: 16-bit descriptor,
        # see the bit layout in the class docstring.
        self.chunk = chunk
        # S bits: solidity on the alternate collision layer (0-3).
        self.alternate_collision_solidity = (tile_word & 0xC000) >> 14
        # T bits: solidity on the normal collision layer (0-3).
        self.normal_collision_solidity = (tile_word & 0x3000) >> 12
        # I bits: index into the artwork/collision index tables.
        self.tile_index = tile_word & 0x3FF
        self.y_flipped = (tile_word & 0x800) >> 11
        self.x_flipped = (tile_word & 0x400) >> 10

        # Both solidity fields come from 2-bit extractions and can never
        # exceed 3, so these are purely defensive.  BUGFIX: the error
        # messages previously referenced misspelled attributes
        # (self.aternate_collision / self.normal_collision) that do not
        # exist, which would have raised AttributeError instead of
        # logging the intended diagnostic.
        if(self.alternate_collision_solidity > 3):
            logging.error("Impossible alternate collision value in chunk?!: %d" % self.alternate_collision_solidity)
            exit(-1)
        if(self.normal_collision_solidity > 3):
            logging.error("Impossible normal collision value in chunk?!: %d" % self.normal_collision_solidity)
            exit(-1)

        # reaching back through all the references is really kinda icky,
        # should really make better encapsulation.
        self.primary_collision = None
        # if((self.tile_index >= len(self.chunk.chunk_array.primary_collision_index.ids)) or (self.tile_index >= len(self.chunk.chunk_array.secondary_collision_index.ids))):
        #     logging.warning("Tile index greater than length of collision index asked for. available: %d/%d, index: %d" % (len(self.chunk.chunk_array.primary_collision_index.ids), len(self.chunk.chunk_array.primary_collision_index.ids), self.tile_index))
        # else:
        primary_col_id = self.chunk.chunk_array.primary_collision_index.ids[self.tile_index]
        # TODO ick, this breaks encapsulation a bit too much
        self.primary_collision = self.chunk.chunk_array.sonic2.coll1.tiles[primary_col_id]
        if(self.chunk.chunk_array.secondary_collision_index is not None):
            secondary_col_id = self.chunk.chunk_array.secondary_collision_index.ids[self.tile_index]
            # NOTE(review): the secondary id is looked up in coll1, same
            # table as the primary; presumably coll2 was intended --
            # TODO confirm before changing, preserved as-is.
            self.secondary_collision = self.chunk.chunk_array.sonic2.coll1.tiles[secondary_col_id]

    def toSVG(self, xml):
        '''Emit this tile's primary collision shape as SVG into *xml*.

        Stroke colour encodes the flip state (black = none, red = Y,
        blue = X, magenta = both); the flips themselves are realised as
        a translate+scale group transform around the 16x16 cell.
        '''
        if(self.primary_collision is not None):
            colour = "000000"
            if(self.x_flipped and self.y_flipped):
                colour = "ff00ff"
            elif(self.y_flipped):
                colour = "ff0000"
            elif(self.x_flipped):
                colour = "0000ff"
            # transform="scale(%d, %d)" % (-1 if self.x_flipped else 1, -1 if self.y_flipped else 1),
            with xml.g(transform="translate(%d, %d) scale(%d, %d)" % (16 if self.x_flipped else 0,
                                                                      16 if self.y_flipped else 0,
                                                                      -1 if self.x_flipped else 1,
                                                                      -1 if self.y_flipped else 1),
                       style="stroke:#%s" % colour):
                # with xml.rect(width="16", height="16", style="fill:none;stroke:#000000"):
                #     pass
                self.primary_collision.toSVG(xml)
EBO.ai is a fast-growing chatbot company that gives companies the ability to use AI technology to get closer to their customers. EBO.ai was founded with a very simple idea: to build a company that could address the need of automating human communication at scale. Too much time is being spent in traditional customer service without a positive outcome. At EBO.ai we believe that technology should allow customers to connect with businesses at any time, with zero effort. Data collection in terms of Know Your Customer (KYC) – improving processes through analytical data not preciously available through conventional applications. The company was founded in 2017 by Gege Gatt, a technology entrepreneur, passionate about delivering value for companies through AI. Trained as a lawyer, he is CEO of EBO and also co-founder of four other start-ups including ICON, a web-application development company. He is the Chairman of Beacon Media Group which incorporates a digital news portal, a national radio station, as well as a Director of Yellow – one of the leading companies in the publishing industry. He is also a keen photographer and has had his photos published by the BBC and Saatchi & Saatchi. EBO.ai is backed by VC investment and run by an experienced leadership team that brings together decades of experience in technology, communication and business-process reengineering. We’re changing the way businesses innovate and manage their customer service channels. EBO.ai is much more than just another chatbot. We work with businesses to create something that is tailored to their individual requirements, with a ‘personality’ for the bot and a choice of vocabulary to match the task at hand. Being cloud based, the technology is easy to deploy and scale, and can be linked to other IT platforms, including CRM and e-commerce systems. It is also increasingly being used for internal communications and other purposes, such as on-boarding new employees. 
An off-the-shelf chatbot rarely has any of the personalisation and improvements needed to make a truly excellent impression on customers or patients. We have successfully built our technology stack and consultancy know-how and are targeting the healthcare and financial services sectors. Our geographic focus is currently the UK, but this is widening all the time. We are also further developing our AI abilities by utilising conversational data to create predictions which improve user-journeys and sustain business growth. As with all agile companies, we never stop refining and improving, and our future plans are a reflection of this. EBO.ai was spun out of ICON, a technology development agency. Whilst our team is truly international, spanning talent from Canada, the UK, Austria, Germany and many more countries, the company is headquartered in Malta and that's where we feel at home.
# -*- coding: utf-8 -*-
#
# HAMA (HTTP Anidb Metadata Agent) -- Plex agent entry points.
# NOTE: this file runs inside the Plex plugin framework; Log, Prefs, Core,
# JSON, HTTP, Platform, Locale, Agent, MessageContainer, MetadataSearchResult
# and the CACHE_* constants are globals injected by the framework, not imports.
#
# To Do
# - 'Debug' mode: logs per serie folder, need to use scanner logging
# - search word pick serie, do levenstein i partially match only (few chars difference)

### Imports ###
# Python Modules #
import re
import os
import datetime
# HAMA Modules #
import common        # Functions: GetPlexLibraries, write_logs, UpdateMeta Variables: PlexRoot, FieldListMovies, FieldListSeries, FieldListEpisodes, DefaultPrefs, SourceList
from common import Dict
import AnimeLists    # Functions: GetMetadata, GetAniDBTVDBMap, GetAniDBMovieSets Variables: AniDBMovieSets
import tvdb4         # Functions: GetMetadata Variables: None
import TheTVDBv2     # Functions: GetMetadata, Search Variables: None
import AniDB         # Functions: GetMetadata, Search, GetAniDBTitlesDB Variables: None
import TheMovieDb    # Functions: GetMetadata, Search Variables: None
import FanartTV      # Functions: GetMetadata Variables: None
import Plex          # Functions: GetMetadata Variables: None
import TVTunes       # Functions: GetMetadata Variables: None
import OMDb          # Functions: GetMetadata Variables: None
import MyAnimeList   # Functions: GetMetadata Variables: None
import AniList       # Functions: GetMetadata Variables: None
import Local         # Functions: GetMetadata Variables: None
import anidb34       # Functions: AdjustMapping Variables: None

### Variables ###

### Pre-Defined ValidatePrefs function Values in "DefaultPrefs.json", accessible in Settings>Tab:Plex Media Server>Sidebar:Agents>Tab:Movies/TV Shows>Tab:HamaTV #######
def ValidatePrefs():
  '''Validate agent settings against "DefaultPrefs.json".

  Called by the Plex framework when preferences are saved.  Logs every
  preference (flagging still-default values and misspelled metadata
  sources), optionally deletes the saved xml prefs file to reset settings
  to defaults, and returns a MessageContainer shown in the Plex UI.
  '''
  Log.Info("".ljust(157, '='))
  Log.Info ("ValidatePrefs(), PlexRoot: "+Core.app_support_path)
  #Reset to default agent setting
  Prefs['reset_to_defaults']  #avoid logs message on first accesslike: 'Loaded preferences from DefaultPrefs.json' + 'Loaded the user preferences for com.plexapp.agents.lambda'
  filename_xml  = os.path.join(common.PlexRoot, 'Plug-in Support', 'Preferences', 'com.plexapp.agents.hama.xml')
  filename_json = os.path.join(common.PlexRoot, 'Plug-ins', 'Hama.bundle', 'Contents', 'DefaultPrefs.json')
  Log.Info ("[?] agent settings json file: '{}'".format(os.path.relpath(filename_json, common.PlexRoot)))
  Log.Info ("[?] agent settings xml prefs: '{}'".format(os.path.relpath(filename_xml , common.PlexRoot)))
  if Prefs['reset_to_defaults'] and os.path.isfile(filename_xml):  os.remove(filename_xml)  #delete filename_xml file to reset settings to default
  PrefsFieldList = list(set(common.FieldListMovies + common.FieldListSeries + common.FieldListEpisodes + common.DefaultPrefs))  # set is un-ordered lsit so order is lost
  filename = os.path.join(Core.app_support_path, 'Plug-ins', 'Hama.bundle', 'Contents', 'DefaultPrefs.json')
  if os.path.isfile(filename):
    try:                    json = JSON.ObjectFromString(Core.storage.load(filename), encoding=None)  ### Load 'DefaultPrefs.json' to have access to default settings ###
    except Exception as e:  json = None;  Log.Info("Error :"+str(e)+", filename: "+filename)
    if json:
      Log.Info ("Loaded: "+filename)
      Pref_list={}
      for entry in json:  #Build Pref_list dict from json file
        Pref_list[entry['id']]=entry  #if key in Prefs gives: KeyError: "No preference named '0' found." so building dict
        if entry['type']=='bool':
          # NOTE(review): entry['type'] is 'bool' inside this branch, so
          # entry['type']==1 can never be true and 'value' is always forced
          # to 'false'; presumably this was meant to test the pref's actual
          # value/default instead -- TODO confirm upstream before changing.
          if entry['type']==1:  Pref_list[entry['id']]['value'] = 'true'
          else:                 Pref_list[entry['id']]['value'] = 'false'
      for entry in Pref_list:  # Check fields not in PrefsFieldList and sources mispelled
        if entry not in PrefsFieldList:  Log.Info("Next entry not in PrefsFieldList, so will not be updated by the engine")
        elif entry not in common.DefaultPrefs:  # Check for mispelled metadata sources
          for source in Prefs[entry].replace('|', ',').split(','):
            if source.strip() not in common.SourceList+('None', ''):  Log.Info(" - Source '{}' invalid".format(source.strip()))
        Log.Info("Prefs[{key:<{width}}] = {value:<{width2}}{default}".format(key=entry, width=max(map(len, PrefsFieldList)), value=Prefs[entry] if Prefs[entry]!='' else "Error, go in agent settings, set value and save", width2=max(map(len, [Pref_list[x]['default'] for x in Pref_list])), default=' (still default value)' if Prefs[entry] == Pref_list[entry]['default'] else " (Default: "+Pref_list[entry]['default']+")"))
      for entry in PrefsFieldList:
        if entry not in Pref_list:  Log.Info("Prefs[{key:<{width}}] does not exist".format(key=entry, width=max(map(len, PrefsFieldList))))
  #Plex Media Server\Plug-in Support\Preferences\com.plexapp.agents.hama.xml
  Log.Info("".ljust(157, '='))
  return MessageContainer('Success', "DefaultPrefs.json valid")

### Pre-Defined Start function ############################################################################################################################################
def Start():
  '''Plex framework startup hook: validate prefs and preload core files.'''
  Log.Info("".ljust(157, '='))
  Log.Info("HTTP Anidb Metadata Agent by ZeroQI (Forked from Atomicstrawberry's v0.4, AnimeLists XMLs by SdudLee) - CPU: {}, OS: {}".format(Platform.CPU, Platform.OS))
  #HTTP.CacheTime = CACHE_1DAY  # in sec: CACHE_1MINUTE, CACHE_1HOUR, CACHE_1DAY, CACHE_1WEEK, CACHE_1MONTH
  HTTP.CacheTime = CACHE_1MINUTE*30
  ValidatePrefs()
  common.GetPlexLibraries()
  # Load core files
  AnimeLists.GetAniDBTVDBMap()
  AnimeLists.GetAniDBMovieSets()
  AniDB.GetAniDBTitlesDB()

### Movie/Serie search ###################################################################################################################################################
def Search(results, media, lang, manual, movie):
  '''Plex search entry point shared by the movie and TV agents.

  Resolves a library item to a metadata id: either forced via a
  "Show name [source-id]" suffix in the title (or a bare tvdb id), or by
  querying AniDB, TheMovieDb and TheTVDB in turn, keeping the best scores.
  '''
  from common import Log  #Import here for startup logging to go to the plex pms log
  orig_title = media.name if movie else media.show
  Log.Open(media=media, movie=movie, search=True)
  Log.Info('=== Search() ==='.ljust(157, '='))
  Log.Info("title: '%s', name: '%s', filename: '%s', manual: '%s', year: '%s'" % (orig_title, media.name, media.filename, str(manual), media.year))
  #if media.filename is not None:  filename = String.Unquote(media.filename)  #auto match only
  Log.Info("start: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
  Log.Info("".ljust(157, '='))
  if not orig_title:  return

  #clear-cache directive
  if orig_title == "clear-cache":
    HTTP.ClearCache()
    results.Append(MetadataSearchResult(id='clear-cache', name='Plex web cache cleared', year=media.year, lang=lang, score=0))
    return

  ### Check if a guid is specified "Show name [anidb-id]" ###
  Log.Info('--- force id ---'.ljust(157, '-'))
  if orig_title and orig_title.isdigit():  orig_title = "xxx [tvdb-{}]".format(orig_title)  #Support tvdbid as title, allow to support Xattr from FileBot with tvdbid filled in
  match = re.search(r"(?P<show>.*?) ?\[(?P<source>(anidb(|[2-9])|tvdb(|[2-9])|tmdb|tsdb|imdb))-(?P<guid>[^\[\]]*)\]", orig_title, re.IGNORECASE)
  if match is not None:
    guid=match.group('source') + '-' + match.group('guid')
    # anidb numbering only makes sense for single-season shows
    if guid.startswith('anidb') and not movie and max(map(int, media.seasons.keys()))>1:  Log.Info('[!] multiple seasons = tvdb numbering, BAKA!')
    results.Append(MetadataSearchResult(id=guid, name=match.group('show')+" ["+guid+']', year=media.year, lang=lang, score=100))
    Log.Info("Forced ID - source: {}, id: {}, title: '{}'".format(match.group('source'), match.group('guid'), match.group('show')))
  else:
    #if media.year is not None:  orig_title = orig_title + " (" + str(media.year) + ")"  ### Year - if present (manual search or from scanner but not mine), include in title ###
    Log.Info('--- source searches ---'.ljust(157, '-'))
    maxi, n = 0, 0
    # Cascade through the sources, stopping early on a confident match.
    if movie or max(map(int, media.seasons.keys()))<=1:  maxi, n = AniDB.Search(results, media, lang, manual, movie)
    if maxi<50 and movie:                                maxi = TheMovieDb.Search(results, media, lang, manual, movie)
    if maxi<80 and not movie or n>1:                     maxi = max(TheTVDBv2.Search(results, media, lang, manual, movie), maxi)
  Log.Info("".ljust(157, '='))
  Log.Info("end: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
  Log.Close()

### Update Movie/Serie from metadata.id assigned #########################################################################################################################
def Update(metadata, media, lang, force, movie):
  '''Plex update entry point: gather metadata for the assigned metadata.id.

  Each metadata source module returns a dict; common.UpdateMeta() merges
  them according to the per-field source priority in the agent settings.
  '''
  from common import Log  #Import here for startup logging to go to the plex pms log
  Log.Open(media=media, movie=movie, search=False)
  source = metadata.id.split('-', 1)[0]
  error_log = { 'AniDB summaries missing'   :[], 'AniDB posters missing'      :[], 'anime-list AniDBid missing':[], 'anime-list studio logos'  :[],
                'TVDB posters missing'      :[], 'TVDB season posters missing':[], 'anime-list TVDBid missing' :[], 'Plex themes missing'      :[],
                'Missing Episodes'          :[], 'Missing Specials'           :[], 'Missing Episode Summaries' :[], 'Missing Special Summaries':[]}
  Log.Info('=== Update() ==='.ljust(157, '='))
  Log.Info("id: {}, title: {}, lang: {}, force: {}, movie: {}".format(metadata.id, metadata.title, lang, force, movie))
  Log.Info("start: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
  # Major meta source hard required orders (ignoring id info):
  # mappingList:                  AnimeLists->TheTVDBv2/tvdb4/AniDB->AdjustMapping
  # mappingList['season_map']:    AnimeLists->TheTVDBv2->AdjustMapping
  # mappingList['relations_map']: AniDB->AdjustMapping
  # mappingList['absolute_map']:  tvdb4->TheTVDBv2->AniDB
  dict_AnimeLists, AniDBid, TVDBid, TMDbid, IMDbid, mappingList = AnimeLists.GetMetadata(media, movie, error_log, metadata.id)
  dict_tvdb4                = tvdb4.GetMetadata(media, movie, source, TVDBid, mappingList)
  dict_TheTVDB, IMDbid      = TheTVDBv2.GetMetadata(media, movie, error_log, lang, source, AniDBid, TVDBid, IMDbid, mappingList)
  dict_AniDB, ANNid, MALid  = AniDB.GetMetadata(media, movie, error_log, source, AniDBid, TVDBid, AnimeLists.AniDBMovieSets, mappingList)
  dict_TheMovieDb, TSDbid, TMDbid, IMDbid = TheMovieDb.GetMetadata(media, movie, TVDBid, TMDbid, IMDbid)
  dict_FanartTV             = FanartTV.GetMetadata( movie, TVDBid, TMDbid, IMDbid)
  dict_Plex                 = Plex.GetMetadata(metadata, error_log, TVDBid, Dict(dict_TheTVDB, 'title'))
  dict_TVTunes              = TVTunes.GetMetadata(metadata, Dict(dict_TheTVDB, 'title'), Dict(mappingList, AniDBid, 'name'))  #Sources[m:eval('dict_'+m)]
  dict_OMDb                 = OMDb.GetMetadata(movie, IMDbid)  #TVDBid=='hentai'
  dict_MyAnimeList          = MyAnimeList.GetMetadata(MALid, "movie" if movie else "tvshow", media)
  dict_AniList              = AniList.GetMetadata(AniDBid, MALid)
  dict_Local                = Local.GetMetadata(media, movie)
  # AdjustMapping may rewrite the season/episode mapping; if it did, AniDB
  # metadata has to be fetched again against the adjusted mappingList.
  if anidb34.AdjustMapping(source, mappingList, dict_AniDB, dict_TheTVDB, dict_FanartTV):
    dict_AniDB, ANNid, MALid  = AniDB.GetMetadata(media, movie, error_log, source, AniDBid, TVDBid, AnimeLists.AniDBMovieSets, mappingList)
  Log.Info('=== Update() ==='.ljust(157, '='))
  Log.Info("AniDBid: '{}', TVDBid: '{}', TMDbid: '{}', IMDbid: '{}', ANNid:'{}', MALid: '{}'".format(AniDBid, TVDBid, TMDbid, IMDbid, ANNid, MALid))
  common.write_logs(media, movie, error_log, source, AniDBid, TVDBid)
  common.UpdateMeta(metadata, media, movie, {'AnimeLists': dict_AnimeLists, 'AniDB': dict_AniDB, 'TheTVDB': dict_TheTVDB, 'TheMovieDb': dict_TheMovieDb, 'FanartTV': dict_FanartTV,
                                             'tvdb4': dict_tvdb4, 'Plex': dict_Plex, 'TVTunes': dict_TVTunes, 'OMDb': dict_OMDb, 'Local': dict_Local, 'AniList': dict_AniList,
                                             'MyAnimeList': dict_MyAnimeList}, mappingList)
  Log.Info("end: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
  Log.Close()

### Agent declaration ####################################################################################################################################################
class HamaTVAgent(Agent.TV_Shows):  # 'com.plexapp.agents.none', 'com.plexapp.agents.opensubtitles'
  # Thin framework shim: delegates to the shared Search()/Update() with movie=False.
  name, primary_provider, fallback_agent, contributes_to, accepts_from = 'HamaTV', True, False, None, ['com.plexapp.agents.localmedia']
  languages = [Locale.Language.English, 'fr', 'zh', 'sv', 'no', 'da', 'fi', 'nl', 'de', 'it', 'es', 'pl', 'hu', 'el', 'tr', 'ru', 'he', 'ja', 'pt', 'cs', 'ko', 'sl', 'hr']
  def search (self, results, media, lang, manual):  Search (results, media, lang, manual, False)
  def update (self, metadata, media, lang, force ):  Update (metadata, media, lang, force, False)

class HamaMovieAgent(Agent.Movies):
  # Thin framework shim: delegates to the shared Search()/Update() with movie=True.
  name, primary_provider, fallback_agent, contributes_to, accepts_from = 'HamaMovies', True, False, None, ['com.plexapp.agents.localmedia']
  languages = [Locale.Language.English, 'fr', 'zh', 'sv', 'no', 'da', 'fi', 'nl', 'de', 'it', 'es', 'pl', 'hu', 'el', 'tr', 'ru', 'he', 'ja', 'pt', 'cs', 'ko', 'sl', 'hr']
  def search (self, results, media, lang, manual):  Search (results, media, lang, manual, True)
  def update (self, metadata, media, lang, force ):  Update (metadata, media, lang, force, True)
Know Swainsboro High Class of 1958 graduates that are NOT on this List? Help us Update the 1958 Class List by adding missing names. More 1958 alumni from Swainsboro HS have posted profiles on Classmates.com®. Click here to register for free at Classmates.com® and view other 1958 alumni. Missing some friends from Swainsboro that graduated with you in 1958? Check the list below that shows the Swainsboro class of '58.
# -*- coding: utf-8 -*-
"""Field validators: e-mail, domain, username, IP address and hostname.

Each ``clean_*`` function raises *error_class* (default ``ValidationError``)
with a translated message when *value* is invalid, and returns ``None``
otherwise.  *field_name* is forwarded to the raised exception.
"""
import re

from .exceptions import ValidationError
from . import utils
from .constants import _

# Simplified RFC 5322-style matcher: dot-atom or quoted-string local part,
# then a dotted domain.
# NOTE(review): in the quoted-string escape class, ``\\[\001-011...`` looks
# like it was meant to be ``\\[\001-\011...`` (octal escape); preserved
# as-is to avoid changing accepted inputs -- confirm against upstream.
EMAIL_REGEX = re.compile(
    # dot-atom
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
    # quoted-string
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"'
    # domain (max length of an ICAAN TLD is 22 characters)
    r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,253}[A-Z0-9])?\.)+[A-Z]{2,22}$',
    re.IGNORECASE
)


def clean_domain(value, field_name=None, error_class=ValidationError):
    """Validate *value* as a bare domain by prepending a dummy local part."""
    new_value = "user@%s" % value
    if not EMAIL_REGEX.match(new_value):
        message = _(u"Invalid Domain: %s") % value
        raise error_class(message, field_name=field_name)


def clean_email(value, field_name=None, error_class=ValidationError):
    """Validate *value* as an e-mail address."""
    if not EMAIL_REGEX.match(value):
        message = _(u"Invalid Email: %s") % value
        raise error_class(message, field_name=field_name)


def clean_username(value, field_name=None, error_class=ValidationError):
    """Validate *value* as a username.

    TODO: not implemented -- currently accepts every value, which makes
    clean_email_or_username() accept any string as a "username".
    """
    pass


def clean_email_or_username(value, field_name=None, error_class=ValidationError):
    """Accept *value* if it is a valid e-mail OR a valid username.

    Raises *error_class* only when both validators reject the value.
    """
    try:
        clean_email(value, field_name, error_class)
        return  # valid e-mail
    except error_class:  # was a bare except; narrowed to the class the inner validators raise
        pass
    try:
        clean_username(value, field_name, error_class)
        return  # valid username
    except error_class:
        pass
    message = _(u"Invalid Username: %s") % value
    raise error_class(message, field_name=field_name)


def clean_email_or_domain(value, field_name=None, error_class=ValidationError):
    """Accept *value* if it is a valid e-mail OR a valid domain."""
    try:
        clean_email(value, field_name, error_class)
        return  # valid e-mail
    except error_class:
        pass
    try:
        clean_domain(value, field_name, error_class)
        return  # valid domain
    except error_class:
        pass
    message = _(u"Invalid Email or Domain: %s") % value
    raise error_class(message, field_name=field_name)


def clean_ip_address(value, field_name=None, error_class=ValidationError):
    """Validate *value* as an IPv4 or IPv6 address."""
    valid = utils.check_ipv4(value) or utils.check_ipv6(value)
    if not valid:
        message = _(u"Invalid IP Address: %s") % value
        raise error_class(message, field_name=field_name)


def clean_ip_address_or_network(value, field_name=None, error_class=ValidationError):
    """Validate *value* as an IPv4/IPv6 address or a network specification."""
    valid = utils.check_ipv4(value) or utils.check_ipv6(value) or utils.check_is_network(value)
    if not valid:
        message = _(u"Invalid IP Address: %s") % value
        raise error_class(message, field_name=field_name)


def clean_hostname(value, field_name=None, error_class=ValidationError):
    """Validate *value* as a fully-qualified hostname (>= 3 labels).

    NOTE(review): an empty or None value passes silently -- this looks
    intentional (optional hostname) but confirm with callers.  The literal
    "unknow" comparison may be a typo for "unknown"; preserved as-is to
    avoid changing behavior.
    """
    valid = True
    if value:
        vals = value.split(".")
        if value is None or len(value.strip()) == 0:
            valid = False
        elif len(value) > 255:
            valid = False
        elif len(vals) < 3:
            valid = False
        elif value.strip().lower() == "unknow":
            valid = False
        # Last two labels form the registrable domain; cap at 63 chars.
        domain = ".".join(vals[(len(vals) - 2):(len(vals))])
        if len(domain) > 63:
            valid = False
    if not valid:
        message = _(u"Invalid Hostname: %s") % value
        raise error_class(message, field_name=field_name)
Free sources for teachers, educational ideas and suggestions, instructional subjects, Personal Learning Networks, Undertaking Primarily based Studying, Google, Evernote, Discovery Education and more. Don’t feel to dangerous for Willie as he raised a lot of money this reporting period and sadly I think he’s simply getting began. Gary Chartrand, the anti trainer and pro privatization grocer, had quite just a few of his associates donate to him and in contrast to Willie’s wife they may afford more than a greenback. Here’s a trace though, in the event you see individuals just like the Clements, Halverson or Stein supporting any person working for college board you should assist their opponent. They only believe in gimmicks too. Our members are the nationwide insurance coverage associations in 35 international locations, representing undertakings that account for around 95% of total European premium income, straight make use of 985 000 folks and invest nearly €9 900bn within the economic system. Mortgage-related securities created by non-governmental issuers (comparable insurance to industrial banks, savings and mortgage institutions, private mortgage insurance coverage corporations, mortgage bankers and different secondary market issuers) could also be supported by assorted forms of insurance coverage or ensures, together with specific person mortgage, title, pool and hazard insurance coverage and letters of credit, which may be issued by governmental entities, private insurers or the mortgage poolers. Free assets for academics, educational concepts and suggestions, instructional subjects, Personal Studying Networks, Project Based Learning, Google, Evernote, Discovery Schooling and extra. I’ve friends and family who work in not-faculty settings. I have spent some summers working in such locations myself.
For my high school students, a lot of whom are already holding down a job, that is a worthwhile connection. My lessons about speaking to others and talking to prospects are better classes because I can join them to, for instance, my time at a catalog call heart. Figuring out what it is like inside native employers as a result of I’ve mates or family there may be helpful for me, nevertheless it’s helpful for my college students, too. Once they get on the bus on the finish of the day, I don’t simply fold up in a closet until tomorrow. In selecting the Emerson Collective, Mr. Duncan joined one of his former top aides at USDOE, Ms. Russlyn Ali. Mr. Duncan labored together with Ms. Ali at USDOE on the $4.35 billion Race to the High (RTTT) , which provided stimulus money to states as an incentive to undertake the Common Core requirements and assessments, increase constitution faculties, and use test scores to evaluate lecturers – all ideas promoted by the company training reformers. Here is a video of Mr. Perriello sharing his ideas on RTTT in March 2012 as President and CEO of the Heart for American Progress Motion Fund. Key dates for colleges, together with moderation days, skilled learning days and public holidays. In EXTRA’s unique agreement with NA, the 7 HS seats were break up 4-three with EXTRA getting the extra seat – New Action proposed that since EXTRA had gotten extra votes in the 2013 election. EXTRA selected its four individuals and also an alternate. Within a few days of beginning the petition campaign, one of the NA folks pulled out and we needed to fill this position ASAP – and we had an issue with our alternate who a few of us believed was not going to stay in educating – and thus we would lose that seat if we gained. Each class is on a mission. Now, with Google Classroom, we have now a mission management. Designed with teachers and college students, it helps easily connect the class, track their progress, and achieve more collectively. 
(2) – You had been pleased along with your work on Take a look at One so that you begin to chill out and pay extra consideration to your other courses or your life outside of sophistication. The strain is off and you cut back on your study time. I’m not a big fan of relaxed college students. The A turns into a B and finally a C and you’ll be mystified as to how you lost the A. If you were pleased, that is not an excellent cause to slack off. Don’t do it.
import math import mathphys.constants as consts import mathphys.units as units import mathphys.constants as consts class Beam: def __init__(self, energy, current = 0): self.energy = energy self.current = current self.brho, self.velocity, self.beta, self.gamma = Beam.calc_brho(self.energy) @staticmethod def calc_brho(energy): electron_rest_energy_GeV = units.joule_2_eV * consts.electron_rest_energy / 1e9 gamma = energy/electron_rest_energy_GeV beta = math.sqrt(((gamma-1.0)/gamma)*((gamma+1.0)/gamma)) velocity = consts.light_speed * beta brho = beta * (energy * 1e9) / consts.light_speed return brho, velocity, beta, gamma def __str__(self): r = '' r += '{0:<10s} {1:f} GeV'.format('energy:', self.energy) r += '\n{0:<10s} {1:f}'.format('gamma:', self.gamma) r += '\n{0:<10s} 1 - {1:e}'.format('beta:', 1.0-self.beta) r += '\n{0:<10s} {1:.0f} - {2:f} m/s'.format('velocity:', consts.light_speed, consts.light_speed - self.velocity) r += '\n{0:<10s} {1:f} T.m'.format('brho:', self.brho) return r
Abdominal separation or diastasis rectus abdominus (DRA) is the widening of the gap of the linea alba (the connective tissue) between the right and left rectus abdominus. DRA is a very common condition that many women don’t even know they have. It is not often brought up in prenatal education and is often missed in many pre or postpartum exercises classes. Some research states that 100% of women in pregnancy have some degree of separation1 (think about it – something has to give!). Another body of research reported that 36% of women continue to have a separation at 5-7 weeks post-delivery2. Research also shows that left untreated, the gap at 8-weeks remains unchanged at 1-year postpartum3. It has also been shown that 66% of women with DRA also have some level of pelvic floor dysfunction4 (e.g. bladder or bowel control problems and/or pelvic organ prolapse). Improper training in attempt to correct the DRA can worsen the pelvic floor dysfunction, not to mention worsen the existing separation. I have seen men who have given themselves a diastasis with inappropriate abdominal training! Even if women or men know they have a DRA, advice on how to resolve the separation varies dramatically. Promises of closing the gap and resolving the diastasis are often given. Unfortunately, these promises rarely deliver – there is often a degree of irreversible connective tissue stretch that does not completely recover. Abdominoplasty, or a Tummy Tuck, is a surgical procedure that closes the connective tissue component of DRA completely. This procedure is major surgery, and is not covered by basic health care. Does that mean that you cannot reduce the gap between the recti with exercise? No, often you can with proper training. You can also improve the tensile strength of this connective tissue, and the tone and strength of the deep abdominals (transversus abdominus). This improves the support and appearance of your abdominal wall as well as provide stability for your back and pelvis. 
There is often an emotional component to the DRA for the postpartum mother. With more slack to the abdominal wall, the abdominal organs are not held in as well and protrude outwards when sitting or standing. This tends to get worse as the day progresses and the deep abdominals fatigue. A woman with DRA is often asked if she is pregnant, and this can be understandably upsetting to her. Expectations of our society to rebound right back to the pre-pregnancy state are everywhere – in the media as well as our own pre-conceived notions. Marketed programs for DRA promising to “close the gap” do not help the pressure either. Proper training of the abdominals is necessary. There are many products and exercise regimens aimed at treating DRA on the market. But here is the thing: everyone’s body is different. The degree of the separation; the awareness and control of the deep abdominal muscles; the strength and endurance of the abdominal muscles; the existence of co-existing pelvic floor dysfunction – all of these is different from person to person. It is unrealistic to paint everyone with a DRA with the same brush. Treatment needs to be individualized.
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for creating digit recognition tasks on EMNIST."""

import enum
from typing import Optional, Union

import tensorflow as tf

from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model
from tensorflow_federated.python.simulation.baselines import baseline_task
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines import task_data
from tensorflow_federated.python.simulation.baselines.emnist import emnist_models
from tensorflow_federated.python.simulation.baselines.emnist import emnist_preprocessing
from tensorflow_federated.python.simulation.datasets import client_data
from tensorflow_federated.python.simulation.datasets import emnist


class DigitRecognitionModel(enum.Enum):
  """Enum for EMNIST digit recognition models."""
  CNN_DROPOUT = 'cnn_dropout'
  CNN = 'cnn'
  TWO_LAYER_DNN = '2nn'


_DIGIT_RECOGNITION_MODELS = [e.value for e in DigitRecognitionModel]


def _get_digit_recognition_model(model_id: Union[str, DigitRecognitionModel],
                                 only_digits: bool) -> tf.keras.Model:
  """Constructs a `tf.keras.Model` for digit recognition.

  Args:
    model_id: A string or `DigitRecognitionModel` selecting the architecture.
      Must be one of 'cnn_dropout', 'cnn', or '2nn'.
    only_digits: Whether the model's output layer has 10 classes (`True`) or
      62 classes (`False`).

  Returns:
    An uncompiled `tf.keras.Model`.

  Raises:
    ValueError: If `model_id` is not a recognized model identifier.
  """
  try:
    model_enum = DigitRecognitionModel(model_id)
  except ValueError:
    # Bug fix: the message previously formatted the imported `model` module
    # (not the offending `model_id`) and had its format arguments swapped.
    raise ValueError('The model argument must be one of {}, found {}'.format(
        _DIGIT_RECOGNITION_MODELS, model_id))
  if model_enum == DigitRecognitionModel.CNN_DROPOUT:
    keras_model = emnist_models.create_conv_dropout_model(
        only_digits=only_digits)
  elif model_enum == DigitRecognitionModel.CNN:
    keras_model = emnist_models.create_original_fedavg_cnn_model(
        only_digits=only_digits)
  elif model_enum == DigitRecognitionModel.TWO_LAYER_DNN:
    keras_model = emnist_models.create_two_hidden_layer_model(
        only_digits=only_digits)
  else:
    # Defensive: unreachable while every enum member is handled above.
    # Bug fix: format arguments were swapped relative to the message text.
    raise ValueError('The model id must be one of {}, found {}'.format(
        _DIGIT_RECOGNITION_MODELS, model_enum))
  return keras_model


def create_digit_recognition_task_from_datasets(
    train_client_spec: client_spec.ClientSpec,
    eval_client_spec: Optional[client_spec.ClientSpec],
    model_id: Union[str, DigitRecognitionModel], only_digits: bool,
    train_data: client_data.ClientData,
    test_data: client_data.ClientData) -> baseline_task.BaselineTask:
  """Creates a baseline task for digit recognition on EMNIST.

  Args:
    train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying
      how to preprocess train client data.
    eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
      specifying how to preprocess evaluation client data. If set to `None`,
      the evaluation datasets will use a batch size of 64 with no extra
      preprocessing.
    model_id: A string identifier for a digit recognition model. Must be one
      of 'cnn_dropout', 'cnn', or '2nn'. These correspond respectively to a
      CNN model with dropout, a CNN model with no dropout, and a densely
      connected network with two hidden layers of width 200.
    only_digits: A boolean indicating whether to use the smaller EMNIST-10
      dataset with only 10 numeric classes (`True`) or the full EMNIST-62
      dataset containing 62 alphanumeric classes (`False`).
    train_data: A `tff.simulation.datasets.ClientData` used for training.
    test_data: A `tff.simulation.datasets.ClientData` used for testing.

  Returns:
    A `tff.simulation.baselines.BaselineTask`.
  """
  emnist_task = 'digit_recognition'

  if eval_client_spec is None:
    # Default evaluation spec: single pass, batch size 64, no shuffling.
    eval_client_spec = client_spec.ClientSpec(
        num_epochs=1, batch_size=64, shuffle_buffer_size=1)

  train_preprocess_fn = emnist_preprocessing.create_preprocess_fn(
      train_client_spec, emnist_task=emnist_task)
  eval_preprocess_fn = emnist_preprocessing.create_preprocess_fn(
      eval_client_spec, emnist_task=emnist_task)

  task_datasets = task_data.BaselineTaskDatasets(
      train_data=train_data,
      test_data=test_data,
      validation_data=None,
      train_preprocess_fn=train_preprocess_fn,
      eval_preprocess_fn=eval_preprocess_fn)

  def model_fn() -> model.Model:
    return keras_utils.from_keras_model(
        keras_model=_get_digit_recognition_model(model_id, only_digits),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        input_spec=task_datasets.element_type_structure,
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

  return baseline_task.BaselineTask(task_datasets, model_fn)


def create_digit_recognition_task(
    train_client_spec: client_spec.ClientSpec,
    eval_client_spec: Optional[client_spec.ClientSpec] = None,
    model_id: Union[str, DigitRecognitionModel] = 'cnn_dropout',
    only_digits: bool = False,
    cache_dir: Optional[str] = None,
    use_synthetic_data: bool = False) -> baseline_task.BaselineTask:
  """Creates a baseline task for digit recognition on EMNIST.

  The goal of the task is to minimize the sparse categorical crossentropy
  between the output labels of the model and the true label of the image.
  When `only_digits = True`, there are 10 possible labels (the digits 0-9),
  while when `only_digits = False`, there are 62 possible labels (both
  numbers and letters).

  This classification can be done using a number of different models,
  specified using the `model_id` argument. Below we give a list of the
  different models that can be used:

  *   `model_id = cnn_dropout`: A moderately sized convolutional network.
      Uses two convolutional layers, a max pooling layer, and dropout,
      followed by two dense layers.
  *   `model_id = cnn`: A moderately sized convolutional network, without
      any dropout layers. Matches the architecture of the convolutional
      network used by (McMahan et al., 2017) for the purposes of testing the
      FedAvg algorithm.
  *   `model_id = 2nn`: A densely connected network with 2 hidden layers,
      each with 200 hidden units and ReLU activations.

  Args:
    train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying
      how to preprocess train client data.
    eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
      specifying how to preprocess evaluation client data. If set to `None`,
      the evaluation datasets will use a batch size of 64 with no extra
      preprocessing.
    model_id: A string identifier for a digit recognition model. Must be one
      of 'cnn_dropout', 'cnn', or '2nn'. These correspond respectively to a
      CNN model with dropout, a CNN model with no dropout, and a densely
      connected network with two hidden layers of width 200.
    only_digits: A boolean indicating whether to use the smaller EMNIST-10
      dataset with only 10 numeric classes (`True`) or the full EMNIST-62
      dataset containing 62 alphanumeric classes (`False`).
    cache_dir: An optional directory to cache the downloaded datasets. If
      `None`, they will be cached to `~/.tff/`.
    use_synthetic_data: A boolean indicating whether to use synthetic EMNIST
      data. This option should only be used for testing purposes, in order
      to avoid downloading the entire EMNIST dataset.

  Returns:
    A `tff.simulation.baselines.BaselineTask`.
  """
  if use_synthetic_data:
    synthetic_data = emnist.get_synthetic()
    emnist_train = synthetic_data
    emnist_test = synthetic_data
  else:
    emnist_train, emnist_test = emnist.load_data(
        only_digits=only_digits, cache_dir=cache_dir)

  return create_digit_recognition_task_from_datasets(train_client_spec,
                                                     eval_client_spec,
                                                     model_id, only_digits,
                                                     emnist_train, emnist_test)
Peculiar Branch Chapter 3A | Sci-Ence! Justice Leak! I don’t know if you’ve ever been put in charge of the security for a peace conference between warring magical worlds from different dimensions, where the fate of the multiverse could hang in the balance, but it’s really, really boring. For a start, you wouldn’t believe how many presentations you have to sit through. There are some things that have become ubiquitous throughout the multiverse, and Powerpoint is one of them. I’m reliably informed that Bill Gates hired a level three magic user to embed a charm in the software code, so that anyone who had more than three subordinates in their job would automatically find themselves using the thing. Well, I say reliably informed, Tony The Liar told me, but I still like to believe it. So I had to sit around a table in a conference centre, drinking foul coffee out of tiny china cups with a lad called Terry from Birmingham who’d been assigned to look after the Queen of the Fae, and a nice-looking sort from Leeds called Sandra, whose first words to me had been “I have a black-belt in jiu-jitsu” and who sat as far away from me as possible (the Wallace charm strikes again) and was bodyguarding the Longagovian ambassador. There were also people from the security services of each of the other worlds there to shadow us — a fat-looking gobboe from Fairyland, one of the few they have left there, an Elvish woman named Dralucia from the Misty Worlds, and there was a chair which looked empty but which everyone swore contained a magic user from Faraway And Longago who had transcended the need for corporeal form. Personally, I thought the crafty sod had just used that as an excuse not to turn up, as a variety — no, I take that back — as a succession of middling nobodies came up in front of us to show us pie charts and tell us about the fire regulations and show us little embedded videos about the planopolitical situation that told us nothing we didn’t already know. 
I swear two of them had got each other’s Powerpoint presentation by mistake and not noticed. Luckily, one of the other things that is constant across every universe is sloping off for a crafty fag, so I waited for the gobboe to go on a break, and then I joined him outside in the drizzle. “Be my guest.” he replied, pulling one out of the packet. He looked at me very strangely for a moment, and then lit my cigarette with the end of his. I thought about this for a while. I thought about how we’d been sitting in an out-of-town conference centre of the type that managed to be just inside the ringroad while simultaneously being completely bloody inaccessible, in a small room with windows that didn’t open that was beginning to stink of spilled coffee and stale farts, listening to tedious little wanksplats explain the finer details of the Dangerous Substances and Explosive Atmospheres Regulations (2002) to us. And then I thought about his description of this as “a pleasant change”. And with that, Skjorvorvorvik pinched out his cigarette, stuck it behind his ear, and headed inside, just as the drizzle turned into a downpour. I threw the rest of mine into a puddle and followed him. This entry was posted in books and tagged fiction, peculiar branch. Bookmark the permalink. Is Peculiar Branch a national unit under the Home Office or ACPO? Otherwise Terry and Sandra would be working for West Midlands Anomalous Occurences Department and West Yorkshire Anomalous Occurences Department respectively and you could add a whole new layer of soulcrushing procedural tedium for Sgt. Wallace. (Assuming the book goes the way I’m planning, anyway).
#!/bin/usr/python import sys, getopt from trace_gen import TraceGen def main(): tracer = TraceGen(39, 28, 32) filepath = sys.argv[1] filename = filepath + "test_load.tr" file = open(filename, "w") file.write(tracer.print_header()) file.write(tracer.print_comment("Load from address - 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60")) for i in range(0, 64, 4): temp_vaddr = (1 << 31) | i temp_ptag = (1<<19) file.write(tracer.send_load(temp_vaddr, temp_ptag, False)) for i in range(0, 64, 4): file.write(tracer.recv_data(i)) file.write(tracer.test_done()) file.close() filename = filepath + "test_uncached_load.tr" file = open(filename, "w") file.write(tracer.print_header()) file.write(tracer.print_comment("Uncached Load from address 36")) temp_vaddr = (1 << 31) | 36 temp_ptag = (1 << 19) file.write(tracer.send_load(temp_vaddr, temp_ptag, True)) file.write(tracer.recv_data(36)) file.write(tracer.test_done()) file.close() if __name__ == "__main__": main()
Bancrofts Heating & Plumbing are your local central heating and plumbing company. We specialise in installing full heating systems and replacement boilers. The Bancroft family business has been serving Torquay, Paignton, Brixham and the South Hams areas for over 25 years and offer a professional, reliable service which is guaranteed and insurance backed. We can attend to all your general plumbing problems throughout Torbay. Whether you're looking for a full system installation, a boiler replacement, a new radiator or an annual service we can provide assistance throughout Torbay and the South Hams. Bancrofts Heating & Plumbing are your Worcester Bosch Accredited installer in Torquay, Paignton, Brixham, Kingswear and Dartmouth. Updating your bathroom in Torbay? Call Bancrofts Heating & Plumbing for a competitive quote to install your bathroom. We can help you plan and source your ideal bathroom. In 2017 the entire range of Worcester Greenstar gas boilers have been awarded Which? Best Buy. Bancrofts are a Worcester Accredited company which means that we have the advantage of being able to offer some very reassuring guarantees on qualifying Worcester Greenstar boilers. Bancrofts Heating & Plumbing are your local central heating company specialising in installing full heating systems and replacement boilers. The Bancroft family business has been serving the Torbay and South Hams areas for over 25 years and offer a professional, reliable service which is guaranteed and insurance backed.
"""Ce module contient les fonctions, classes et exceptions qui peuvent être utilisées dans un programme Python pour le Monde de Reeborg. """ # When generating documentation using sphinx, these modules are both # unavailable and not needed try: from browser import window RUR = window.RUR except ImportError: from collections import defaultdict window = defaultdict(str) print("\n --> Skipping importing from browser for sphinx.\n") # All functions from Javascript used below should have names of the form # RUR._xyz_ and be defined in commands.js; functions and methods should appear # in the same order as they appear in the English version. def au_but(): #py:at_goal """Indique si Reeborg a atteint la position demandée. Returns: True si Reeborg a atteint son but, False autrement. """ return RUR._at_goal_() def observer(expr): #py:add_watch """Ajoute une expression Python valide (donnée comme une chaîne) à la liste des variables à observer. """ RUR.add_watch(expr) def construit_un_mur(): #py:build_wall """Indique à Reeborg de construire un mur devant sa position.""" RUR._build_wall_() def transporte(obj=None): #py:carries_object """ Indique si Reeborg transporte un ou des objets. Args: obj: paramètre optionnel qui est le nom d'un objet sous forme de chaîne de caractères. Returns: une liste d'objets retrouvés. Si Reeborg ne transporte aucun objet, ou si un objet spécifié comme paramètre n'est pas présent, le résultat est une liste vide. 
Exemples possibles: >>> transporte() ["jeton", "pomme"] >>> transporte("jeton") ["jeton"] >>> transporte("fraise") [] """ if obj is not None: ans = RUR._carries_object_(obj) else: ans = RUR._carries_object_() return list(ans) def efface_print(): #py:clear_print """Efface le texte précédemment écrit avec des fonctions print().""" RUR._clear_print_() def robot_par_defaut(): #py:default_robot """Retourne un robot recréé pour correspondre au robot par défaut.""" class Robot(RobotUsage): def __init__(self): self.body = RUR._default_robot_body_() return Robot() def dir_js(obj): #py:dir_js """Liste les attributs et méthodes d'un objet Javascript.""" RUR._dir_js_(obj) def dir_py(obj): #py:dir_py """Lists attributes and methods of a Python object, excluding those whose name start with a double underscore and are considered to be private. """ attrs = [] for attr in dir(obj): if attr.startswith("__"): continue if callable(getattr(obj, attr)): attr += "()" attrs.append(attr) print_html(str("\n".join(attrs)).replace("&", "&amp").replace("<", "&lt;" ).replace(">", "&gt;").replace("\n", "<br>")) def termine(): #py:done """Termine l'exécution d'un programme.""" RUR._done_() def rien_devant(): #py:front_is_clear """Indique si un obstacle (mur, clôture, eau, mur de brique, etc.) bloque le chemin. Returns: True si le chemin est non bloqué, False autrement.""" return RUR._front_is_clear_() def dans_le_sac(): #py:in_the_bag return dict(RUR._in_the_bag_()) def est_face_au_nord(): #py:is_facing_north """Indique si Reeborg fait face au nord (haut de l'écran) ou non.""" return RUR._is_facing_north_() def avance(): #py:move """Avance d'une case""" RUR._move_() def pas_de_surlignement(): #py:no_highlight """Empêche le surlignement de lignes de code d'être effectué. 
Pour véritablement éliminer tout effet lié au surlignement de lignes de code, il peut être nécessaire d'exécuter un programme à deux reprises.""" RUR._no_highlight_() def objet_ici(obj=None): #py:object_here """ Indique si un ou des types d'objets se trouvent à la position du robot. Args: obj: paramètre optionnel qui est le nom d'un objet sous forme de chaîne de caractères. Returns: une liste d'objets retrouvés. Si aucun objet n'est présent ou si un objet spécifié comme paramètre n'est pas présent, le résultat est une liste vide. Exemples possibles: >>> objet_ici() ["jeton", "pomme"] >>> objet_ici("jeton") ["jeton"] >>> objet_ici("fraise") [] """ if obj is not None: ans = RUR._object_here_(obj) else: ans = RUR._object_here_() return list(ans) # convert from js list-like object to proper Python list def pause(ms=None): #py:pause """Pause l'éxecution du programme à l'écran. Si un argument (temps, en millisecondes) est fourni, l'exécution redémarre automatiquement après que ce temps ait été écoulé. """ if ms is None: RUR._pause_() else: RUR._pause_(ms) def depose(obj=None): #py:put """Dépose un objet. Si Reeborg transporte plus d'un type d'objet, on doit spécifier lequel sinon ceci causera une exception.""" if obj is None: RUR._put_() else: RUR._put_(obj) def enregistrement(bool): #py:recording """Arrête ou redémarre les enregistrement d'actions de Reeborg. Args: bool: True si on veut avoir des enregistrement, False autrement """ RUR._recording_(bool) def plus_de_robots(): #py:remove_robots """Élimine tous les robots existants""" RUR._remove_robots_() def rien_a_droite(): #py:right_is_clear """Indique si un obstacle (mur, clôture, eau, mur de brique, etc.) se trouve à la droite immédiate de Reeborg. Returns: True si un obstacle est à la droite, False autrement.""" return RUR._right_is_clear_() def couleur_de_trace(couleur): #py:set_trace_color """Change la couleur de trace du robot. 
Args: couleur: quatre formats sont possibles soit les noms de couleur du web (en anglais), les formats rgb et rgba, et la notation hexadécimale. Exemples possibles:: >>> couleur_de_trace("red") # nom de couleur en anglais >>> couleur_de_trace("rgb(125, 0, 0)") >>> couleur_de_trace("rgba(125, 0, 0, 0.5)") >>> couleur_de_trace("#FF00FF") """ RUR._set_trace_color_(couleur) def style_de_trace(style="normal"): #py:set_trace_style """Change le style de trace du robot. Args: style: "épais" ou "epais" (sans accent) pour une trace plus visible, "invisible" pour une trace invisible(!), "normal" ou ne pas spécifier d'argument pour avoir le style normal. Le choix "invisible" est équivalent à couleur_de_trace("rgba(0, 0, 0, 0)") c'est-à-dire une couleur complètement transparente. La trace plus épaisse est centrée et ne permet pas de voir qu'un virage à droite est constitué de trois virages à gauche, ni de distinguer les aller-retours. """ if style in ["épais", "epais"]: style = "thick" elif style == "normal": style = "default" elif style == "invisible": pass # leave as is else: raise ReeborgError("Valeur de style inconnue pour style_de_trace().") RUR._set_trace_style_(style) def son(bool): #py:sound """Active ou désactive les effets sonores.""" RUR._sound_(bool) def prend(obj=None): #py:take """Prend un objet. Si plus d'un type d'objet se trouve à l'endroit où Reeborg est, on doit spécifier lequel sinon ceci causera une exception. """ if obj is None: RUR._take_() else: RUR._take_(obj) def pense(ms): #py:think """Fixe un délai entre les actions de Reeborg à l'écran.""" RUR._think_(ms) def tourne_a_gauche(): #py:turn_left """Reeborg tourne à sa gauche.""" RUR._turn_left_() def voir_source_js(fn): #py:view_source_js """Affiche le code source d'une fonction Javascript.""" RUR._view_source_js_(fn) def mur_devant(): #py:wall_in_front """Indique si un mur bloque le chemin. 
Returns: True si un mur est devant, False autrement.""" return RUR._wall_in_front_() def mur_a_droite(): #py:wall_on_right """Indique si un mur se trouve immédiatement à la droite de Reeborg. Returns: True si un mur est à la droite, False autrement.""" return RUR._wall_on_right_() def Monde(url, nom=None): #py:World """Permet de sélectioner un monde donné à l'intérieur d'un programme. Si le monde présentement utilisé est différent, le résultat de l'exécution de cette instruction fera en sorte que le monde spécifié par le paramètre `url` sera choisi sans que le reste du programme ne soit déjà exécuté. Si le monde spécifié est déjà le monde choisi, la fonction `Monde(...)` est ignorée et le reste du programme est exécuté. Le monde spécifié sera ajouté au sélecteur s'il n'est pas déjà présent. Args: url: deux choix possibles, soit un nom apparaissant dans le sélecteur de monde, ou un lien à un document accessible via Internet. nom: paramètre optionnel; si ce paramètre est choisi, le nom apparaissant dans le sélecteur sera nom. Exemples: >>> Monde("But 1") # monde inclus par défaut >>> Monde("http://reeborg.ca/mon_monde") # exemple fictif # le nom http://reeborg.ca/mon_monde sera ajouté au sélecteur >>> Monde("http://reeborg.ca/mon_monde", "Bonjour") # le nom Bonjour sera ajouté au sélecteur pour indiquer ce monde. """ if nom is None: RUR.file_io.load_world_from_program(url) else: RUR.file_io.load_world_from_program(url, nom) def max_nb_instructions(nb): #py:set_max_nb_instructions """Surtout destiné aux créateurs de mondes, ceci permet de changer le nombre maximal d'instructions exécutées par un robot. """ RUR._set_max_steps_(nb) def max_nb_robots(nb): #py:set_max_nb_robots """Surtout destiné aux créateurs de mondes, ceci permet de limiter le nombre de robots permis dans un monde donné. 
""" RUR._set_max_nb_robots_(nb) def print_html(html, append=False): #py:print_html """Surtout destiné aux créateurs de monde, la fonction print_html() est semblable à print() sauf qu'elle accepte du texte html. """ RUR.output.print_html(html, append) window['print_html'] = print_html def nouvelles_images_de_robot(images): #py:new_robot_images """Surtout destiné aux créateurs de mondes, ceci permet de remplacer les images utilisées pour le robot par d'autres images. Une explication plus détaillée viendra. """ if "est" in images: images["east"] = images["est"] if "ouest" in images: images["west"] = images["ouest"] if "nord" in images: images["north"] = images["nord"] if "sud" in images: images["south"] = images["sud"] RUR._new_robot_images_(images) def MenuPersonalise(contenu): #py:MakeCustomMenu """À l'intention des éducateurs. Permet de créer des menus de monde personalisés. Voir la documentation pour plus de détails.""" RUR.custom_menu.make(contenu) class RobotUsage(object): #py:UR def __init__(self, x=1, y=1, orientation='est', jeton=None): #py:UR.__init__ """Créé un robot usagé. Args: x: coordonnée horizontale; un entier supérieur ou égal à 1 y: coordonnée vertical; un entier supérieur ou égal à 1 orientation: une des valeurs suivante: "nord", "sud", est", "ouest" jeton: nombre initial de jetons à donner au robot; un entier positif, ou la chaîne "inf" pour un nombre infini. 
""" if jeton is None: robot = RUR.robot.create_robot(x, y, orientation) else: robot = RUR.robot.create_robot(x, y, orientation, jeton) self.body = robot RUR.world.add_robot(self.body) def __str__(self): #py:UR.__str__ location = "({}, {})".format(self.body.x, self.body.y) if self.body._orientation == RUR.EAST: facing = "est face à l'est" elif self.body._orientation == RUR.WEST: facing = "est face à l'ouest" elif self.body._orientation == RUR.NORTH: facing = "est face au nord" elif self.body._orientation == RUR.SOUTH: facing = "est face au sud" if 'token' in self.body.objects: if self.body.objects['token'] == 'inf': carries = "transporte un nombre infini de jetons." else: carries = 'transporte %s jetons' % self.body.objects['token'] else: carries = 'ne transporte pas de jetons' return "RobotUsage situé en {} {} {}.".format(location, facing, carries) # NOQA def avance(self): #py:UR.move """avance d'une case""" RUR.control.move(self.body) def au_but(self): #py:UR.at_goal """Indique si Reeborg a atteint la position demandée. Returns: True si Reeborg a atteint son but. """ return RUR.control.at_goal(self.body) def construit_un_mur(self): #py:UR.build_wall """Indique à Reeborg de construire un mur devant sa position.""" RUR.control.build_wall(self.body) def rien_devant(self): #py:UR.front_is_clear """Indique si un obstacle (mur, clôture, eau, mur de brique, ) bloque le chemin. Returns: True si le chemin est non bloqué, False autrement.""" return RUR.control.front_is_clear(self.body) def mur_devant(self): #py:UR.wall_in_front """Indique si un mur bloque le chemin. Returns: True si un mur est devant, False autrement.""" return RUR.control.wall_in_front(self.body) def rien_a_droite(self): #py:UR.right_is_clear """Indique si un obstacle (mur, clôture, eau, mur de brique, etc.) se trouve à la droite immédiate de Reeborg. 
           Returns: True si un obstacle est à la droite, False autrement."""
        return RUR.control.right_is_clear(self.body)

    # NOTE: the French docstrings below are the user-facing API reference of
    # the French edition of Reeborg's World and are kept in French on purpose.
    # The trailing "#py:UR.xxx" comments map each method to its counterpart
    # in the English API.

    def mur_a_droite(self):  #py:UR.wall_on_right
        """Indique si un mur se trouve immédiatement à la droite de Reeborg.

           Returns: True si un mur est à la droite, False autrement."""
        return RUR.control.wall_on_right(self.body)

    def dans_le_sac(self):  #py:UR.in_the_bag
        # Returns a plain dict describing the contents of Reeborg's bag.
        return dict(RUR._in_the_bag_(self.body))

    def est_face_au_nord(self):  #py:UR.is_facing_north
        """Indique si Reeborg fait face au nord (haut de l'écran) ou non."""
        return RUR.control.is_facing_north(self.body)

    def depose(self, obj=None):  #py:UR.put
        """Dépose un objet.  Si Reeborg transporte plus d'un type d'objet,
           on doit spécifier lequel sinon ceci causera une exception."""
        # With no argument, the underlying engine raises if the choice of
        # object is ambiguous.
        if obj is None:
            RUR.control.put(self.body)
        else:
            RUR.control.put(self.body, obj)

    def prend(self, obj=None):  #py:UR.take
        """Prend un objet.  Si plus d'un type d'objet se trouve à l'endroit où
           Reeborg est, on doit spécifier lequel sinon ceci causera une
           exception.
        """
        if obj is None:
            RUR.control.take(self.body)
        else:
            RUR.control.take(self.body, obj)

    def objet_ici(self, obj=None):  #py:UR.object_here
        """ Indique si un ou des types d'objets se trouvent à la position du
            robot.

            Args:
                obj: paramètre optionnel qui est le nom d'un objet sous forme
                     de chaîne de caractères.

            Returns:
                une liste d'objets retrouvés.  Si aucun objet n'est présent
                ou si un objet spécifié comme paramètre n'est pas présent,
                le résultat est une liste vide.

            Exemples possibles:

                >>> reeborg = RobotUsage()
                >>> reeborg.objet_ici()
                ["jeton", "pomme"]
                >>> reeborg.objet_ici("jeton")
                ["jeton"]
                >>> reeborg.objet_ici("fraise")
                []
        """
        # Always convert to a real Python list before returning.
        if obj is not None:
            return list(RUR.control.object_here(self.body, obj))
        else:
            return list(RUR.control.object_here(self.body))

    def transporte(self, obj=None):  #py:UR.carries_object
        """ Indique si Reeborg transporte un ou des objets.

            Args:
                obj: paramètre optionnel qui est le nom d'un objet sous forme
                     de chaîne de caractères.

            Returns:
                une liste d'objets retrouvés.  Si Reeborg ne transporte
                aucun objet, ou si un objet spécifié comme paramètre n'est
                pas présent, le résultat est une liste vide.

            Exemples possibles:

                >>> reeborg = RobotUsage()
                >>> reeborg.transporte()
                ["jeton", "pomme"]
                >>> reeborg.transporte("jeton")
                ["jeton"]
                >>> reeborg.transporte("fraise")
                []
        """
        if obj is not None:
            return list(RUR.control.carries_object(self.body, obj))
        else:
            return list(RUR.control.carries_object(self.body))

    def tourne_a_gauche(self):  #py:UR.turn_left
        # Turn 90 degrees to the left.
        RUR.control.turn_left(self.body)

    def modele(self, modele):  #py:UR.set_model
        """Permet de choisir le modèle du robot.

           Args:
              modele: un nombre de 0 à 3.
        """
        RUR.control.set_model(self.body, modele)

    def couleur_de_trace(self, couleur):  #py:UR.set_trace_color
        """Change la couleur de trace du robot.

           Args:
               couleur: quatre formats sont possibles soit les noms de
                   couleur du web (en anglais), les formats rgb et rgba,
                   et la notation hexadécimale.

           Exemples possibles::

               >>> reeborg = RobotUsage()
               >>> reeborg.couleur_de_trace("red")  # nom anglais de couleur
               >>> reeborg.couleur_de_trace("rgb(125, 0, 0)")
               >>> reeborg.couleur_de_trace("rgba(125, 0, 0, 0.5)")
               >>> reeborg.couleur_de_trace("#FF00FF")
        """
        RUR.control.set_trace_color(self.body, couleur)

    def style_de_trace(self, style):  #py:UR.set_trace_style
        """Change le style de trace du robot.

           Args:
               style: "épais" ou "epais" (sans accent) pour une trace
                   plus visible, "invisible" pour une trace invisible(!),
                   "normal" ou ne pas spécifier d'argument pour avoir
                   le style normal.

           La trace plus épaisse est centrée et ne permet pas de voir
           qu'un virage à droite est constitué de trois virages à gauche,
           ni de distinguer les aller-retours.
        """
        # Map the French (accented and unaccented) aliases onto the engine's
        # internal style keywords; "invisible" is passed through unchanged.
        if style in ["épais", "epais"]:
            style = "thick"
        elif style == "invisible":
            pass
        elif style == "normal":
            style = "default"
        else:
            raise ReeborgError("Valeur de style inconnue pour style_de_trace().")  # NOQA
        RUR.control.set_trace_style(self.body, style)


class ReeborgError(Exception):  #py:RE
    """Exceptions spécifique au monde de Reeborg.

       Exemples possible::

           def termine():  #py:
               message = "Vous ne devez pas utiliser termine()."
               raise ReeborgError(message)

           #---- ou ------

           try:
               avance()
           except ReeborgError:   # ignore le mur qui bloquait le chemin
               tourne_a_gauche()
    """

    def __init__(self, message):  #py:RE.__init__
        # The message is "shouted" by the robot in the web UI.
        self.reeborg_shouts = message

    def __str__(self):  #py:RE.__str__
        return repr(self.reeborg_shouts)

try:
    # Expose the exception to the JavaScript side; silently skipped when no
    # browser "window" object exists (e.g. outside the Brython environment).
    window['ReeborgError'] = ReeborgError
except:
    pass


class WallCollisionError(ReeborgError):  #py:WCE
    """Exception spécifique au monde de Reeborg.

       A lieu lorsque Reeborg frappe un mur
    """
    pass

try:
    window['WallCollisionError'] = WallCollisionError
except:
    pass


class InfoSatellite():  #py:SI

    @property
    def carte_du_monde(self):  #py:SI.world_map
        """retourne un dict qui contient l'information au sujet du monde.
        """
        import json
        return json.loads(RUR.control.get_world_map())

    def imprime_carte(self):  #py:SI.print_world_map
        """imprime une copie formattée de la carte"""
        print(RUR.control.get_world_map())

try:
    # Signal to the JavaScript side that this module finished loading.
    RUR.reeborg_loaded = True
    window.console.log("reeborg loaded")
except:
    pass

#py:obsolete
# Do not translate the following

def nombre_d_instructions(nb):
    # Obsolete: kept only to point users at the replacement function.
    raise ReeborgError(
        "nombre_d_instructions() a été remplacé par max_nb_instructions().")

def face_au_nord():  # obsolete
    # Obsolete: kept only to point users at the replacement function.
    raise ReeborgError("face_au_nord() est désuet;" +
                       " utilisez est_face_au_nord()")
From our Registration page, each student requests which instructor they wish to take their Master Classes with. This will be your main teacher and mentor throughout the Okanagan Blues Camp. Classes will be at either the Sandman Hotel, or at the Penticton Community Centre. Check the Schedule for which classroom your class is in. Each student will have a chance to have a “Class Practice” with their instructor on Tues, Oct 22; in the afternoon from 2:30 – 4:00 PM. Students will get a chance to perform with other students (1 song only) showing what they have learned right up on the Dream Café stage on Wed afternoon and into the early evening. We have built in additional “Practice Time” in the schedule to allow students to form 3 – 5 person “combos” to perform what they have learned on stage. Practice times are: Mon 4:00 – 5:30 PM; and Tues 4:00 – 5:30 PM. This will give students a chance to work up the tune they want to perform. The Dream Café will stay open on Wed. afternoon and into the early evening to accommodate the student combos to get on stage and have fun performing. Additional food and beverage will be available for purchase from the Dream Café. For the Master Classes, students are requested to show up on time, but not more than 15 minutes before the scheduled start time. If your class requires instrument set up time (drums, amps, etc.) keep that in mind and get it done in that 15-minute window. On the Registration page, you’ll find a form to download to select your first choice of Master instructor, and your second choice. Paul Pigat, (Cousin Harley) may be added to the instructor roster if sufficient students sign up. We’ll know if this is possible by about June 2019. Roger is a well known and sought after bass man from Vancouver. He tours extensively with Vancouver acts and bands, and recently has been a steady face in the Steve Kozak band touring BC and Alberta. More bio notes coming soon.
· Connecting the Dots: other instruments filling in holes, working around them, and working with other instruments. · On Tues afternoon students will have a chance to play bass with Roger, and students from Chris Norquist’s drum class. Roger and Chris will be on hand to set up small groups and help work up your song for performance on the Dream Café stage (for those wanting to do this). · Students will be offered time to stay in their classrooms to practice what you have learned, from 4:00 PM – 5:30 PM, on Monday and on Tues afternoon, prior to the dinner / concerts at the Dream Café starting at 6:00 PM. The “Tank” can wail the reeds off a blues harp. Sherman is considered one of the best harp guys in Canada – hailing from Kelowna in the Okanagan Valley. He has been a teacher at other blues camps, and knows how to demonstrate techniques and get his students cranked up about wicked harp runs. Tank regularly tours western Canada and performs with visiting artists as they do the blues scene in the west. Please bring at least a five pack of harmonicas in different keys (any brand), one of which should be a 10 hole diatonic harmonica in the key of “C”. As a self-taught blues harp player, Sherman will give students the basic short cuts to help you get to where you would like to go on harmonica to play and have fun. As that happens, we will take time to recognize the importance of “bending” notes, a technique that allows us to play flattened notes that are characteristic in blues music. · Tues Oct 22 – 2:30 – 4:00 PM ; this session will have your mentor / instructor helping you work on a tune for performance on the Dream Café stage, should you choose to participate in performing on stage.
Hailing from the ‘Left Coast’ of Canada, Poppa Dawg (Rick Halesheff) is a killer guitar player and vocalist who has toured extensively throughout Western and Northern Canada with international stints in Mexico at The City of Peace Blues Festival and most recently, as a participant at the International Blues Challenge in Memphis. There Poppa Dawg made it to the semi-finals and along the way forged some very strong friendships with friends around the world. Poppa Dawg is a regular feature on the Kelowna and Okanagan Valley blues scene, playing venues throughout BC and western Canada. Dawg-blues is gritty, skanky, soft and sweet…Rick’s got great tricks up his sleeves. This cat can rock. You’ll catch on quickly. · We’ll focus on the early electric guitarists and show the influence of T Bone Walker. This will be done building on a few well-known T-Bone riffs and tunes like the famous T-Bone Shuffle, which we will concentrate on to demonstrate a variety of blues guitar techniques. We will also cover aspects of some of the other most distinctive players such as BB King, Albert King and Albert Collins and others to see what sets them apart from others. We’ll look at some of the gear they used as well. Brandon Isaak not only plays his guitar but he plays his audience like a ringmaster in his own circus, always looking for new ways to entertain.” – Teresa Mallam, Prince George Free Press. What separates Brandon from the rest of the pack besides his world-class musicianship and song writing, is his ability to connect and involve his audience in his live shows. After many nominations by The Maple Blues Awards, (Canada’s national blues awards) this Yukonion now lower mainland guitar slinger finally won his first MBA last year for Acoustic Act of the Year. Brandon Isaak has represented British Columbia in the International Blues Challenge which takes place every year on Beale St in Memphis, TN. This is the biggest gathering of blues acts in the world.
· All Around the Blues – We’ll look at a few different styles of blues and how to make them work solo or with a band. We’ll check out some Chicago Blues, Delta, and Country blues. All levels are welcome. We’ll also look at a couple different but easy guitar tunings and how to chord with them and fret behind the nut and use different thumb patterns. · As time allows we’ll also examine The Art of The Guitar Solo. We’ll be playing lead guitar and soloing over different blues changes. We’ll look at T-Bone Walker, Magic Sam and even some Ronnie Earl type guitar soloing. Vancouver born Steve Kozak has been a mainstay on the western Canadian Blues scene since the mid-eighties. Backed by some of Vancouver’s top musicians Kozak has built a reputation as one of Canada’s premier Blues acts and is known as the go to guy in Vancouver for the west coast Blues sound. An accomplished and tasty guitar player with a relaxed vocal style Kozak has been winning over fans performing his up-tempo brand of jump, swing and working man’s blues to enthusiastic audiences throughout the Pacific Northwest and western Canada. Gaining national recognition Steve has won a Maple Blues Award for New Artist or Group of the Year. Nominated for a Western Canadian Music Award for Blues Artist of the Year in 2017 and 2018. He has taught at blues camps in the past and loves doing it. All electric blues guitarists should have at least a few good instrumentals in their repertoire and should play at least a couple of the hit instrumentals by the great Freddie King. Freddie King was an all-around Bluesman, his style a mixture of the Texas and Chicago sounds. We will learn and work on a couple of my favorites. Students will look at a few examples of some of Steve’s favorite 12 bar and 8 bar Chicago and Texas Blues tunes. We’ll examine the form and learn some different styles of rhythm playing, turnarounds, and riffs. We will check out some of my influences and favorites.
Chris Nordquist is one of Canada’s most sought after drummers. Chris is a mainstay feature in the Vancouver music scene. He regularly plays as a session musician on various artists’ recordings. Chris has been an instructor at the Hornby Blues Workshop several times over the past few years. His classes offer drum enthusiasts of all levels tips and technique, fitting in a session, using percussion as the basis of fine rhythm. When you think about blues keyboard styles in Canada, Kenny Wayne’s name is sure to come up. Hailed as “an artist bringing the piano back to the front ranks of contemporary blues”, multiple award-winning blues, boogie-woogie and jazz pianist Kenny “Blues Boss” Wayne is called “Blues Boss” for a reason. His musical career began as a child prodigy in the late 1950’s and has continued to flourish for over 60 years with Wayne at the forefront of modern day blues piano practitioners. Kenny “Blues Boss” Wayne is an artist who’s got it all: Talent, charisma and showmanship. Oozing with class and sophistication, Wayne is a throwback to the golden age of classic rhythm and blues while offering a fresh approach to the genre. He can charm the shine off a keyboard. He’s been a mentor at many workshops and can’t wait to provide those pearls of wisdom to Okanagan Blues Camp players. Kenny’s got the chops and the swag to pull it off. This super nice dude will charm the heck out of his students. (Otis Spann, Fats Domino, Pete Johnson, Jay McShann/Leroy Carr). You will learn where the various styles of piano blues originated in the US and hear how uniquely they sound to the blues patterns. You will hear the Chicago, St. Louis, West Coast, New Orleans & Boogie-Woogie styles and how they were played in Juke Joints & Barrelhouses during the 1930’s & 40’s. Question: Why has the piano been excluded from the origins of the blues? Kenny will answer that question.
Kenny will be showing how to write new songs or re-create old songs utilizing the various piano blues styles. Class will conclude with Ray Charles and how Gospel music influenced Blues, RnB and Rock n Roll. Creating some ideas on playing various piano styles and having some deep-rooted discussion on marketing and where the blues is heading in the future and how the piano is playing a vital part. Kenny will critique the students’ performance(s) and fine tune their individual styles. This is the way that I got started professionally 50 years ago. Yanti is a Penticton-based vocal and musical coach. Yanti is a vocalist, choir leader and ukulele enthusiast. Her love of jazz may have led Yanti to sing in professional groups around the South Okanagan, but blues is where her heart is. Growing up in Texas and frequenting New Orleans, Yanti was exposed to many blues classics on the streets and in clubs. Yanti’s recent training as a choir leader has helped her gain the skills to hone her craft of teaching larger groups with song. For those of us who perform on the front porch or on-stage—more vocal knowledge is a worthwhile endeavour. · Earworms for increasing your comfort zone.
""" Support code for building Python extensions on Windows. # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # 3. Force windows to use g77 """ import os import sys import log # Overwrite certain distutils.ccompiler functions: import numpy.distutils.ccompiler # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # --> this is done in numpy/distutils/ccompiler.py # 3. Force windows to use g77 import distutils.cygwinccompiler from distutils.version import StrictVersion from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options from distutils.errors import DistutilsExecError, CompileError, UnknownFileError from distutils.unixccompiler import UnixCCompiler from numpy.distutils.misc_util import msvc_runtime_library # the same as cygwin plus some additional parameters class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): """ A modified MingW32 compiler compatible with an MSVC built Python. """ compiler_type = 'mingw32' def __init__ (self, verbose=0, dry_run=0, force=0): distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,dry_run, force) # we need to support 3.2 which doesn't match the standard # get_versions methods regex if self.gcc_version is None: import re out = os.popen('gcc -dumpversion','r') out_string = out.read() out.close() result = re.search('(\d+\.\d+)',out_string) if result: self.gcc_version = StrictVersion(result.group(1)) # A real mingw32 doesn't need to specify a different entry point, # but cygwin 2.91.57 in no-cygwin-mode needs it. if self.gcc_version <= "2.91.57": entry_point = '--entry _DllMain@12' else: entry_point = '' if self.linker_dll == 'dllwrap': # Commented out '--driver-name g++' part that fixes weird # g++.exe: g++: No such file or directory # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). 
# If the --driver-name part is required for some environment # then make the inclusion of this part specific to that environment. self.linker = 'dllwrap' # --driver-name g++' elif self.linker_dll == 'gcc': self.linker = 'g++' # **changes: eric jones 4/11/01 # 1. Check for import library on Windows. Build if it doesn't exist. build_import_library() # **changes: eric jones 4/11/01 # 2. increased optimization and turned off all warnings # 3. also added --driver-name g++ #self.set_executables(compiler='gcc -mno-cygwin -O2 -w', # compiler_so='gcc -mno-cygwin -mdll -O2 -w', # linker_exe='gcc -mno-cygwin', # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s' # % (self.linker, entry_point)) if self.gcc_version <= "3.0.0": self.set_executables(compiler='gcc -mno-cygwin -O2 -w', compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', linker_exe='g++ -mno-cygwin', linker_so='%s -mno-cygwin -mdll -static %s' % (self.linker, entry_point)) else: self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', linker_exe='g++ -mno-cygwin', linker_so='g++ -mno-cygwin -shared') # added for python2.3 support # we can't pass it through set_executables because pre 2.2 would fail self.compiler_cxx = ['g++'] # Maybe we should also append -mthreads, but then the finished # dlls need another dll (mingwm10.dll see Mingw32 docs) # (-mthreads: Support thread-safe exception handling on `Mingw32') # no additional libraries needed #self.dll_libraries=[] return # __init__ () def link(self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols = None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): # Include the appropiate MSVC runtime library if Python was built # with MSVC >= 7.0 (MinGW standard is msvcrt) runtime_library = msvc_runtime_library() if runtime_library: if not libraries: libraries = [] 
libraries.append(runtime_library) args = (self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, None, #export_symbols, we do this in our def-file debug, extra_preargs, extra_postargs, build_temp, target_lang) if self.gcc_version < "3.0.0": func = distutils.cygwinccompiler.CygwinCCompiler.link else: func = UnixCCompiler.link func(*args[:func.im_func.func_code.co_argcount]) return def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: # use normcase to make sure '.rc' is really '.rc' and not '.RC' (base, ext) = os.path.splitext (os.path.normcase(src_name)) # added these lines to strip off windows drive letters # without it, .o files are placed next to .c files # instead of the build directory drv,base = os.path.splitdrive(base) if drv: base = base[1:] if ext not in (self.src_extensions + ['.rc','.res']): raise UnknownFileError, \ "unknown file type '%s' (from '%s')" % \ (ext, src_name) if strip_dir: base = os.path.basename (base) if ext == '.res' or ext == '.rc': # these need to be compiled to object files obj_names.append (os.path.join (output_dir, base + ext + self.obj_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names # object_filenames () def build_import_library(): """ Build the import libraries for Mingw32-gcc on Windows """ if os.name != 'nt': return lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) lib_file = os.path.join(sys.prefix,'libs',lib_name) out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) out_file = os.path.join(sys.prefix,'libs',out_name) if not os.path.isfile(lib_file): log.warn('Cannot build import library: "%s" not found' % (lib_file)) return if os.path.isfile(out_file): log.debug('Skip building import library: "%s" exists' % (out_file)) return log.info('Building import library: "%s"' % (out_file)) from numpy.distutils 
import lib2def def_name = "python%d%d.def" % tuple(sys.version_info[:2]) def_file = os.path.join(sys.prefix,'libs',def_name) nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) nm_output = lib2def.getnm(nm_cmd) dlist, flist = lib2def.parse_nm(nm_output) lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) dll_name = "python%d%d.dll" % tuple(sys.version_info[:2]) args = (dll_name,def_file,out_file) cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args status = os.system(cmd) # for now, fail silently if status: log.warn('Failed to build import library for gcc. Linking will fail.') #if not success: # msg = "Couldn't find import library, and failed to build it." # raise DistutilsPlatformError, msg return
Saddleback Church and the American Foundation for Suicide Prevention (AFSP) partnered together to host an event on International Survivors of Suicide Loss Day. This event was for all who have lost someone to suicide to find comfort, hope, and healing in remembering their loved one. If you are struggling – please don’t struggle alone. Talk with someone today – call the National Suicide Prevention Lifeline: 1-800-273-8255.
""" netcdftrajectory - I/O trajectory files in the AMBER NetCDF convention More information on the AMBER NetCDF conventions can be found at http://ambermd.org/netcdf/. This module supports extensions to these conventions, such as writing of additional fields and writing to HDF5 (NetCDF-4) files. A Python NetCDF module is required. Supported are netCDF4-python - http://code.google.com/p/netcdf4-python/ scipy.io.netcdf - http://docs.scipy.org/doc/scipy/reference/io.html pupynere - https://bitbucket.org/robertodealmeida/pupynere/ Availability is checked in the above order of preference. Note that scipy.io.netcdf and pupynere cannot write HDF5 NetCDF-4 files. NetCDF files can be directly visualized using the libAtoms flavor of AtomEye (http://www.libatoms.org/), VMD (http://www.ks.uiuc.edu/Research/vmd/) or Ovito (http://www.ovito.org/, starting with version 2.3). """ import os import numpy as np import ase import ase.version from ase.data import atomic_masses from ase.lattice.spacegroup.cell import cellpar_to_cell, cell_to_cellpar NC_NOT_FOUND = 0 NC_IS_NETCDF4 = 1 NC_IS_SCIPY = 2 NC_IS_PUPYNERE = 3 have_nc = NC_NOT_FOUND # Check if we have netCDF4-python try: from netCDF4 import Dataset have_nc = NC_IS_NETCDF4 except: pass #if not have_nc: # # Check for scipy # try: # from scipy.io.netcdf import netcdf_file # have_nc = NC_IS_SCIPY # except: # pass if not have_nc: # Check for pupynere (comes with ASE) try: from ase.io.pupynere import netcdf_file have_nc = NC_IS_PUPYNERE except: pass ### Read/write NetCDF trajectories class NetCDFTrajectory: """ Reads/writes Atoms objects into an AMBER-style .nc trajectory file. 
""" # netCDF4-python format strings to scipy.io.netcdf version numbers _netCDF4_to_scipy = {'NETCDF3_CLASSIC': 1, 'NETCDF3_64BIT': 2} _netCDF4_to_pupynere = ['NETCDF3_CLASSIC'] # Default dimension names _frame_dim = 'frame' _spatial_dim = 'spatial' _atom_dim = 'atom' _cell_spatial_dim = 'cell_spatial' _cell_angular_dim = 'cell_angular' _label_dim = 'label' # Default field names. If it is a list, check for any of these names upon # opening. Upon writing, use the first name. _time_var = 'time' _numbers_var = ['Z', 'atom_types', 'type'] _positions_var = 'coordinates' _velocities_var = 'velocities' _cell_origin_var = 'cell_origin' _cell_lengths_var = 'cell_lengths' _cell_angles_var = 'cell_angles' _default_vars = reduce(lambda x, y: x + y, [_numbers_var, [_positions_var], [_velocities_var], [_cell_origin_var], [_cell_lengths_var], [_cell_angles_var]]) def __init__(self, filename, mode='r', atoms=None, types_to_numbers=None, double=True, netcdf_format='NETCDF3_CLASSIC', keep_open=None, index_var='id', index_offset=-1): """ A NetCDFTrajectory can be created in read, write or append mode. Parameters: filename: The name of the parameter file. Should end in .nc. mode='r': The mode. 'r' is read mode, the file should already exist, and no atoms argument should be specified. 'w' is write mode. The atoms argument specifies the Atoms object to be written to the file, if not given it must instead be given as an argument to the write() method. 'a' is append mode. It acts a write mode, except that data is appended to a preexisting file. atoms=None: The Atoms object to be written in write or append mode. types_to_numbers=None: Dictionary for conversion of atom types to atomic numbers when reading a trajectory file. double=True: Create new variable in double precision. netcdf_format='NETCDF3_CLASSIC': Format string for the underlying NetCDF file format. Only relevant if a new file is created. 
More information can be found at https://www.unidata.ucar.edu/software/netcdf/docs/netcdf/File-Format.html 'NETCDF3_CLASSIC' is the original binary format. 'NETCDF3_64BIT' can be used to write larger files. 'NETCDF4_CLASSIC' is HDF5 with some NetCDF limitations. 'NETCDF4' is HDF5. keep_open=None: Keep the file open during consecutive read/write operations. Default is to close file between writes to minimize chance of data corruption, but keep file open if file is opened in read mode. index_var='id': Name of variable containing the atom indices. Atoms are reordered by this index upon reading if this variable is present. Default value is for LAMMPS output. index_offset=-1: Set to 0 if atom index is zero based, set to -1 if atom index is one based. Default value is for LAMMPS output. """ if not have_nc: raise RuntimeError('NetCDFTrajectory requires a NetCDF Python ' 'module.') self.nc = None self.numbers = None self.pre_observers = [] # Callback functions before write self.post_observers = [] # Callback functions after write # are called self.has_header = False self._set_atoms(atoms) self.types_to_numbers = None if types_to_numbers: self.types_to_numbers = np.array(types_to_numbers) self.index_var = index_var self.index_offset = index_offset self._default_vars += [self.index_var] # 'l' should be a valid type according to the netcdf4-python # documentation, but does not appear to work. 
self.dtype_conv = {'l': 'i'} if not double: self.dtype_conv.update(dict(d='f')) self.extra_per_frame_vars = [] self.extra_per_file_vars = [] # per frame atts are global quantities, not quantities stored for each # atom self.extra_per_frame_atts = [] self.mode = mode self.netcdf_format = netcdf_format if atoms: self.n_atoms = len(atoms) else: self.n_atoms = None self.filename = filename if keep_open is None: # Only netCDF4-python supports append to files self.keep_open = self.mode == 'r' or have_nc != NC_IS_NETCDF4 else: self.keep_open = keep_open if (mode == 'a' or not self.keep_open) and have_nc != NC_IS_NETCDF4: raise RuntimeError('netCDF4-python is required for append mode.') def __del__(self): self.close() def _open(self): """ Opens the file. For internal use only. """ if self.nc is not None: return if self.mode == 'a' and not os.path.exists(self.filename): self.mode = 'w' if have_nc == NC_IS_NETCDF4: self.nc = Dataset(self.filename, self.mode, format=self.netcdf_format) elif have_nc == NC_IS_SCIPY: if self.netcdf_format not in self._netCDF4_to_scipy: raise ValueError("NetCDF format '%s' not supported by " "scipy.io.netcdf." % self.netcdf_format) version = self._netCDF4_to_scipy[self.netcdf_format] if version == 1: # This supports older scipy.io.netcdf versions that do not # support the 'version' argument self.nc = netcdf_file(self.filename, self.mode) else: self.nc = netcdf_file( self.filename, self.mode, version=self._netCDF4_to_scipy[self.netcdf_format] ) elif have_nc == NC_IS_PUPYNERE: if self.netcdf_format not in self._netCDF4_to_pupynere: raise ValueError("NetCDF format '%s' not supported by " "ase.io.pupynere." % self.netcdf_format) self.nc = netcdf_file(self.filename, self.mode) else: # Should not happen raise RuntimeError('Internal error: Unknown *have_nc* value.') self.frame = 0 if self.mode == 'r' or self.mode == 'a': self._read_header() self.frame = self._len() def _set_atoms(self, atoms=None): """ Associate an Atoms object with the trajectory. 
For internal use only. """ if atoms is not None and not hasattr(atoms, 'get_positions'): raise TypeError('"atoms" argument is not an Atoms object.') self.atoms = atoms def _read_header(self): if not self.n_atoms: if have_nc == NC_IS_NETCDF4: self.n_atoms = len(self.nc.dimensions[self._atom_dim]) else: self.n_atoms = self.nc.dimensions[self._atom_dim] numbers_var = self._get_variable(self._numbers_var, exc=False) if numbers_var is None: self.numbers = np.ones(self.n_atoms, dtype=int) else: self.numbers = np.array(numbers_var[:]) if self.types_to_numbers is not None: self.numbers = self.types_to_numbers[self.numbers] self.masses = atomic_masses[self.numbers] for name, var in self.nc.variables.iteritems(): # This can be unicode which confuses ASE name = str(name) # _default_vars is taken care of already if name not in self._default_vars: if len(var.dimensions) >= 2: if var.dimensions[0] == self._frame_dim: if var.dimensions[1] == self._atom_dim: self.extra_per_frame_vars += [name] else: self.extra_per_frame_atts += [name] elif len(var.dimensions) == 1: if var.dimensions[0] == self._atom_dim: self.extra_per_file_vars += [name] elif var.dimensions[0] == self._frame_dim: self.extra_per_frame_atts += [name] self.has_header = True def write(self, atoms=None, frame=None, arrays=None, time=None): """ Write the atoms to the file. If the atoms argument is not given, the atoms object specified when creating the trajectory object is used. 
""" self._open() self._call_observers(self.pre_observers) if atoms is None: atoms = self.atoms if hasattr(atoms, 'interpolate'): # seems to be a NEB neb = atoms assert not neb.parallel try: neb.get_energies_and_forces(all=True) except AttributeError: pass for image in neb.images: self.write(image) return if not self.has_header: self._write_header(atoms) else: if len(atoms) != self.n_atoms: raise ValueError('Bad number of atoms!') if self.frame > 0: if (atoms.numbers != self.numbers).any(): raise ValueError('Bad atomic numbers!') else: self.numbers = atoms.get_atomic_numbers() self._get_variable(self._numbers_var)[:] = \ atoms.get_atomic_numbers() if frame is None: i = self.frame else: i = frame self._get_variable(self._positions_var)[i] = atoms.get_positions() if atoms.has('momenta'): self._add_velocities() self._get_variable(self._velocities_var)[i] = \ atoms.get_momenta() / atoms.get_masses().reshape(-1, 1) a, b, c, alpha, beta, gamma = cell_to_cellpar(atoms.get_cell()) cell_lengths = np.array([a, b, c]) * atoms.pbc self._get_variable(self._cell_lengths_var)[i] = cell_lengths self._get_variable(self._cell_angles_var)[i] = [alpha, beta, gamma] if arrays is not None: for array in arrays: data = atoms.get_array(array) self._add_array(atoms, array, data.dtype, data.shape) self._get_variable(array)[i] = data if time is not None: self._get_variable(self._time_var)[i] = time self._call_observers(self.post_observers) self.frame += 1 self._close() def write_arrays(self, atoms, frame, arrays): self._open() self._call_observers(self.pre_observers) for array in arrays: data = atoms.get_array(array) self._add_array(atoms, array, data.dtype, data.shape) self._get_variable(array)[frame] = data self._call_observers(self.post_observers) self._close() def _define_file_structure(self, atoms): if not hasattr(self.nc, 'Conventions'): self.nc.Conventions = 'AMBER' if not hasattr(self.nc, 'ConventionVersion'): self.nc.ConventionVersion = '1.0' if not hasattr(self.nc, 'program'): 
self.nc.program = 'ASE' if not hasattr(self.nc, 'programVersion'): self.nc.programVersion = ase.version.version if not self._frame_dim in self.nc.dimensions: self.nc.createDimension(self._frame_dim, None) if not self._spatial_dim in self.nc.dimensions: self.nc.createDimension(self._spatial_dim, 3) if not self._atom_dim in self.nc.dimensions: self.nc.createDimension(self._atom_dim, len(atoms)) if not self._cell_spatial_dim in self.nc.dimensions: self.nc.createDimension(self._cell_spatial_dim, 3) if not self._cell_angular_dim in self.nc.dimensions: self.nc.createDimension(self._cell_angular_dim, 3) if not self._has_variable(self._numbers_var): self.nc.createVariable(self._numbers_var[0], 'i', (self._atom_dim,)) if not self._has_variable(self._positions_var): self.nc.createVariable(self._positions_var, 'f4', (self._frame_dim, self._atom_dim, self._spatial_dim)) self.nc.variables[self._positions_var].units = 'Angstrom' self.nc.variables[self._positions_var].scale_factor = 1. if not self._has_variable(self._cell_lengths_var): self.nc.createVariable(self._cell_lengths_var, 'd', (self._frame_dim, self._cell_spatial_dim)) self.nc.variables[self._cell_lengths_var].units = 'Angstrom' self.nc.variables[self._cell_lengths_var].scale_factor = 1. if not self._has_variable(self._cell_angles_var): self.nc.createVariable(self._cell_angles_var, 'd', (self._frame_dim, self._cell_angular_dim)) self.nc.variables[self._cell_angles_var].units = 'degree' def _add_velocities(self): if not self._has_variable(self._velocities_var): self.nc.createVariable(self._velocities_var, 'f4', (self._frame_dim, self._atom_dim, self._spatial_dim)) self.nc.variables[self._positions_var].units = \ 'Angstrom/Femtosecond' self.nc.variables[self._positions_var].scale_factor = 1. 
def _add_array(self, atoms, array_name, type, shape):
    """
    Create a NetCDF variable for a per-atom array if it does not exist.

    `type` is a (numpy) dtype object; the parameter name shadows the
    builtin but is kept unchanged for backward compatibility.
    Dimensions are inferred from `shape`: an axis of length len(atoms)
    maps to the atom dimension, an axis of length 3 to the spatial
    dimension; any other axis length is rejected.
    """
    if not self._has_variable(array_name):
        dims = [self._frame_dim]
        for i in shape:
            if i == len(atoms):
                dims += [self._atom_dim]
            elif i == 3:
                dims += [self._spatial_dim]
            else:
                raise TypeError("Don't know how to dump array of shape {0}"
                                " into NetCDF trajectory.".format(shape))
        # Map the dtype character to a NetCDF type if a conversion is
        # registered; otherwise store the dtype as-is. The previous bare
        # `except:` is narrowed to what the lookup can legitimately
        # raise (unknown key, or a `type` without a `.char` attribute).
        try:
            t = self.dtype_conv[type.char]
        except (KeyError, AttributeError):
            t = type
        self.nc.createVariable(array_name, t, dims)

def _get_variable(self, name, exc=True):
    """
    Look up a NetCDF variable.

    `name` may be a single string or a list of alternative names (the
    first one present wins). When nothing matches, raise RuntimeError if
    `exc` is true, otherwise return None.
    """
    if isinstance(name, list):
        for n in name:
            if n in self.nc.variables:
                return self.nc.variables[n]
        if exc:
            # str.join replaces the old reduce()-based concatenation:
            # identical message, and no reliance on the Python 2 only
            # `reduce` builtin.
            raise RuntimeError('None of the variables {0} was found in '
                               'the NetCDF trajectory.'
                               .format(', '.join(name)))
        return None
    else:
        return self.nc.variables[name]

def _has_variable(self, name):
    """Return True if any of the given variable name(s) exist."""
    if isinstance(name, list):
        for n in name:
            if n in self.nc.variables:
                return True
        return False
    else:
        return name in self.nc.variables

def _write_header(self, atoms):
    """Define the file structure and store the atomic numbers."""
    self._define_file_structure(atoms)
    self._get_variable(self._numbers_var)[:] = \
        np.asarray(atoms.get_atomic_numbers())

def close(self):
    """Close the trajectory file. Safe to call more than once."""
    if self.nc is not None:
        self.nc.close()
        self.nc = None

def _close(self):
    """Close unless keep_open is set; flip mode 'w' -> 'a' so later
    writes append instead of truncating."""
    if not self.keep_open:
        self.close()
    if self.mode == 'w':
        self.mode = 'a'

def __getitem__(self, i=-1):
    """Return frame `i` as an ase.Atoms object; negative indices and
    slices are supported."""
    self._open()

    if isinstance(i, slice):
        return [self[j] for j in range(*i.indices(self._len()))]

    N = self._len()
    if 0 <= i < N:
        # Non-periodic boundaries have cell_length == 0.0.
        # (The original wrapped the comparison in np.abs(), which is a
        # no-op on a boolean array and has been dropped.)
        cell_lengths = \
            np.array(self.nc.variables[self._cell_lengths_var][i][:])
        pbc = cell_lengths > 1e-6

        # Do we have a cell origin?
        if self._has_variable(self._cell_origin_var):
            origin = np.array(
                self.nc.variables[self._cell_origin_var][i][:])
        else:
            origin = np.zeros([3], dtype=float)

        # Do we have an index variable?
        # NOTE(review): index_var/index_offset lack the leading
        # underscore used by the other *_var attributes -- confirm this
        # naming is intentional.
        if self._has_variable(self.index_var):
            index = np.array(self.nc.variables[self.index_var][i][:]) + \
                self.index_offset
        else:
            index = np.arange(self.n_atoms)

        # Read positions
        positions_var = self.nc.variables[self._positions_var]
        positions = np.array(positions_var[i][index])

        # Determine cell size for non-periodic directions from the
        # extent of the positions
        for dim in np.arange(3)[np.logical_not(pbc)]:
            origin[dim] = positions[:, dim].min()
            cell_lengths[dim] = positions[:, dim].max() - origin[dim]

        # Construct cell shape from cell lengths and angles
        cell = cellpar_to_cell(
            list(cell_lengths) +
            list(self.nc.variables[self._cell_angles_var][i])
        )

        # Compute momenta from velocities (if present)
        if self._has_variable(self._velocities_var):
            momenta = self.nc.variables[self._velocities_var][i][index] * \
                self.masses.reshape(-1, 1)
        else:
            momenta = None

        # Fill info dict with additional data found in the NetCDF file
        info = {}
        for name in self.extra_per_frame_atts:
            info[name] = np.array(self.nc.variables[name][i])

        # Create atoms object
        atoms = ase.Atoms(
            positions=positions - origin.reshape(1, -1),
            numbers=self.numbers,
            cell=cell,
            momenta=momenta,
            masses=self.masses,
            pbc=pbc,
            info=info
        )

        # Attach additional arrays found in the NetCDF file
        for name in self.extra_per_frame_vars:
            atoms.set_array(name, self.nc.variables[name][i][index])
        for name in self.extra_per_file_vars:
            atoms.set_array(name, self.nc.variables[name][:])
        self._close()
        return atoms

    # Translate a negative index and recurse once; anything still out
    # of range is an error.
    i = N + i
    if i < 0 or i >= N:
        self._close()
        raise IndexError('Trajectory index out of range.')
    return self[i]

def _len(self):
    """Number of frames in the file (0 if the frame dim is absent)."""
    if self._frame_dim in self.nc.dimensions:
        return int(self._get_variable(self._positions_var).shape[0])
    else:
        return 0

def __len__(self):
    self._open()
    n_frames = self._len()
    self._close()
    return n_frames

def pre_write_attach(self, function, interval=1, *args, **kwargs):
    """
    Attach a function to be called before writing begins.

    function: The function or callable object to be called.

    interval: How often the function is called. Default: every time (1).

    All other arguments are stored, and passed to the function.
    """
    if not callable(function):
        raise ValueError('Callback object must be callable.')
    self.pre_observers.append((function, interval, args, kwargs))

def post_write_attach(self, function, interval=1, *args, **kwargs):
    """
    Attach a function to be called after writing ends.

    function: The function or callable object to be called.

    interval: How often the function is called. Default: every time (1).

    All other arguments are stored, and passed to the function.
    """
    if not callable(function):
        raise ValueError('Callback object must be callable.')
    self.post_observers.append((function, interval, args, kwargs))

def _call_observers(self, obs):
    """Call pre/post write observers whose interval divides the current
    write_counter."""
    for function, interval, args, kwargs in obs:
        if self.write_counter % interval == 0:
            function(*args, **kwargs)
New Braindump2go 1Y0-301 Exam Questions Updated Today! Want to know New Questions in 2015 1Y0-301 Exam? Download Free Braindump2go 1Y0-301 Exam Preparation Materials Now! All 133 Citrix 1Y0-301 Exam Dumps Questions are the New Checked and Updated! In recent years, the 1Y0-301 certification has become a global standard for many successful IT companies. Looking to become a certified Citrix professional? Download Braindump2go 2015 Latest Released 1Y0-301 Exam Dumps Full Version and Pass 1Y0-301 100%!
from __future__ import unicode_literals

import iptools
import os
import sys

from celery.schedules import crontab
from datetime import timedelta
from django.utils.translation import ugettext_lazy as _

# -----------------------------------------------------------------------------------
# Default to debugging
# -----------------------------------------------------------------------------------
DEBUG = True

# -----------------------------------------------------------------------------------
# Sets TESTING to True if this configuration is read during a unit test
# -----------------------------------------------------------------------------------
TESTING = sys.argv[1:2] == ['test']

if TESTING:
    # MD5 hashing is fast and insecure -- fine for tests only
    PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
    DEBUG = False

ADMINS = (
    ('RapidPro', 'code@yourdomain.io'),
)
MANAGERS = ADMINS

# hardcode the postgis version so we can do reset db's from a blank database
POSTGIS_VERSION = (2, 1)

# -----------------------------------------------------------------------------------
# set the mail settings, override these in your settings.py
# if your site was at http://temba.io, it might look like this:
# -----------------------------------------------------------------------------------
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'server@temba.io'
DEFAULT_FROM_EMAIL = 'server@temba.io'
EMAIL_HOST_PASSWORD = 'mypassword'  # placeholder -- override in settings.py
EMAIL_USE_TLS = True

# Used when sending email from within a flow and the user hasn't configured
# their own SMTP server.
FLOW_FROM_EMAIL = 'no-reply@temba.io'

# where recordings and exports are stored
AWS_STORAGE_BUCKET_NAME = 'dl-temba-io'
AWS_BUCKET_DOMAIN = AWS_STORAGE_BUCKET_NAME + '.s3.amazonaws.com'
STORAGE_ROOT_DIR = 'test_orgs' if TESTING else 'orgs'

# -----------------------------------------------------------------------------------
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone
# -----------------------------------------------------------------------------------
USE_TZ = True
TIME_ZONE = 'GMT'
USER_TIME_ZONE = 'Africa/Kigali'

MODELTRANSLATION_TRANSLATION_REGISTRY = "translation"

# -----------------------------------------------------------------------------------
# Default language used for this installation
# -----------------------------------------------------------------------------------
LANGUAGE_CODE = 'en-us'

# -----------------------------------------------------------------------------------
# Available languages for translation
# -----------------------------------------------------------------------------------
LANGUAGES = (
    ('en-us', _("English")),
    ('pt-br', _("Portuguese")),
    ('fr', _("French")),
    ('es', _("Spanish")))

DEFAULT_LANGUAGE = "en-us"
DEFAULT_SMS_LANGUAGE = "en-us"

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'your own secret key'  # placeholder -- must be overridden per deployment

EMAIL_CONTEXT_PROCESSORS = ('temba.utils.email.link_components',)

# -----------------------------------------------------------------------------------
# Directory Configuration
# -----------------------------------------------------------------------------------
PROJECT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)))
LOCALE_PATHS = (os.path.join(PROJECT_DIR, '../locale'),)
RESOURCES_DIR = os.path.join(PROJECT_DIR, '../resources')
FIXTURE_DIRS = (os.path.join(PROJECT_DIR, '../fixtures'),)
TESTFILES_DIR = os.path.join(PROJECT_DIR, '../testfiles')
STATICFILES_DIRS = (os.path.join(PROJECT_DIR, '../static'), os.path.join(PROJECT_DIR, '../media'), )
STATIC_ROOT = os.path.join(PROJECT_DIR, '../sitestatic')
STATIC_URL = '/sitestatic/'
COMPRESS_ROOT = os.path.join(PROJECT_DIR, '../sitestatic')
MEDIA_ROOT = os.path.join(PROJECT_DIR, '../media')
MEDIA_URL = "/media/"

# -----------------------------------------------------------------------------------
# Templates Configuration
# -----------------------------------------------------------------------------------
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(PROJECT_DIR, '../templates')],
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                'temba.context_processors.branding',
                'temba.orgs.context_processors.user_group_perms_processor',
                'temba.orgs.context_processors.unread_count_processor',
                'temba.channels.views.channel_status_processor',
                'temba.msgs.views.send_message_auto_complete_processor',
                'temba.api.views.webhook_status_processor',
                'temba.orgs.context_processors.settings_includer',
            ],
            'loaders': [
                # haml loaders take precedence over the stock Django ones
                'temba.utils.haml.HamlFilesystemLoader',
                'temba.utils.haml.HamlAppDirectoriesLoader',
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
                'django.template.loaders.eggs.Loader'
            ],
            'debug': False if TESTING else DEBUG
        },
    },
]

if TESTING:
    TEMPLATES[0]['OPTIONS']['context_processors'] += ('temba.tests.add_testing_flag_to_context', )

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'temba.utils.middleware.DisableMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'temba.middleware.BrandingMiddleware',
    'temba.middleware.OrgTimezoneMiddleware',
    'temba.middleware.FlowSimulationMiddleware',
    'temba.middleware.ActivateLanguageMiddleware',
    'temba.middleware.NonAtomicGetsMiddleware',
    'temba.utils.middleware.OrgHeaderMiddleware',
)

ROOT_URLCONF = 'temba.urls'

# other urls to add
APP_URLS = []

SITEMAP = ('public.public_index', 'public.public_blog', 'public.video_list', 'api')

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'django.contrib.gis',

    # django sitemaps
    'django.contrib.sitemaps',

    'redis',

    # mo-betta permission management
    'guardian',

    # rest framework for api access
    'rest_framework',
    'rest_framework.authtoken',

    # compress our CSS and js
    'compressor',

    # smartmin
    'smartmin',
    'smartmin.csv_imports',
    'smartmin.users',

    # django-timezone-field
    'timezone_field',

    # temba apps
    'temba.assets',
    'temba.auth_tweaks',
    'temba.api',
    'temba.public',
    'temba.schedules',
    'temba.orgs',
    'temba.contacts',
    'temba.channels',
    'temba.msgs',
    'temba.flows',
    'temba.reports',
    'temba.triggers',
    'temba.utils',
    'temba.campaigns',
    'temba.ivr',
    'temba.ussd',
    'temba.locations',
    'temba.values',
    'temba.airtime',
)

# the
# last installed app that uses smartmin permissions
PERMISSIONS_APP = 'temba.airtime'

LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['console'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        'null': {
            'class': 'logging.NullHandler',
        },
    },
    'loggers': {
        'pycountry': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            # silence noisy DisallowedHost warnings
            'handlers': ['null'],
            'propagate': False,
        },
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
    },
}

# -----------------------------------------------------------------------------------
# Branding Configuration
# -----------------------------------------------------------------------------------
BRANDING = {
    'rapidpro.io': {
        'slug': 'rapidpro',
        'name': 'RapidPro',
        'org': 'UNICEF',
        'colors': dict(primary='#0c6596'),
        'styles': ['brands/rapidpro/font/style.css'],
        'welcome_topup': 1000,
        'email': 'join@rapidpro.io',
        'support_email': 'support@rapidpro.io',
        'link': 'https://app.rapidpro.io',
        'api_link': 'https://api.rapidpro.io',
        'docs_link': 'http://docs.rapidpro.io',
        'domain': 'app.rapidpro.io',
        'favico': 'brands/rapidpro/rapidpro.ico',
        'splash': '/brands/rapidpro/splash.jpg',
        'logo': '/brands/rapidpro/logo.png',
        'allow_signups': True,
        'tiers': dict(import_flows=0, multi_user=0, multi_org=0),
        'bundles': [],
        'welcome_packs': [dict(size=5000, name="Demo Account"), dict(size=100000, name="UNICEF Account")],
        'description': _("Visually build nationally scalable mobile applications from anywhere in the world."),
        'credits': _("Copyright &copy; 2012-2017 UNICEF, Nyaruka. All Rights Reserved.")
    }
}
DEFAULT_BRAND = 'rapidpro.io'

# -----------------------------------------------------------------------------------
# Permission Management
# -----------------------------------------------------------------------------------

# this lets us easily create new permissions across our objects
PERMISSIONS = {
    '*': ('create',  # can create an object
          'read',  # can read an object, viewing it's details
          'update',  # can update an object
          'delete',  # can delete an object,
          'list'),  # can view a list of the objects

    'api.apitoken': ('refresh',),

    'api.resthook': ('api', 'list'),

    'api.webhookevent': ('api',),

    'api.resthooksubscriber': ('api',),

    'campaigns.campaign': ('api',
                           'archived',
                           ),

    'campaigns.campaignevent': ('api',),

    'contacts.contact': ('api',
                         'block',
                         'blocked',
                         'break_anon',
                         'customize',
                         'export',
                         'stopped',
                         'filter',
                         'history',
                         'import',
                         'omnibox',
                         'unblock',
                         'unstop',
                         'update_fields',
                         'update_fields_input'
                         ),

    'contacts.contactfield': ('api', 'json', 'managefields'),

    'contacts.contactgroup': ('api',),

    'ivr.ivrcall': ('start',),

    'locations.adminboundary': ('alias', 'api', 'boundaries', 'geometry'),

    'orgs.org': ('accounts',
                 'smtp_server',
                 'api',
                 'country',
                 'clear_cache',
                 'create_login',
                 'create_sub_org',
                 'download',
                 'edit',
                 'edit_sub_org',
                 'export',
                 'grant',
                 'home',
                 'import',
                 'join',
                 'languages',
                 'manage',
                 'manage_accounts',
                 'manage_accounts_sub_org',
                 'nexmo_configuration',
                 'nexmo_account',
                 'nexmo_connect',
                 'plivo_connect',
                 'profile',
                 'resthooks',
                 'service',
                 'signup',
                 'sub_orgs',
                 'surveyor',
                 'transfer_credits',
                 'transfer_to_account',
                 'trial',
                 'twilio_account',
                 'twilio_connect',
                 'webhook',
                 ),

    'orgs.usersettings': ('phone',),

    'channels.channel': ('api',
                         'bulk_sender_options',
                         'claim',
                         'claim_africas_talking',
                         'claim_android',
                         'claim_blackmyna',
                         'claim_chikka',
                         'claim_clickatell',
                         'claim_dart_media',
                         'claim_external',
                         'claim_facebook',
                         'claim_globe',
                         'claim_high_connection',
                         'claim_hub9',
                         'claim_infobip',
                         'claim_jasmin',
                         'claim_junebug',
                         'claim_kannel',
                         'claim_line',
                         'claim_m3tech',
                         'claim_mblox',
                         'claim_nexmo',
                         'claim_plivo',
                         'claim_shaqodoon',
                         'claim_smscentral',
                         'claim_start',
                         'claim_telegram',
                         'claim_twilio',
                         'claim_twiml_api',
                         'claim_twilio_messaging_service',
                         'claim_twitter',
                         'claim_verboice',
                         'claim_viber',
                         'claim_viber_public',
                         'create_viber',
                         'claim_vumi',
                         'claim_vumi_ussd',
                         'claim_yo',
                         'claim_zenvia',
                         'configuration',
                         'create_bulk_sender',
                         'create_caller',
                         'errors',
                         'facebook_whitelist',
                         'search_nexmo',
                         'search_numbers',
                         ),

    'channels.channelevent': ('api', 'calls'),

    'flows.flowstart': ('api',),

    'flows.flow': ('activity',
                   'activity_chart',
                   'activity_list',
                   'analytics',
                   'api',
                   'archived',
                   'broadcast',
                   'campaign',
                   'completion',
                   'copy',
                   'editor',
                   'export',
                   'export_results',
                   'filter',
                   'json',
                   'read',
                   'recent_messages',
                   'results',
                   'revisions',
                   'run_table',
                   'simulate',
                   'upload_action_recording',
                   ),

    'flows.ruleset': ('analytics',
                      'choropleth',
                      'map',
                      'results',
                      ),

    'msgs.msg': ('api',
                 'archive',
                 'archived',
                 'export',
                 'failed',
                 'filter',
                 'flow',
                 'inbox',
                 'label',
                 'outbox',
                 'sent',
                 'test',
                 'update',
                 ),

    'msgs.broadcast': ('api',
                       'detail',
                       'schedule',
                       'schedule_list',
                       'schedule_read',
                       'send',
                       ),

    'msgs.label': ('api', 'create', 'create_folder'),

    'orgs.topup': ('manage',),

    'triggers.trigger': ('archived',
                         'catchall',
                         'follow',
                         'inbound_call',
                         'keyword',
                         'missed_call',
                         'new_conversation',
                         'referral',
                         'register',
                         'schedule',
                         'ussd',
                         ),
}

# assigns the permissions that each group should have
GROUP_PERMISSIONS = {
    "Service Users": (  # internal Temba services have limited permissions
        'msgs.msg_create',
    ),
    "Alpha": (
    ),
    "Beta": (
    ),
    "Surveyors": (
        'contacts.contact_api',
        'contacts.contactfield_api',
        'flows.flow_api',
        'locations.adminboundary_api',
        'orgs.org_api',
        'orgs.org_surveyor',
        'msgs.msg_api',
    ),
    "Granters": (
        'orgs.org_grant',
    ),
    "Customer Support": (
        'auth.user_list',
        'auth.user_update',
        'contacts.contact_break_anon',
        'flows.flow_editor',
        'flows.flow_json',
        'flows.flow_read',
        'flows.flow_revisions',
        # NOTE(review): 'dashboard' is not declared under PERMISSIONS for
        # orgs.org -- verify this permission actually exists
        'orgs.org_dashboard',
        'orgs.org_grant',
        'orgs.org_manage',
        'orgs.org_update',
        'orgs.org_service',
        'orgs.topup_create',
        'orgs.topup_manage',
        'orgs.topup_update',
    ),
    "Administrators": (
        'airtime.airtimetransfer_list',
        'airtime.airtimetransfer_read',
        'api.apitoken_refresh',
        'api.resthook_api',
        'api.resthook_list',
        'api.resthooksubscriber_api',
        'api.webhookevent_api',
        'api.webhookevent_list',
        'api.webhookevent_read',
        'campaigns.campaign.*',
        'campaigns.campaignevent.*',
        'contacts.contact_api',
        'contacts.contact_block',
        'contacts.contact_blocked',
        'contacts.contact_create',
        'contacts.contact_customize',
        'contacts.contact_delete',
        'contacts.contact_export',
        'contacts.contact_filter',
        'contacts.contact_history',
        'contacts.contact_import',
        'contacts.contact_list',
        'contacts.contact_omnibox',
        'contacts.contact_read',
        'contacts.contact_stopped',
        'contacts.contact_unblock',
        'contacts.contact_unstop',
        'contacts.contact_update',
        'contacts.contact_update_fields',
        'contacts.contact_update_fields_input',
        'contacts.contactfield.*',
        'contacts.contactgroup.*',
        'csv_imports.importtask.*',
        'ivr.ivrcall.*',
        'ussd.ussdsession.*',
        'locations.adminboundary_alias',
        'locations.adminboundary_api',
        'locations.adminboundary_boundaries',
        'locations.adminboundary_geometry',
        'orgs.org_accounts',
        'orgs.org_smtp_server',
        'orgs.org_api',
        'orgs.org_country',
        'orgs.org_create_sub_org',
        'orgs.org_download',
        'orgs.org_edit',
        'orgs.org_edit_sub_org',
        'orgs.org_export',
        'orgs.org_home',
        'orgs.org_import',
        'orgs.org_languages',
        'orgs.org_manage_accounts',
        'orgs.org_manage_accounts_sub_org',
        'orgs.org_nexmo_account',
        'orgs.org_nexmo_connect',
        'orgs.org_nexmo_configuration',
        'orgs.org_plivo_connect',
        'orgs.org_profile',
        'orgs.org_resthooks',
        'orgs.org_sub_orgs',
        'orgs.org_transfer_credits',
        'orgs.org_transfer_to_account',
        'orgs.org_twilio_account',
        'orgs.org_twilio_connect',
        'orgs.org_webhook',
        'orgs.topup_list',
        'orgs.topup_read',
        'orgs.usersettings_phone',
        'orgs.usersettings_update',
        'channels.channel_claim_nexmo',
        'channels.channel_api',
        'channels.channel_bulk_sender_options',
        'channels.channel_claim',
        'channels.channel_claim_africas_talking',
        'channels.channel_claim_android',
        'channels.channel_claim_blackmyna',
        'channels.channel_claim_chikka',
        'channels.channel_claim_clickatell',
        'channels.channel_claim_dart_media',
        'channels.channel_claim_external',
        'channels.channel_claim_facebook',
        'channels.channel_claim_globe',
        'channels.channel_claim_high_connection',
        'channels.channel_claim_hub9',
        'channels.channel_claim_infobip',
        'channels.channel_claim_jasmin',
        'channels.channel_claim_junebug',
        'channels.channel_claim_kannel',
        'channels.channel_claim_line',
        'channels.channel_claim_mblox',
        'channels.channel_claim_m3tech',
        'channels.channel_claim_plivo',
        'channels.channel_claim_shaqodoon',
        'channels.channel_claim_smscentral',
        'channels.channel_claim_start',
        'channels.channel_claim_telegram',
        'channels.channel_claim_twilio',
        'channels.channel_claim_twiml_api',
        'channels.channel_claim_twilio_messaging_service',
        'channels.channel_claim_twitter',
        'channels.channel_claim_verboice',
        'channels.channel_claim_viber',
        'channels.channel_claim_viber_public',
        'channels.channel_create_viber',
        'channels.channel_claim_vumi',
        'channels.channel_claim_vumi_ussd',
        'channels.channel_claim_yo',
        'channels.channel_claim_zenvia',
        'channels.channel_configuration',
        'channels.channel_create',
        'channels.channel_create_bulk_sender',
        'channels.channel_create_caller',
        'channels.channel_facebook_whitelist',
        'channels.channel_delete',
        'channels.channel_list',
        'channels.channel_read',
        'channels.channel_search_nexmo',
        'channels.channel_search_numbers',
        'channels.channel_update',
        'channels.channelevent.*',
        'channels.channellog_list',
        'channels.channellog_read',
        'reports.report.*',
        'flows.flow.*',
        'flows.flowstart_api',
        'flows.flowlabel.*',
        'flows.ruleset.*',
        'schedules.schedule.*',
        'msgs.broadcast.*',
        'msgs.broadcastschedule.*',
        'msgs.label.*',
        'msgs.msg_api',
        'msgs.msg_archive',
        'msgs.msg_archived',
        'msgs.msg_delete',
        'msgs.msg_export',
        'msgs.msg_failed',
        'msgs.msg_filter',
        'msgs.msg_flow',
        'msgs.msg_inbox',
        'msgs.msg_label',
        'msgs.msg_outbox',
        'msgs.msg_sent',
        'msgs.msg_update',
        'triggers.trigger.*',
    ),
    "Editors": (
        'api.apitoken_refresh',
        'api.resthook_api',
        'api.resthook_list',
        'api.resthooksubscriber_api',
        'api.webhookevent_api',
        'api.webhookevent_list',
        'api.webhookevent_read',
        'airtime.airtimetransfer_list',
        'airtime.airtimetransfer_read',
        'campaigns.campaign.*',
        'campaigns.campaignevent.*',
        'contacts.contact_api',
        'contacts.contact_block',
        'contacts.contact_blocked',
        'contacts.contact_create',
        'contacts.contact_customize',
        'contacts.contact_delete',
        'contacts.contact_export',
        'contacts.contact_filter',
        'contacts.contact_history',
        'contacts.contact_import',
        'contacts.contact_list',
        'contacts.contact_omnibox',
        'contacts.contact_read',
        'contacts.contact_stopped',
        'contacts.contact_unblock',
        'contacts.contact_unstop',
        'contacts.contact_update',
        'contacts.contact_update_fields',
        'contacts.contact_update_fields_input',
        'contacts.contactfield.*',
        'contacts.contactgroup.*',
        'csv_imports.importtask.*',
        'ivr.ivrcall.*',
        'ussd.ussdsession.*',
        'locations.adminboundary_alias',
        'locations.adminboundary_api',
        'locations.adminboundary_boundaries',
        'locations.adminboundary_geometry',
        'orgs.org_api',
        'orgs.org_download',
        'orgs.org_export',
        'orgs.org_home',
        'orgs.org_import',
        'orgs.org_profile',
        'orgs.org_resthooks',
        'orgs.org_webhook',
        'orgs.topup_list',
        'orgs.topup_read',
        'orgs.usersettings_phone',
        'orgs.usersettings_update',
        'channels.channel_api',
        'channels.channel_bulk_sender_options',
        'channels.channel_claim',
        'channels.channel_claim_africas_talking',
        'channels.channel_claim_android',
        'channels.channel_claim_blackmyna',
        'channels.channel_claim_chikka',
        'channels.channel_claim_clickatell',
        'channels.channel_claim_dart_media',
        'channels.channel_claim_external',
        'channels.channel_claim_facebook',
        'channels.channel_claim_globe',
        'channels.channel_claim_high_connection',
        'channels.channel_claim_hub9',
        'channels.channel_claim_infobip',
        'channels.channel_claim_jasmin',
        'channels.channel_claim_junebug',
        'channels.channel_claim_kannel',
        'channels.channel_claim_line',
        'channels.channel_claim_mblox',
        'channels.channel_claim_m3tech',
        'channels.channel_claim_plivo',
        'channels.channel_claim_shaqodoon',
        'channels.channel_claim_smscentral',
        'channels.channel_claim_start',
        'channels.channel_claim_telegram',
        'channels.channel_claim_twilio',
        'channels.channel_claim_twiml_api',
        'channels.channel_claim_twilio_messaging_service',
        'channels.channel_claim_twitter',
        'channels.channel_claim_verboice',
        'channels.channel_claim_viber',
        'channels.channel_claim_viber_public',
        'channels.channel_create_viber',
        'channels.channel_claim_vumi',
        'channels.channel_claim_vumi_ussd',
        'channels.channel_claim_yo',
        'channels.channel_claim_zenvia',
        'channels.channel_configuration',
        'channels.channel_create',
        'channels.channel_create_bulk_sender',
        'channels.channel_create_caller',
        'channels.channel_delete',
        'channels.channel_list',
        'channels.channel_read',
        'channels.channel_search_numbers',
        'channels.channel_update',
        'channels.channelevent.*',
        'reports.report.*',
        'flows.flow.*',
        'flows.flowstart_api',
        'flows.flowlabel.*',
        'flows.ruleset.*',
        'schedules.schedule.*',
        'msgs.broadcast.*',
        'msgs.broadcastschedule.*',
        'msgs.label.*',
        'msgs.msg_api',
        'msgs.msg_archive',
        'msgs.msg_archived',
        'msgs.msg_delete',
        'msgs.msg_export',
        'msgs.msg_failed',
        'msgs.msg_filter',
        'msgs.msg_flow',
        'msgs.msg_inbox',
        'msgs.msg_label',
        'msgs.msg_outbox',
        'msgs.msg_sent',
        'msgs.msg_update',
        'triggers.trigger.*',
    ),
    "Viewers": (
        'api.resthook_list',
        'campaigns.campaign_archived',
        'campaigns.campaign_list',
        'campaigns.campaign_read',
        'campaigns.campaignevent_read',
        'contacts.contact_blocked',
        'contacts.contact_export',
        'contacts.contact_filter',
        'contacts.contact_history',
        'contacts.contact_list',
        'contacts.contact_read',
        'contacts.contact_stopped',
        'locations.adminboundary_boundaries',
        'locations.adminboundary_geometry',
        'locations.adminboundary_alias',
        'orgs.org_download',
        'orgs.org_export',
        'orgs.org_home',
        'orgs.org_profile',
        'orgs.topup_list',
        'orgs.topup_read',
        'channels.channel_list',
        'channels.channel_read',
        'channels.channelevent_calls',
        'flows.flow_activity',
        'flows.flow_activity_chart',
        'flows.flow_archived',
        'flows.flow_campaign',
        'flows.flow_completion',
        'flows.flow_export',
        'flows.flow_export_results',
        'flows.flow_filter',
        'flows.flow_list',
        'flows.flow_read',
        'flows.flow_editor',
        'flows.flow_json',
        'flows.flow_recent_messages',
        'flows.flow_results',
        'flows.flow_run_table',
        'flows.flow_simulate',
        'flows.ruleset_analytics',
        'flows.ruleset_results',
        'flows.ruleset_choropleth',
        'msgs.broadcast_schedule_list',
        'msgs.broadcast_schedule_read',
        'msgs.msg_archived',
        'msgs.msg_export',
        'msgs.msg_failed',
        'msgs.msg_filter',
        'msgs.msg_flow',
        'msgs.msg_inbox',
        'msgs.msg_outbox',
        'msgs.msg_sent',
        'triggers.trigger_archived',
        'triggers.trigger_list',
    )
}

# -----------------------------------------------------------------------------------
# Login / Logout
# -----------------------------------------------------------------------------------
LOGIN_URL = "/users/login/"
LOGOUT_URL = "/users/logout/"
LOGIN_REDIRECT_URL = "/org/choose/"
LOGOUT_REDIRECT_URL = "/"

# -----------------------------------------------------------------------------------
# Guardian Configuration
# -----------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'smartmin.backends.CaseInsensitiveBackend',
    'guardian.backends.ObjectPermissionBackend',
)

ANONYMOUS_USER_NAME = 'AnonymousUser'

# -----------------------------------------------------------------------------------
# Our test runner is standard but with ability to exclude apps
# -----------------------------------------------------------------------------------
TEST_RUNNER = 'temba.tests.ExcludeTestRunner'
TEST_EXCLUDE = ('smartmin',)

#
# -----------------------------------------------------------------------------------
# Debug Toolbar
# -----------------------------------------------------------------------------------
INTERNAL_IPS = iptools.IpRangeList(
    '127.0.0.1',
    '192.168.0.10',
    '192.168.0.0/24',  # network block
    '0.0.0.0'
)

DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,  # disable redirect traps
}

# -----------------------------------------------------------------------------------
# Crontab Settings ..
# -----------------------------------------------------------------------------------
CELERYBEAT_SCHEDULE = {
    "retry-webhook-events": {
        'task': 'retry_events_task',
        'schedule': timedelta(seconds=300),
    },
    "check-channels": {
        'task': 'check_channels_task',
        'schedule': timedelta(seconds=300),
    },
    "schedules": {
        'task': 'check_schedule_task',
        'schedule': timedelta(seconds=60),
    },
    "campaigns": {
        'task': 'check_campaigns_task',
        'schedule': timedelta(seconds=60),
    },
    "check-flows": {
        'task': 'check_flows_task',
        'schedule': timedelta(seconds=60),
    },
    "check-flow-timeouts": {
        'task': 'check_flow_timeouts_task',
        'schedule': timedelta(seconds=20),
    },
    "check-credits": {
        'task': 'check_credits_task',
        'schedule': timedelta(seconds=900)
    },
    "check-messages-task": {
        'task': 'check_messages_task',
        'schedule': timedelta(seconds=300)
    },
    "fail-old-messages": {
        'task': 'fail_old_messages',
        'schedule': crontab(hour=0, minute=0),
    },
    "purge-broadcasts": {
        'task': 'purge_broadcasts_task',
        'schedule': crontab(hour=1, minute=0),
    },
    "clear-old-msg-external-ids": {
        'task': 'clear_old_msg_external_ids',
        'schedule': crontab(hour=2, minute=0),
    },
    "trim-channel-log": {
        'task': 'trim_channel_log_task',
        'schedule': crontab(hour=3, minute=0),
    },
    "calculate-credit-caches": {
        'task': 'calculate_credit_caches',
        'schedule': timedelta(days=3),
    },
    "squash-flowruncounts": {
        'task': 'squash_flowruncounts',
        'schedule': timedelta(seconds=300),
    },
    "squash-flowpathcounts": {
        'task': 'squash_flowpathcounts',
        'schedule': timedelta(seconds=300),
    },
    "prune-flowpathrecentsteps": {
        'task': 'prune_flowpathrecentsteps',
        'schedule': timedelta(seconds=300),
    },
    "squash-channelcounts": {
        'task': 'squash_channelcounts',
        'schedule': timedelta(seconds=300),
    },
    "squash-systemlabels": {
        'task': 'squash_systemlabels',
        'schedule': timedelta(seconds=300),
    },
    "squash-topupcredits": {
        'task': 'squash_topupcredits',
        'schedule': timedelta(seconds=300),
    },
    "squash-contactgroupcounts": {
        'task': 'squash_contactgroupcounts',
        'schedule': timedelta(seconds=300),
    },
}

# Mapping of task name to task function path, used when CELERY_ALWAYS_EAGER is set to True
CELERY_TASK_MAP = {
    'send_msg_task': 'temba.channels.tasks.send_msg_task',
    'start_msg_flow_batch': 'temba.flows.tasks.start_msg_flow_batch_task',
    'handle_event_task': 'temba.msgs.tasks.handle_event_task',
}

# -----------------------------------------------------------------------------------
# Async tasks with celery
# -----------------------------------------------------------------------------------
REDIS_HOST = 'localhost'
REDIS_PORT = 6379

# we use a redis db of 10 for testing so that we maintain caches for dev
REDIS_DB = 10 if TESTING else 15

BROKER_URL = 'redis://%s:%d/%d' % (REDIS_HOST, REDIS_PORT, REDIS_DB)

# by default, celery doesn't have any timeout on our redis connections, this fixes that
BROKER_TRANSPORT_OPTIONS = {'socket_timeout': 5}

CELERY_RESULT_BACKEND = None
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'

IS_PROD = False
HOSTNAME = "localhost"

# The URL and port of the proxy server to use when needed (if any, in requests format)
OUTGOING_PROXIES = {}

# -----------------------------------------------------------------------------------
# Cache to Redis
# -----------------------------------------------------------------------------------
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://%s:%s/%s" % (REDIS_HOST, REDIS_PORT, REDIS_DB),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}

# -----------------------------------------------------------------------------------
# Django-rest-framework configuration
# -----------------------------------------------------------------------------------
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'temba.api.support.APITokenAuthentication',
    ),
    'DEFAULT_THROTTLE_CLASSES': (
        'temba.api.support.OrgRateThrottle',
    ),
    'DEFAULT_THROTTLE_RATES': {
        'v2': '2500/hour',
        'v2.contacts': '2500/hour',
        'v2.messages': '2500/hour',
        'v2.runs': '2500/hour',
        'v2.api': '2500/hour',
    },
    'PAGE_SIZE': 250,
    'DEFAULT_RENDERER_CLASSES': (
        'temba.api.support.DocumentationRenderer',
        'rest_framework.renderers.JSONRenderer',
        'rest_framework_xml.renderers.XMLRenderer',
    ),
    'EXCEPTION_HANDLER': 'temba.api.support.temba_exception_handler',
    'UNICODE_JSON': False
}
REST_HANDLE_EXCEPTIONS = not TESTING

# -----------------------------------------------------------------------------------
# Django Compressor configuration
# -----------------------------------------------------------------------------------

if TESTING:
    # if only testing, disable coffeescript and less compilation
    COMPRESS_PRECOMPILERS = ()
else:
    COMPRESS_PRECOMPILERS = (
        ('text/less', 'lessc --include-path="%s" {infile} {outfile}' % os.path.join(PROJECT_DIR, '../static', 'less')),
        ('text/coffeescript', 'coffee --compile --stdio')
    )
COMPRESS_ENABLED = False
COMPRESS_OFFLINE = False

# build up our offline compression context based on available brands
COMPRESS_OFFLINE_CONTEXT = []
for brand in BRANDING.values():
    context = dict(STATIC_URL=STATIC_URL, base_template='frame.html', debug=False, testing=False)
    context['brand'] = dict(slug=brand['slug'], styles=brand['styles'])
    COMPRESS_OFFLINE_CONTEXT.append(context)

MAGE_API_URL = 'http://localhost:8026/api/v1'
MAGE_AUTH_TOKEN = '___MAGE_TOKEN_YOU_PICK__'  # placeholder -- override per deployment

#
----------------------------------------------------------------------------------- # RapidPro configuration settings # ----------------------------------------------------------------------------------- ###### # DANGER: only turn this on if you know what you are doing! # could cause messages to be sent to live customer aggregators SEND_MESSAGES = False ###### # DANGER: only turn this on if you know what you are doing! # could cause external APIs to be called in test environment SEND_WEBHOOKS = False ###### # DANGER: only turn this on if you know what you are doing! # could cause emails to be sent in test environment SEND_EMAILS = False ###### # DANGER: only turn this on if you know what you are doing! # could cause airtime transfers in test environment SEND_AIRTIME = False ###### # DANGER: only turn this on if you know what you are doing! # could cause calls in test environments SEND_CALLS = False MESSAGE_HANDLERS = ['temba.triggers.handlers.TriggerHandler', 'temba.flows.handlers.FlowHandler', 'temba.triggers.handlers.CatchAllHandler'] # ----------------------------------------------------------------------------------- # Store sessions in our cache # ----------------------------------------------------------------------------------- SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" SESSION_CACHE_ALIAS = "default" # ----------------------------------------------------------------------------------- # 3rd Party Integration Keys # ----------------------------------------------------------------------------------- TWITTER_API_KEY = os.environ.get('TWITTER_API_KEY', 'MISSING_TWITTER_API_KEY') TWITTER_API_SECRET = os.environ.get('TWITTER_API_SECRET', 'MISSING_TWITTER_API_SECRET') SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY', '') LIBRATO_USER = os.environ.get('LIBRATO_USER', '') LIBRATO_TOKEN = os.environ.get('LIBRATO_TOKEN', '') # ----------------------------------------------------------------------------------- # IP Addresses # These are the 
externally accessible IP addresses of the servers running RapidPro. # Needed for channel types that authenticate by whitelisting public IPs. # # You need to change these to real addresses to work with these. # ----------------------------------------------------------------------------------- IP_ADDRESSES = ('172.16.10.10', '162.16.10.20')
The Duke and Duchess of Cambridge have gone on their honeymoon, St James’s Palace has confirmed. William and Kate are understood to have left on Monday for an undisclosed destination, more than a week after their wedding in Westminster Abbey. A spokesman for St James’s Palace said he would not confirm their honeymoon destination. According to reports, the couple may have headed for the Seychelles. “We are not confirming and we are not commenting on speculation on where they may be going on their private honeymoon, we are just confirming that they have gone,” he said. The Prince returned to his job as an RAF search and rescue helicopter pilot last week after marrying Kate in a spectacular ceremony celebrated around the world. According to reports, his car was seen leaving Anglesey, North Wales, on Monday accompanied by a police Range Rover piled high with luggage. St James’s Palace declined to say how long the honeymoon would be but confirmed that William had taken two weeks’ leave from operational duties. As well as the Seychelles, Africa, Jordan, and private islands in the Caribbean such as Necker and Mustique have been suggested as possible honeymoon destinations. More royal wedding pictures: click on the link!
# This file is part of twitter-followers.
#
# Copyright (C) 2013 Marios Visvardis <visvardis.marios@gmail.com>
#
# twitter-followers is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# twitter-followers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with twitter-followers. If not, see <http://www.gnu.org/licenses/>.

import sys

import networkx as nx

from client import Client_error


class Collector(object):
    """Recursively visit follower nodes and save their connections in a graph.

    Usage:
        Collector must be initialized with a connected Client instance.
        Call collect() to start the process and collect the results via the
        ``graph`` attribute (a networkx.DiGraph; an edge (a, b) means
        "a follows b").
    """

    def __init__(self, client, conf):
        self._client = client
        self._conf = conf
        # Users whose follower lists have already been fetched.
        self._visited = set()
        self.graph = nx.DiGraph()

    def collect(self, start_node):
        """Start the recursive crawl at *start_node*, bounded by conf.depth."""
        self._visit(start_node, self._conf.depth)

    def _visit(self, uid, depth):
        """Fetch the followers of *uid*, record edges, and recurse.

        *depth* is the number of remaining levels; recursion stops at 0.
        """
        # terminate recursion
        if depth == 0:
            return
        # FIX: mark *before* fetching/recursing. Previously a node was only
        # added to _visited after the full recursion returned, so diamond or
        # cyclic follower graphs re-fetched the same user's followers many
        # times (wasting API quota); the final edge set is built the same way.
        if uid in self._visited:
            return
        self._visited.add(uid)
        depth -= 1
        try:
            # Numeric ids are queried as user_id, anything else as screen_name.
            try:
                cuid = int(uid)
                ctype = 'user_id'
            except (TypeError, ValueError):  # FIX: was a bare except
                cuid = uid
                ctype = 'screen_name'
            followers = self._client.get_followers(**{ctype: cuid})
        except Client_error as e:
            # Preserve original CLI behavior: report the API error and abort.
            sys.stderr.write('Error: %s\n' % str(e))
            sys.exit(1)
        print('%s followers: %d' % (str(uid), len(followers)))
        for follower in followers:
            # Edge is recorded even for already-visited followers; DiGraph
            # de-duplicates repeated add_edge calls.
            self.graph.add_edge(follower, uid)
            if follower in self._visited:
                continue
            self._visit(follower, depth)
Home › Support › eCommerce Gem Plus › Couple of .php pages are out of date. This topic contains 1 reply, has 2 voices, and was last updated by ProDesign Support 1 month, 1 week ago. I am seeing the above error; please suggest what I should do. Can you please tell us when you encounter this issue? It would be more helpful if you could share a screenshot or your website URL.
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""SCOAP3 per-nation/per-affiliation reporting web handlers.

Python 2 module (uses ``print >> req``, ``unicode``, ``iteritems``). Each
public function is a mod_python-style request handler that writes an HTML
page or a CSV attachment to *req*.
"""

from cgi import escape
from urllib import urlencode

from invenio.webinterface_handler_config import HTTP_BAD_REQUEST, SERVER_RETURN
from invenio.webpage import pagefooteronly, pageheaderonly, page
from invenio.search_engine import perform_request_search
from invenio.search_engine import (get_coll_i18nname,
                                   get_record,
                                   get_collection_reclist,
                                   get_creation_date)
from invenio.dbquery import run_sql
from invenio.utils import NATIONS_DEFAULT_MAP, multi_replace, get_doi
from invenio.bibrecord import record_get_field_values, record_get_field_value, field_get_subfield_values

import re

# All known nations plus a sentinel bucket for affiliations that could not
# be matched automatically.
_AFFILIATIONS = (sorted(list(set(NATIONS_DEFAULT_MAP.values()))) +
                 ['HUMAN CHECK'])

# The SCOAP3 journal collections reported on throughout this module.
CFG_JOURNALS = ['Acta',
                'Advances in High Energy Physics',
                'Chinese Physics C',
                'European Physical Journal C',
                'Journal of Cosmology and Astroparticle Physics',
                'Journal of High Energy Physics',
                'New Journal of Physics',
                'Nuclear Physics B',
                'Physics Letters B',
                'Progress of Theoretical and Experimental Physics']

# Display name -> tuple of search variants used to find records from that
# (mostly US) institution. Some values are truncated on purpose to match
# truncated metadata (e.g. 'South Dakota School of Mines and Tec').
CFG_SELECTED_AFF = {'Andrews University': ('Andrews University',),
                    'Arkansas State University': ('Arkansas State University',),
                    'Black Hills State University': ('Black Hills State University',),
                    'Boise State University': ('Boise State University',),
                    'Brookhaven National Laboratory': ('Brookhaven National Laboratory',),
                    'Brown University': ('Brown University',),
                    'Chicago State University': ('Chicago State University',),
                    'Columbia University': ('Columbia University',),
                    'Creighton University': ('Creighton University',),
                    'Fairfield University': ('Fairfield University',),
                    'George Washington University': ('George Washington University',),
                    'Hampton University': ('Hampton University',),
                    'Houston Advanced Research Center': ('Houston Advanced Research Center',),
                    'Janelia Farm Research Campus': ('Janelia Farm Research Campus',),
                    'Long Island University': ('Long Island University',),
                    'Louisiana Tech University': ('Louisiana Tech University',),
                    'Luther College': ('Luther College',),
                    'Manhattan College': ('Manhattan College',),
                    'Milwaukee School of Engineering': ('Milwaukee School of Engineering',),
                    'Mississippi State University': ('Mississippi State University',),
                    'Muhlenberg College': ('Muhlenberg College',),
                    'New York City College of Technology': ('New York City College of Technology',),
                    'North Carolina Central University': ('North Carolina Central University',),
                    'Northern Illinois University': ('Northern Illinois University',),
                    'Oklahoma State University': ('Oklahoma State University',),
                    'Pacific Lutheran University': ('Pacific Lutheran University',),
                    'Philander Smith College': ('Philander Smith College',),
                    'Rutgers University': ('Rutgers University',),
                    'South Dakota School of Mines and Technology': ('South Dakota School of Mines and Tec',),
                    'Stanford University': ('Stanford University',),
                    'State University of New York (or SUNY) Albany': ('SUNY Albany',
                                                                      'University at Albany (SUNY)',
                                                                      'Albany'),
                    'State University of New York (or SUNY) Buffalo': ('University at Buffalo',
                                                                       'State University of New York at Buffalo'),
                    'Syracuse University': ('Syracuse University',),
                    'Tennessee Tech University': ('Tennessee Tech University',),
                    'Texas Tech University': ('Texas Tech University',),
                    'The George Washington University': ('The George Washington University',),
                    ('The Graduate School and University Center, '
                     'The City University of New York'): (('The Graduate School and University Center, '
                                                          'The City University o'),),
                    'The Rockefeller University': ('The Rockefeller University',),
                    'The University of Alabama, Tuscaloosa': ('The University of Alabama, Tuscaloosa',),
                    'The University of Mississippi': ('The University of Mississippi',),
                    'Triangle Universities Nuclear Laboratory': ('Triangle Universities Nuclear Laboratory',),
                    'University of Connecticut': ('University of Connecticut',),
                    'University of Hawaii': ('University of Hawaii',),
                    'University of Houston': ('University of Houston',),
                    'University of Puerto Rico': ('University of Puerto Rico',),
                    'University of South Dakota': ('University of South Dakota',),
                    'Utah Valley University': ('Utah Valley University',),
                    'Virginia Military Institute': ('Virginia Military Institute',),
                    'Wayne State University': ('Wayne State University',),
                    'Wayne University': ('Wayne State university',),
                    'Western Michigan University': ('Western Michigan University',),
                    'Yale University': ('Yale University',)}


def _build_query(nation):
    """Return a search query matching *nation* in the 100__w or 700__w field."""
    return '100__w:"{0}" OR 700__w:"{0}"'.format(nation)


def index(req):
    """Render an HTML table of article counts per nation with drill-down links."""
    req.content_type = "text/html"
    req.write(pageheaderonly("Nation numbers", req=req))
    req.write("<h1>Nation numbers</h1>")
    req.flush()
    req.write("<table>\n")
    tr = ("<tr>"
          "<td>{0}</td>"
          "<td><a href='/search?{1}&sc=1'>{2}</a></td>"
          "<td><a href='/nations.py/articles?i={3}' "
          "target='_blank'>Articles</a> "
          "(<a href='/nations.py/articles?mode=text&amp;i={3}'>text</a>)"
          "</td><tr>\n")
    for i, nation in enumerate(_AFFILIATIONS):
        query = _build_query(nation)
        results = perform_request_search(p=query, of='intbitset')
        req.write(tr.format(escape(nation),
                            escape(urlencode([("p", query)]), True),
                            len(results),
                            i))
        req.flush()
    req.write("</table>\n")
    req.write(pagefooteronly(req=req))
    return ""


def late(req):
    """Render, per journal, each record's DOI registration vs SCOAP3 arrival.

    Row background encodes the lag: green for arrival before DOI registration,
    orange for less than a day, red for a day or more, grey when no DOI
    registration date is known.
    """
    req.content_type = "text/html"
    print >> req, pageheaderonly("Late journals", req=req)
    th = ("<tr><th>DOI</th><th>Title</th><th>DOI registration</th>"
          "<th>Arrival in SCOAP3</th></tr>")
    tr = ("<tr style='background-color: {0};'><td>"
          "<a href='http://dx.doi.org/{1}' target='_blank'>{2}</td>"
          "<td>{3}</td><td>{4}</td><td>{5}</td></tr>")
    sql_bibrec = "SELECT creation_date FROM bibrec WHERE id=%s"
    sql_doi = "SELECT creation_date FROM doi WHERE doi=%s"
    for journal in CFG_JOURNALS:
        print >> req, "<h2>%s</h2>" % escape(get_coll_i18nname(journal))
        results = get_collection_reclist(journal)
        print >> req, "<table>"
        print >> req, th
        for recid in results:
            creation_date = run_sql(sql_bibrec, (recid, ))[0][0]
            record = get_record(recid)
            doi = record_get_field_value(record, '024', '7', code='a')
            title = record_get_field_value(record, '245', code='a')
            doi_date = run_sql(sql_doi, (doi, ))
            background = "#eee"
            if doi_date:
                doi_date = doi_date[0][0]
                if (creation_date - doi_date).days < 0:
                    background = "#66FF00"
                elif (creation_date - doi_date).days < 1:
                    background = "#FF6600"
                else:
                    background = "#FF0000"
            else:
                doi_date = ''
            print >> req, tr.format(background,
                                    escape(doi, True),
                                    escape(doi),
                                    title,
                                    doi_date,
                                    creation_date)
        print >> req, "</table>"


def articles(req, i, mode='html'):
    """List the articles of nation ``_AFFILIATIONS[i]`` per journal.

    *mode* 'text' streams a plain-text attachment; otherwise an HTML page
    is returned.
    """
    try:
        i = int(i)
        assert 0 <= i < len(_AFFILIATIONS)
    except:  # NOTE(review): bare except also hides KeyboardInterrupt
        raise SERVER_RETURN(HTTP_BAD_REQUEST)
    nation = _AFFILIATIONS[i]
    ret = []
    page_title = "SCOAP3 Articles by authors from %s" % nation
    if mode == 'text':
        req.content_type = "text/plain; charset=utf8"
        req.headers_out['content-disposition'] = ('attachment; filename=%s.txt'
                                                  % nation)
    else:
        req.content_type = "text/html"
    if mode == 'text':
        print >> req, page_title
        print >> req, "-" * len(page_title)
    query = _build_query(nation)
    for journal in CFG_JOURNALS:
        results = perform_request_search(p=query, cc=journal, of='intbitset')
        if not results:
            continue
        # NOTE(review): "</h2" is missing its closing '>' — likely a typo
        # producing malformed HTML (kept as-is; fixing is a behavior change).
        ret.append("<h2>%s (%s)</h2" % (escape(get_coll_i18nname(journal)),
                                        len(results)))
        ret.append("<p><ul>")
        if mode == 'text':
            print >> req, ""
            print >> req, get_coll_i18nname(journal)
        for recid in results:
            record = get_record(recid)
            title = record_get_field_value(record, '245', code='a')
            doi = record_get_field_value(record, '024', '7', code='a')
            if mode == 'text':
                print >> req, "http://dx.doi.org/%s" % doi
            li = ("<li><a href='http://dx.doi.org/{0}' "
                  "target='_blank'>{1}</a>: {2}</li>")
            ret.append(li.format(escape(doi, True), escape(doi), title))
        ret.append("</ul></p>")
    body = '\n'.join(ret)
    if mode == 'text':
        return ""
    return page(req=req, title=page_title, body=body)


def csv(req):
    """Stream a CSV of article counts: one row per nation, one column per journal."""
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = 'attachment; filename=scoap3.csv'
    header = (','.join(['Nation'] +
                       [get_coll_i18nname(journal) for journal in CFG_JOURNALS]))
    print >> req, header
    for nation in _AFFILIATIONS:
        query = _build_query(nation)
        line = (','.join([nation] +
                         [str(len(perform_request_search(p=query, cc=journal, of='intbitset')))
                          for journal in CFG_JOURNALS]))
        print >> req, line


def create_search_from_affiliation(aff):
    """Join the CFG_SELECTED_AFF variants of *aff* into a '|'-separated pattern."""
    return '|'.join(t for t in CFG_SELECTED_AFF[aff])


def us_affiliations(req):
    """List raw affiliation strings that look US-based, sorted by a normalized key."""
    from invenio.search_engine_utils import get_fieldvalues
    req.content_type = "text/html"
    print >> req, pageheaderonly("USA affiliations", req=req)
    affiliations = []
    tmp = []
    # Collect author/co-author affiliation subfields (100/700 $u and $v).
    tmp.extend(get_fieldvalues(perform_request_search(p="*"), '100__u', False))
    tmp.extend(get_fieldvalues(perform_request_search(p="*"), '100__v', False))
    tmp.extend(get_fieldvalues(perform_request_search(p="*"), '700__u', False))
    tmp.extend(get_fieldvalues(perform_request_search(p="*"), '700__v', False))

    def _find_usa(x):
        # Substring heuristic for US affiliations.
        return ("United States of America" in x
                or "United States" in x
                or "USA" in x
                or "U.S.A" in x)
    affiliations.extend(filter(_find_usa, tmp))
    affiliations = set(affiliations)
    # Strip country/boilerplate words so similar affiliations sort together.
    replaces = [('United States of America', ''),
                ("United States", ''),
                ("USA", ''),
                ("U.S.A", ''),
                ("University", ''),
                ("State", ''),
                ('Department of Physics and Astronomy', ""),
                ('Department of Physics', ""),
                ('Department', ''),
                (",", '')]
    affs = map(lambda x: multi_replace(x, replaces).strip(), affiliations)
    affiliations2 = zip(affiliations, affs)
    for a in sorted(affiliations2, key=lambda aff: aff[1]):
        req.write(a[0]+'<br />')
    req.write(pagefooteronly(req=req))
    return ""


def us_affiliations_csv(req):
    """Stream a CSV of per-journal article counts for each selected US affiliation."""
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = 'attachment; filename=us_aff.csv'
    header = (';'.join(['University'] +
                       [get_coll_i18nname(journal) for journal in CFG_JOURNALS] +
                       ['sum']))
    print >> req, header
    for university in sorted(CFG_SELECTED_AFF):
        line = university
        count = 0
        search = create_search_from_affiliation(university)
        for collection in CFG_JOURNALS:
            # Regex search over the '|'-joined affiliation variants.
            res = perform_request_search(p='/%s/' % (search,), c=collection)
            line = line + ";" + str(len(res))
            count = count + len(res)
        print >> req, line+";"+str(count)


def usa_papers(req):
    """Render per-affiliation, per-journal lists of links to SCOAP3 records."""
    req.content_type = "text/html"
    print >> req, pageheaderonly("USA papers for selected affiliations", req=req)
    li = "<li><a href='https://repo.scoap3.org/record/{0}'>{1}</a></li>"
    ## Print the list of links to the articles.
    for university in CFG_SELECTED_AFF:
        print >> req, "<h2>%s</h2>" % (str(university),)
        search = create_search_from_affiliation(university)
        for collection in CFG_JOURNALS:
            res = perform_request_search(p='/%s/' % (search,), c=collection)
            if len(res):
                print >> req, "<h3>%s (%i)</h3>" % (str(collection), len(res))
                print >> req, "<ul>"
                for rec_id in res:
                    rec = get_record(rec_id)
                    # 245 $a (title) pulled straight out of the record structure.
                    line = li.format(str(rec_id),
                                     str(rec['245'][0][0][0][1]))
                    print >> req, line
                print >> req, "</ul>"
    req.write(pagefooteronly(req=req))
    return ""


def usa_papers_csv(req):
    """Stream a text listing of titles and record URLs per selected affiliation."""
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = ('attachment; '
                                              'filename=usa_papers.csv')
    # NOTE(review): this template uses %-style placeholders but is filled with
    # .format() below, which leaves "%s" literally in the output; it should be
    # `li % (...)` or a "{0}; .../{1}" template (kept as-is in this doc pass).
    li = "%s; https://repo.scoap3.org/record/%s"
    ## Print the list of links to the articles.
    for university in CFG_SELECTED_AFF:
        print >> req, university
        search = create_search_from_affiliation(university)
        for collection in CFG_JOURNALS:
            res = perform_request_search(p='(%s)' % (search,), c=collection)
            if len(res):
                print >> req, collection
                for rec_id in res:
                    rec = get_record(rec_id)
                    line = li.format(str(rec['245'][0][0][0][1]), str(rec_id))
                    print >> req, line
        # Blank separator lines between affiliations.
        print >> req, ""
        print >> req, ""
        print >> req, ""


def papers_by_country_csv(req, country):
    """Stream a CSV of papers with at least one author from *country*.

    Columns: #, title, authors (capped at ~10 then "et al"), journal, DOI,
    Inspire record URL.
    """
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = ('attachment; '
                                              'filename=papers_by_country.csv')
    ## Print the list of links to the articles.
    count = 1
    print >> req, country
    search = "100__w:'%s' OR 700__w:'%s'" % (country, country)
    res = perform_request_search(p='%s' % (search,))
    print >> req, "#;Title;Author;Journal;DOI;Inspire record"
    if len(res):
        for rec_id in res:
            # Allows up to 11 names in total before switching to "et al".
            author_count = 11
            rec = get_record(rec_id)
            title = ''
            authors = ''
            journal = ''
            doi = ''
            inspire_record = ''
            if '245' in rec:
                # Strip embedded HTML/XML tags from the title.
                title = re.sub("<.*?>", "", rec['245'][0][0][0][1])
            if '100' in rec:
                authors = rec['100'][0][0][0][1]
            if '700' in rec:
                for auth in rec['700']:
                    if author_count > 1:
                        authors += " / %s" % (auth[0][0][1],)
                        author_count -= 1
                    elif author_count == 1:
                        authors += " / et al"
                        author_count -= 1
                    else:
                        break
            # NOTE(review): assumes field 773 is always present — a record
            # without it raises KeyError here; confirm against the data set.
            for sub in rec['773'][0][0]:
                if 'p' in sub[0]:
                    journal = sub[1]
            doi = get_doi(rec_id)
            if '035' in rec:
                for f in rec['035'][0][0]:
                    if 'a' in f:
                        inspire_record = 'http://inspirehep.net/record/%s' % (f[1],)
            print >> req, "%s;%s;%s;%s;%s;%s" % (count, title, authors,
                                                 journal, doi, inspire_record)
            count += 1


def papers_by_country_with_affs_csv(req, country):
    """Like papers_by_country_csv, but adds one row per author with affiliations."""
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = ('attachment; '
                                              'filename=papers_by_country.csv')
    ## Print the list of links to the articles.
    count = 1
    print >> req, country
    search = "100__w:'%s' OR 700__w:'%s'" % (country, country)
    res = perform_request_search(p='%s' % (search,))
    print >> req, "#;Title;Journal;DOI;Inspire record;Author;Affiliations"
    if len(res):
        for rec_id in res:
            author_count = 11
            rec = get_record(rec_id)
            title = ''
            authors = ''
            journal = ''
            doi = ''
            inspire_record = ''
            if '245' in rec:
                title = re.sub("<.*?>", "", rec['245'][0][0][0][1])
            for sub in rec['773'][0][0]:
                if 'p' in sub[0]:
                    journal = sub[1]
            doi = get_doi(rec_id)
            if '035' in rec:
                for f in rec['035'][0][0]:
                    if 'a' in f:
                        inspire_record = 'http://inspirehep.net/record/%s' % (f[1],)
            # Paper row first, then one row per author below it.
            print >> req, "%s;%s;%s;%s;%s;;" % (count, title, journal, doi,
                                                inspire_record)
            if '100' in rec:
                author = rec['100'][0][0][0][1]
                affiliations = record_get_field_values(rec, tag='100', code='v')
                print >> req, ";;;;;%s;%s" % (author, " | ".join(affiliations))
            if '700' in rec:
                for auth in rec['700']:
                    author = auth[0][0][1]
                    affiliations = field_get_subfield_values(auth, code='v')
                    print >> req, ";;;;;%s;%s" % (author, " | ".join(affiliations))
            count += 1


def countries_by_publishers(req):
    """Render two tables of per-country, per-journal counts.

    Table 1: papers with at least one author from the country.
    Table 2: papers whose authors are all from a single country.
    """
    req.content_type = "text/html"
    print >> req, pageheaderonly("Countries/publishers", req=req)
    ############
    ## PART 1 ##
    # journals = []
    # for pub in CFG_JOURNALS:
    #     ids = perform_request_search(cc=pub)
    #     journals.append((pub, ids))
    # journals.append(("older_than_2014", perform_request_search(cc='older_than_2014')))
    # countries = []
    # for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
    #     ids = perform_request_search(p="country:%s" % (country,)) + perform_request_search(cc='older_than_2014', p="country:%s" % (country,))
    #     countries.append((country, ids))
    req.write("<h1>Number of articles per country per journal</h1>")
    req.write("<h2>Minimum one author from the country</h2>")
    req.flush()
    req.write("<table>\n")
    req.write("<tr><th rowspan=2>Country</th><th colspan=10>Journals</th><th>Other</th></tr>")
    req.write("""<tr>
    <td>Acta</td>
    <td>Advances in High Energy Physics</td>
    <td>Chinese Physics C</td>
    <td>European Physical Journal C</td>
    <td>Journal of Cosmology and Astroparticle Physics</td>
    <td>Journal of High Energy Physics</td>
    <td>New Journal of Physics</td>
    <td>Nuclear Physics B</td>
    <td>Physics Letters B</td>
    <td>Progress of Theoretical and Experimental Physics</td>
    <td>older_than_2014</td></tr>""")
    for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
        req.write("<tr><td>%s</td>" % (country,))
        for pub in CFG_JOURNALS + ["older_than_2014"]:
            # NOTE(review): this writes the search *result object* into the
            # cell rather than its length — presumably len(...) was intended.
            req.write("<td>%s</td>" % perform_request_search(p="country:%s" % (country,), cc=pub))
        req.write("</tr>")
    req.write('</table>')
    ############
    ## PART 2 ##
    # journals = []
    hitcount = {}
    for pub in CFG_JOURNALS + ["older_than_2014"]:
        ids = perform_request_search(cc=pub)
        hitcount[pub] = {}
        for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
            hitcount[pub][country] = 0
        for id in ids:
            record = get_record(id)
            countries = set(record_get_field_values(record, '700', '%', '%', 'w') +
                            record_get_field_values(record, '100', '%', '%', 'w'))
            if len(countries) == 1:
                c = countries.pop()
                if c in set(NATIONS_DEFAULT_MAP.itervalues()):
                    # NOTE(review): BUG — `countries` is a set (unindexable)
                    # and was just emptied by pop(); this line raises
                    # TypeError and was surely meant to be
                    # `hitcount[pub][c] += 1` (kept as-is in this doc pass).
                    hitcount[pub][countries[0]] += 1
    req.write("<h1>Number of articles per country per journal</h1>")
    req.write("<h2>All author from the country</h2>")
    req.flush()
    req.write("<table>\n")
    req.write("<tr><th rowspan=2>Country</th><th colspan=10>Journals</th><th>Other</th></tr>")
    req.write("""<tr>
    <td>Acta</td>
    <td>Advances in High Energy Physics</td>
    <td>Chinese Physics C</td>
    <td>European Physical Journal C</td>
    <td>Journal of Cosmology and Astroparticle Physics</td>
    <td>Journal of High Energy Physics</td>
    <td>New Journal of Physics</td>
    <td>Nuclear Physics B</td>
    <td>Physics Letters B</td>
    <td>Progress of Theoretical and Experimental Physics</td>
    <td>older_than_2014</td></tr>""")
    for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
        req.write("<tr><td>%s</td>" % (country,))
        for pub in CFG_JOURNALS + ["older_than_2014"]:
            req.write("<td>%s</td>" % hitcount[pub][country])
        req.write("</tr>")
    req.write('</table>')
    req.write(pagefooteronly(req=req))
    return ""


def impact_articles(req, year):
    """Stream a CSV of per-author rows for all records created in *year* (>= 2014)."""
    try:
        year = int(year)
        assert 2014 <= year
    except:  # NOTE(review): bare except; assert also vanishes under -O
        raise SERVER_RETURN(HTTP_BAD_REQUEST)
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = ('attachment; '
                                              'filename=impact_articles.csv')
    ids = perform_request_search(p="datecreated:{year}-01-01->{year}-12-31".format(year=year))
    counter = 0
    print >> req, "#;recid;journal;author;orcid;affiliation;countries"
    for i in ids:
        counter += 1
        try:
            rec = get_record(i)
        except:  # best-effort: report the failing recid and keep going
            print >> req, "{c},{recid},Can't load metadata".format(c=counter, recid=i)
            continue
        journal = record_get_field_value(rec, tag='773', code='p')
        for field in ['100', '700']:
            if field in rec:
                for author in rec[field]:
                    name = ""
                    orcid = ""
                    aff = ""
                    country = ""
                    # NOTE(review): `key is 'a'` compares identity, not
                    # equality; it only works because CPython interns short
                    # strings — should be `==` (kept as-is in this doc pass).
                    for key, val in author[0]:
                        if key is 'a':
                            name = unicode(val, 'UTF-8').replace('\n', ' ').strip()
                        if key is 'j':
                            orcid = unicode(val, 'UTF-8').replace('\n', ' ').strip()
                        if key in ['v', 'u']:
                            aff += unicode(val, 'UTF-8').replace('\n', ' ').strip() + " | "
                        if key is 'w':
                            country += unicode(val, 'UTF-8').replace('\n', ' ').strip() + ";"
                    print >> req, "{c};{recid};{journal};{name};{orcid};{aff};{country}".format(c=counter, recid=i, journal=journal, name=name, orcid=orcid, aff=aff, country=country)


def national_authors_list(req, search_country):
    """Stream a '|'-separated listing of papers and their *search_country* authors."""
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = ('attachment; '
                                              'filename=national_authors_list.csv')
    ids = perform_request_search(p="country:'%s'" % (search_country,))
    req.write("#;RECID;Title;Creation date;Publisher;Total # of authors;Authors name(given country only);Authors country;Authors affiliations\n")
    for number, recid in enumerate(ids):
        # NOTE(review): get_record(recid) is called five times per record
        # here; caching the first call would avoid repeated lookups.
        doi = record_get_field_value(get_record(recid), '024', ind1="7", code="a")
        journal = record_get_field_value(get_record(recid), '773', code="p")
        title = record_get_field_value(get_record(recid), '245', code="a")
        del_date = get_creation_date(recid)
        publisher = record_get_field_value(get_record(recid), '980', code="b")
        if not publisher:
            publisher = record_get_field_value(get_record(recid), '541', code="a")
        rec = get_record(recid)
        authors = []
        author_count = 0
        for f in ['100', '700']:
            if f in rec:
                for auth in rec[f]:
                    author_count += 1
                    aff = ''
                    name = ''
                    country = ''
                    hit = 0
                    for subfield, value in auth[0]:
                        if subfield == 'a':
                            name = value
                        if subfield in ['v', 'u']:
                            if aff:
                                aff += ', ' + value
                            else:
                                aff = value
                        if subfield == 'w':
                            if country:
                                country += ', ' + value
                            else:
                                country = value
                            if search_country in value:
                                hit = 1
                    if hit:
                        authors.append({'name': name,
                                        'affiliation': aff.replace('\n',''),
                                        'country': country})
        for i, author in enumerate(authors):
            if i == 0:
                # NOTE(review): BUG — 11 '%s' placeholders but only 9 values
                # supplied (doi and journal are computed above yet unused);
                # this raises TypeError on the first row. Kept as-is in this
                # doc pass; the fix is to add doi and journal to the tuple.
                req.write("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\n" % (number+1, recid, title.replace('\n',''), del_date, publisher, author_count, author['name'], author['country'], author['affiliation']))
            else:
                req.write("||||||||%s|%s|%s\n" % (author['name'], author['country'], author['affiliation']))


def institutions_list(req, country, year=None):
    """Stream a '|'-separated per-affiliation journal breakdown for *country*.

    Optionally restricted to a publication *year*.
    """
    from copy import deepcopy

    def find_nations(affiliation):
        """Guess the nation (or special lab) an affiliation string belongs to."""
        # Special-case big labs so they win over their host country.
        NATIONS_DEFAULT_MAP['European Organization for Nuclear Research'] = 'CERN'
        NATIONS_DEFAULT_MAP['Centre Europeen de Recherches Nucleaires'] = 'CERN'
        NATIONS_DEFAULT_MAP['High Energy Accelerator Research Organization'] = 'KEK'
        NATIONS_DEFAULT_MAP['KEK'] = 'KEK'
        NATIONS_DEFAULT_MAP['FNAL'] = 'FNAL'
        NATIONS_DEFAULT_MAP['Fermilab'] = 'FNAL'
        NATIONS_DEFAULT_MAP['Fermi National'] = 'FNAL'
        NATIONS_DEFAULT_MAP['SLAC'] = 'SLAC'
        NATIONS_DEFAULT_MAP['DESY'] = 'DESY'
        NATIONS_DEFAULT_MAP['Deutsches Elektronen-Synchrotron'] = 'DESY'
        NATIONS_DEFAULT_MAP['JINR'] = 'JINR'
        NATIONS_DEFAULT_MAP['JOINT INSTITUTE FOR NUCLEAR RESEARCH'] = 'JINR'
        possible_affs = []

        def _sublistExists(list1, list2):
            # NOTE(review): defined but never used in this function.
            return ''.join(map(str, list2)) in ''.join(map(str, list1))
        # Word-set match: a map key matches when all its words appear in the
        # affiliation string.
        values = set([y.lower().strip() for y in re.findall(ur"[\w']+", affiliation.replace('.','').decode("UTF-8"), re.UNICODE)])
        for key, val in NATIONS_DEFAULT_MAP.iteritems():
            key = unicode(key)
            key_parts = set(key.lower().decode('utf-8').split())
            if key_parts.issubset(values):
                possible_affs.append(val)
                values = values.difference(key_parts)
        if not possible_affs:
            possible_affs = ['HUMAN CHECK']
        # A lab match supersedes the host country match.
        if 'CERN' in possible_affs and 'Switzerland' in possible_affs:
            # Don't use remove in case of multiple Switzerlands
            possible_affs = [x for x in possible_affs
                             if x != 'Switzerland']
        if 'KEK' in possible_affs and 'Japan' in possible_affs:
            possible_affs = [x for x in possible_affs if x != 'Japan']
        if 'FNAL' in possible_affs and 'USA' in possible_affs:
            possible_affs = [x for x in possible_affs if x != 'USA']
        if 'SLAC' in possible_affs and 'USA' in possible_affs:
            possible_affs = [x for x in possible_affs if x != 'USA']
        if 'DESY' in possible_affs and 'Germany' in possible_affs:
            possible_affs = [x for x in possible_affs if x != 'Germany']
        if 'JINR' in possible_affs and 'Russia' in possible_affs:
            possible_affs = [x for x in possible_affs if x != 'Russia']
        return sorted(list(set(possible_affs)))[0]

    # Per-journal zero counters, deep-copied for every affiliation found.
    publisher_dict = {'New J. Phys.': 0,
                      'Acta Physica Polonica B': 0,
                      'Advances in High Energy Physics': 0,
                      'Chinese Phys. C': 0,
                      'EPJC': 0,
                      'JCAP': 0,
                      'JHEP': 0,
                      'Nuclear Physics B': 0,
                      'Physics letters B': 0,
                      'PTEP': 0}
    if(year):
        recids = perform_request_search(p='country:"%s" year:%s' % (country,year))
    else:
        recids = perform_request_search(p='country:"%s"' % (country,))
    req.content_type = 'text/csv; charset=utf-8'
    req.headers_out['content-disposition'] = ('attachment; '
                                              'filename=%s_institutions_list.csv' % (country,))
    req.write("recid|authors #|title|country|New J. Phys.|Acta Physica Polonica B|Advances in High Energy Physics|Chinese Phys. C|EPJC|JCAP|JHEP|Nuclear Physics B|Physics letters B|PTEP\n")
    for recid in recids:
        rec = get_record(recid)
        global_affs = {}
        author_count = 0
        if '100' in rec:
            author_count += len(rec['100'])
        if '700' in rec:
            author_count += len(rec['700'])
        journal = record_get_field_value(rec, '773', ind1="%", ind2="%", code='p')
        affs = []
        affs.extend(record_get_field_values(rec, '100', ind1="%", ind2="%", code='v'))
        affs.extend(record_get_field_values(rec, '700', ind1="%", ind2="%", code='v'))
        for aff in affs:
            if aff not in global_affs:
                global_affs[aff] = deepcopy(publisher_dict)
            # NOTE(review): raises KeyError when the 773 $p value is not one
            # of the publisher_dict keys — confirm the journal name mapping.
            global_affs[aff][journal] += 1
        for aff, j in global_affs.iteritems():
            req.write("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\n" % (recid, author_count, aff.replace('\n', ' ').replace('\r', ''), find_nations(aff), j['New J. Phys.'], j['Acta Physica Polonica B'], j['Advances in High Energy Physics'], j['Chinese Phys. C'], j['EPJC'], j['JCAP'], j['JHEP'], j['Nuclear Physics B'], j['Physics letters B'], j['PTEP']))
Because you compared SiteKreator and LightCMS, we think this app could fit your needs. Save time and effort comparing leading CMS Software tools for small businesses. The table below compares LightCMS and SiteKreator. Check out how both products compare on product details such as features, pricing, target market and supported languages. Quickly access the latest reviews to compare actual user opinions and ratings. Do not hesitate to click on Visit Website to access a Free Demo or Trial immediately.
# -*- coding: utf-8 -*-
"""Tests that verify the canned 'Configuration Management > Containers' reports
against the data actually present in the appliance DB / provider API."""
import re

import pytest

from cfme.containers.provider import ContainersProvider
from cfme.intelligence.reports.reports import CannedSavedReport
from utils import testgen
from utils.blockers import BZ

pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.meta(
        server_roles='+ems_metrics_coordinator +ems_metrics_collector +ems_metrics_processor'),
    pytest.mark.tier(1)]
pytest_generate_tests = testgen.generate([ContainersProvider], scope='function')


@pytest.fixture(scope='module')
def node_hardwares_db_data(appliance):
    """Grabbing hardwares table data for nodes.

    Returns:
        dict mapping node name -> its row from the `hardwares` table.
    """
    db = appliance.db
    hardwares_table = db['hardwares']
    container_nodes = db['container_nodes']

    out = {}
    for node in db.session.query(container_nodes).all():
        # Hardware rows are keyed by the node's id.
        out[node.name] = hardwares_table.__table__.select().where(
            hardwares_table.id == node.id
        ).execute().fetchone()
    return out


@pytest.fixture(scope='function')
def pods_per_ready_status(provider):
    """Grabbing the pods and their ready status from API.

    Returns:
        dict mapping pod name -> bool (True when the 'Ready' condition is 'true').
    """
    # TODO: Add later this logic to mgmtsystem
    entities_j = provider.mgmt.api.get('pod')[1]['items']
    out = {}
    for entity_j in entities_j:
        # Pick the single condition whose type is 'ready' and record its truth.
        out[entity_j['metadata']['name']] = next(
            condition['status'].lower() == 'true'
            for condition in entity_j['status']['conditions']
            if condition['type'].lower() == 'ready'
        )
    return out


def get_vpor_data_by_name(vporizer_, name):
    """Return all vporizer entries whose resource_name matches `name`."""
    return [vals for vals in vporizer_ if vals.resource_name == name]


def get_report(menu_name):
    """Queue a report by menu name , wait for finish and return it"""
    path_to_report = ['Configuration Management', 'Containers', menu_name]
    run_at = CannedSavedReport.queue_canned_report(path_to_report)
    return CannedSavedReport(path_to_report, run_at)


@pytest.mark.meta(blockers=[BZ(1435958, forced_streams=["5.8"])])
@pytest.mark.polarion('CMP-9533')
def test_pods_per_ready_status(soft_assert, pods_per_ready_status):
    report = get_report('Pods per Ready Status')
    for row in report.data.rows:
        name = row['# Pods per Ready Status']
        readiness_ui = row['Ready Condition Status'].lower() == 'true'
        if soft_assert(name in pods_per_ready_status,  # this check based on BZ#1435958
                       'Could not find pod "{}" in openshift.'
                       .format(name)):
            soft_assert(pods_per_ready_status[name] == readiness_ui,
                        'For pod "{}" expected readiness is "{}" got "{}"'
                        .format(name, pods_per_ready_status[name], readiness_ui))


@pytest.mark.polarion('CMP-9536')
def test_report_nodes_by_capacity(appliance, soft_assert, node_hardwares_db_data):
    report = get_report('Nodes By Capacity')
    for row in report.data.rows:
        hw = node_hardwares_db_data[row['Name']]

        soft_assert(hw.cpu_total_cores == int(row['CPU Cores']),
                    'Number of CPU cores is wrong: expected {}'
                    ' got {}'.format(hw.cpu_total_cores, row['CPU Cores']))

        # The following block is to convert whatever we have to MB
        memory_ui = float(re.sub(r'[a-zA-Z,]', '', row['Memory']))
        if 'gb' in row['Memory'].lower():
            memory_mb_ui = memory_ui * 1024
            # Shift hw.memory_mb to GB, round to the number of decimals of memory_mb_db
            # and shift back to MB:
            memory_mb_db = round(hw.memory_mb / 1024.0,
                                 len(str(memory_mb_ui).split('.')[1])) * 1024
        else:
            # Assume it's MB
            memory_mb_ui = memory_ui
            memory_mb_db = hw.memory_mb

        soft_assert(memory_mb_ui == memory_mb_db,
                    'Memory (MB) is wrong for node "{}": expected {} got {}'
                    .format(row['Name'], memory_mb_ui, memory_mb_db))


@pytest.mark.polarion('CMP-10034')
def test_report_nodes_by_cpu_usage(appliance, soft_assert, vporizer):
    report = get_report('Nodes By CPU Usage')
    for row in report.data.rows:
        vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
        usage_db = round(vpor_values.max_cpu_usage_rate_average, 2)
        usage_report = round(float(row['CPU Usage (%)']), 2)

        soft_assert(usage_db == usage_report,
                    'CPU usage is wrong for node "{}": expected {} got {}'
                    .format(row['Name'], usage_db, usage_report))


@pytest.mark.polarion('CMP-10033')
def test_report_nodes_by_memory_usage(appliance, soft_assert, vporizer):
    report = get_report('Nodes By Memory Usage')
    for row in report.data.rows:
        vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
        usage_db = round(vpor_values.max_mem_usage_absolute_average, 2)
        usage_report = round(float(row['Memory Usage (%)']), 2)

        # Fixed copy-paste: this test checks memory, not CPU.
        soft_assert(usage_db == usage_report,
                    'Memory usage is wrong for node "{}": expected {} got {}.'
                    .format(row['Name'], usage_db, usage_report))


# NOTE(review): 'CMP-10033' is also used by test_report_nodes_by_memory_usage
# above — looks like a copy-paste; confirm the intended polarion ID.
@pytest.mark.meta(blockers=[BZ(1436698, forced_streams=["5.6", "5.7"])])
@pytest.mark.polarion('CMP-10033')
def test_report_nodes_by_number_of_cpu_cores(soft_assert, node_hardwares_db_data):
    report = get_report('Number of Nodes per CPU Cores')
    for row in report.data.rows:
        hw = node_hardwares_db_data[row['Name']]

        soft_assert(hw.cpu_total_cores == int(row['Hardware Number of CPU Cores']),
                    'Hardware Number of CPU Cores is wrong for node "{}": expected {} got {}.'
                    .format(row['Name'], hw.cpu_total_cores,
                            row['Hardware Number of CPU Cores']))


@pytest.mark.polarion('CMP-10008')
def test_report_projects_by_number_of_pods(appliance, soft_assert):
    container_projects = appliance.db['container_projects']
    container_pods = appliance.db['container_groups']

    report = get_report('Projects by Number of Pods')
    for row in report.data.rows:
        # Count pods whose project id matches the project named in the report row.
        pods_count = len(container_pods.__table__.select().where(
            container_pods.container_project_id ==
            container_projects.__table__.select().where(
                container_projects.name == row['Project Name']).execute().fetchone().id
        ).execute().fetchall())

        soft_assert(pods_count == int(row['Number of Pods']),
                    'Number of pods is wrong for project "{}". expected {} got {}.'
                    .format(row['Project Name'], pods_count, row['Number of Pods']))


@pytest.mark.polarion('CMP-10009')
def test_report_projects_by_cpu_usage(soft_assert, vporizer):
    report = get_report('Projects By CPU Usage')
    for row in report.data.rows:
        vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
        usage_db = round(vpor_values.max_cpu_usage_rate_average, 2)
        usage_report = round(float(row['CPU Usage (%)']), 2)

        soft_assert(usage_db == usage_report,
                    'CPU usage is wrong for project "{}": expected {} got {}'
                    .format(row['Name'], usage_db, usage_report))


@pytest.mark.polarion('CMP-10010')
def test_report_projects_by_memory_usage(soft_assert, vporizer):
    report = get_report('Projects By Memory Usage')
    for row in report.data.rows:
        vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
        usage_db = round(vpor_values.max_mem_usage_absolute_average, 2)
        usage_report = round(float(row['Memory Usage (%)']), 2)

        # Fixed copy-paste: this test checks memory, not CPU.
        soft_assert(usage_db == usage_report,
                    'Memory usage is wrong for project "{}": expected {} got {}.'
                    .format(row['Name'], usage_db, usage_report))
Show your students that anyone can be a math person. The sum of one and two will always be three and a parabola will always be u-shaped, but all great math teachers know that their field is in a constant state of change as its use expands. Between evolving state standards, cool new digital math games, and tablets so easy to use that PreK kids know to swipe right, there’s more value than ever in earning a master’s degree that brings relevance to your math instruction and gives you strategies to reach all kinds of learners — including those who have yet to see that they can “get” math. With an emphasis on National Council of Teachers of Mathematics (NCTM) standards, Concordia’s online MEd in Curriculum & Instruction: Mathematics program addresses developmental learning, acquiring proficiency, problem-solving, and demonstrating understanding in mathematics for all children. You’ll also learn to utilize technology to improve student outcomes in the PreK-12 core curriculum. This course presents an overview of the principles set forth by the National Council for Teachers of Mathematics and is designed for teaching all learners: Equity, Curriculum, Teaching, Learning and Assessment. In the study of the historical perspective and the theoretical foundations of mathematical teaching, close attention is given to the connection between theorists and practical application in the classroom. Emphasis is placed on transformational learning based in a developmental, constructivist approach. This course will guide teachers in the development of a foundation for the teaching and learning of measurement and geometry through transformational learning based on a developmental, constructivist approach. Numbers and operations are woven into the study of measurement and geometry in a meaningful, integrated manner. 
Emphasis is placed on student and teacher thinking, lesson planning, transformational teaching methods, the use of technology as an integral part of teaching and learning math, and assessment. This course will foster a deeper understanding of PreK-12 national standards, as well as the three NCTM curriculum focal points for each grade level (PreK-12), in order to strengthen a teacher's thinking and perceptions of geometry as well as instructional practices which effectively increase student understanding of measurement and geometry. This course will guide teachers in the development of a foundation for the learning and teaching of algebraic concepts, data analysis, and probability through transformational learning based on a developmental, constructivist approach. Numbers and operations are woven into the study of algebra, data analysis, and probability in a meaningful integrated manner. Emphasis is placed on student and teacher processing of algebra, on lesson planning, transformational teaching methods, the use of technology as an integral part of learning and teaching math, and assessment. The focus of this course is transformational learning and teaching that meets the needs of all learners through a developmental, constructivist approach. There will be an emphasis on setting up the classroom, getting to know the learners, assessing learning styles and needs, differentiated teaching strategies including interactive age-appropriate games and manipulatives, and providing anchor activities to solidify learning. Technology is considered an integral part of learning and teaching math in the differentiated classroom and will include the strategic use of technology resources such as SmartBoard technology, digital tools, computers, calculators, online digital games, and podcasts along with Internet-based resources. The course will provide a plethora of practical ideas for making math a positive and transformational experience for teachers and learners alike. 
Is the Master of Education in Mathematics concentration right for me? The Curriculum & Instruction: Mathematics program helped me to hone my skills of effectively differentiating lessons and understanding the differences in students. It is easy to teach an individual a lesson. It takes understanding and planning to meet the needs of all the students in a classroom. Why is math education critical in today’s world? Math teaches logical and critical thinking: Math requires discipline and critical thinking skills to find solutions for complex problems. When students master these thinking processes, that proficiency carries over into other areas of their lives and turns them into adept problem-solvers and critical thinkers. Math is a necessary component of everyday life: Balancing a budget, paying our taxes, measuring a room, leaving a tip — math is a part of our daily lives. Fluency in math isn’t reserved for genius mathematicians. Everyone needs math! Universal language: Mathematics is truly a universal language shared by all humans, transcending differences across cultures and religions. The concept of numeracy — or math literacy — connects individuals across continents and through time, and links ideas conceived from one profession to another. Math encourages precision and persistence: Precision and persistence in math help students to logically approach a problem and persevere until a solution is reached. 21st century jobs: Preparing our youth for competitive 21st century jobs will sustain our country’s economic growth. Graduates with strong math skills are in high demand for high-paying jobs in STEM (and STEAM) fields. But even non-STEM jobs require math. A filmmaker needs to create a budget for their project. Cashiers need the ability to calculate change. Business owners must balance their books and run payroll. Why are math teachers more important than ever? The U.S. needs qualified and passionate math teachers to grow our nation’s math literacy.
As the needs of the global workforce evolve, mathematical understanding is more imperative than ever. In 1960, 1.1 million Americans worked in science and engineering fields, while today it’s approaching 6 million. Careers in banking and finance, information technology, healthcare, and construction are among STEM jobs that are projected to grow to more than 9 million from 2012 to 2022. Not to mention, the most recent United States PISA rankings from 2015 placed the U.S. 38th out of 71 countries in math and 24th in science. Sources: National Science Foundation, U.S. Bureau of Labor Statistics, National Science Foundation, STEM 101: Intro to tomorrow’s jobs. Good math teachers help young learners discover the beauty and real-world relevance of math. Students are often resistant to math. Math instructors who can eliminate barriers to learning are in the highest demand. The Atlantic calls the I’m bad at math refrain America’s greatest “fallacy of inborn ability.” Professor of Psychology Carol Dweck, author of the now-famous Mindset: The New Psychology of Success, argues that intelligence is not fixed, but malleable. Her research indicates that when students believe they can learn and grow their intelligence, they are more likely to do so—a phenomenon she calls the “Growth Mindset.” Conversely, when students believe they can’t do something, they are more likely to be unsuccessful. Sian Beilock, a professor of psychology at the University of Chicago, notes that parents who are anxious about math can pass that anxiety along to their children, creating a new generation of math-phobic learners. Sources: Implicit theories of intelligence predict achievement across an adolescent transition: a longitudinal study and an intervention Blackwell LS1, Trzesniewski KH, Dweck CS, Child Dev. 2007 Jan-Feb;78(1):246-63; The Myth of ‘I’m Bad at Math’ Miles Kimball and Noah Smith, The Atlantic, October 2013; Fending Off Math Anxiety By Perri Klass, M.D., The New York Times, April, 2017.
import theano import theano.tensor as T import passage.inits as inits from theano.tensor.extra_ops import repeat from passage.theano_utils import shared0s, floatX ### Directly compositional models ### Associative operators ### class Add(object): '''Elementwise addition.''' def __init__(self, size): self.size = size self.id = T.alloc(0.0, 1, self.size) def step(self, x_t, h_tm1): return h_tm1 + x_t class Mult(object): '''Elementwise multiplication.''' def __init__(self, size): self.size = size self.id = T.alloc(1.0, 1, self.size) def step(self, x_t, h_tm1): return h_tm1 * x_t class MatrixMult(object): '''Matrix multiplication.''' def __init__(self, size): self.size = size self.sqrt_size = int(self.size**0.5) self.id = T.eye(self.sqrt_size, self.sqrt_size).reshape((1,self.size)) def step(self, x_t, h_tm1): h_t,_ = theano.scan(lambda x, z: T.dot(x, z), sequences=[ x_t.reshape((x_t.shape[0], self.sqrt_size, self.sqrt_size)), h_tm1.reshape((h_tm1.shape[0], self.sqrt_size, self.sqrt_size))]) return h_t.reshape((h_t.shape[0], self.size)) class Direct(object): def __init__(self, n_features=256, size=256, init=inits.uniform, op=MatrixMult): self.n_features = n_features self.size = size self.sqrt_size = int(self.size ** 0.5) self.init = init self.op = op(self.size) self.input = T.imatrix() self.embeddings = self.init((self.n_features, self.size)) self.params = [self.embeddings] def embedded(self): return self.embeddings[self.input] def output(self, dropout_active=False): X = self.embedded() out, _ = theano.scan(self.op.step, sequences=[X], outputs_info=[repeat(self.op.id, X.shape[1], axis=0)] ) return out[-1]
Last Update: April 23, 2019, 11:57 a.m. Heads up! Found a bug? Have a question about Alfa Romeo Spider 1995 wheel specs? Share your knowledge!
# -*- coding: utf-8 -*- from setuptools import find_packages from setuptools import setup version = '0.2.0' description = 'Bird Feeder publishes Thredds metadata catalogs to a Solr index service with birdhouse schema.' long_description = ( open('README.rst').read() + '\n' + open('AUTHORS.rst').read() + '\n' + open('CHANGES.rst').read() ) requires = [ 'argcomplete', 'pysolr', 'threddsclient', #'dateutil', 'nose', ] classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Atmospheric Science', ] setup(name='bird-feeder', version=version, description=description, long_description=long_description, classifiers=classifiers, keywords='thredds solr python netcdf birdhouse anaconda', author='Birdhouse Developers', author_email='', url='https://github.com/bird-house/bird-feeder', license = "Apache License v2.0", packages=find_packages(), include_package_data=True, zip_safe=False, test_suite='nose.collector', install_requires=requires, entry_points = { 'console_scripts': [ 'birdfeeder=birdfeeder:main', ]} , )
Start in Sydney and then venture into greater New South Wales for a true Australian experience. Sydney is not a city that presents one face to visitors and another to locals. Off-duty, Sydneysiders are just as likely to take a Manly ferry from Circular Quay or treat themselves to BridgeClimb as they are to spread out a towel on one of the city’s 100 golden beaches. They might even take a selfie with two of the city’s oft-photographed landmarks – the Sydney Harbour Bridge and the Sydney Opera House – or chase down local prawns and rich, sweet Sydney rock oysters. The Harbour City provides a fascinatingly authentic launching pad for a quintessential Australian experience in the state of New South Wales, home to remote islands, intriguing mountains, ancient landscapes, plush eco-retreats, and legendary wineries. Let’s start where the buzz is, since that’s how locals do it. Festivals of light, culture, arts, and food bring Sydneysiders out to play in force. Savvy visitors time their travel to coincide with big events, like the Sydney Festival (January 9 through 27, 2019), which every summer uncorks the bottle of creative energy that is trailblazing theater, cabaret, art, and music. Also on balmy summer nights (January 8 through February 16, 2019), St. George OpenAir Cinema shows movies on a giant screen that rises from the harbor as the sun sinks behind the Sydney Opera House. Which way to look!? For three weeks during the Southern Hemisphere’s late fall and early winter, the city becomes the world’s most colorful outdoor gallery. Vivid Sydney (May 24 through June 15, 2019) celebrates the technology of light with dazzling illuminations and spectacular projections onto the sails of the Sydney Opera House, the exterior of the Museum of Contemporary Art, the arch of the Sydney Harbour Bridge, and many other locations. Sydneysiders and visitors rub shoulders at the Opera Bar during Vivid Sydney 2018. Enter a state of grace. 
Several Virtuoso hotels in Sydney offer a well-appointed base for exploring the city, but don’t get too comfortable; New South Wales is calling. With nearly 900 national parks, 1,000 miles of coastline, and 800 beaches, this southeastern state packs a natural punch. Two and a half hours north of Sydney by car, nature comes in the varietal variety. The Hunter Valley ranks as one of Australia’s oldest wine regions. They do things bigger and better here, whether it’s beefy shiraz or signature sémillon, luxurious day spas or star-studded concerts held on estate lawns. Brokenwood, Tulloch, and Tyrrell’s are among the big-name wineries to visit over a long weekend. Leave room for cheese, craft beer, a sweep over the vines in a hot-air balloon, and plenty of gourmet food. One of the best views of the Hunter Valley region is from a hot air balloon. New South Wales also boasts six UNESCO World Heritage-listed sites, including the Sydney Opera House, the Lord Howe Island Group, the Blue Mountains area, and the Willandra Lakes Region. Worthy of their nod from the United Nations Educational, Scientific, and Cultural Organization, they all more than merit a visit. Lord Howe Island is just a two-hour flight from Sydney, and because the number of visitors is capped at 400, it always feels like your own private idyll. With its abundant birdlife, luxuriant coral, and fish and turtles that swim at your feet, the area boasts an environmental mind-set. Snorkel, scuba dive, kayak, or hike. The trek to the top of the 2,800-foot Mount Gower is one of Australia’s best day walks. The Willandra Lakes Region in the Far West of New South Wales is a destination for archaeology buffs who want to channel their inner Louis Leakey. The traditional meeting place of several indigenous tribes, Lake Mungo (its star attraction) made headlines in the late 1960s and early 1970s, when 45,000-year-old remains of Aboriginal people were discovered. 
Today it’s one of Australia’s most significant archaeological regions. Expect emus skittering over sand dunes, lots of kangaroos, panoramic desert views, and star-packed skies. Kayaks ply clear waters along Lagoon Beach on Lord Howe Island. Named for an optical illusion, the World Heritage-listed Blue Mountains (about a two-hour drive west of Sydney) are the vast and majestic home of strange rock formations such as the Three Sisters. Gain a sense of scale from a Scenic World railway carriage that descends into a deep valley or from a cable car high in the canopy. Factor in extra time to visit the mountain villages of Leura and Katoomba, with shops, art galleries, and spectacular open gardens in spring. The Norman Lindsay Gallery at Faulconbridge celebrates the work of one of Australia’s most famous artists and authors. Its attached café offers a bush breakfast (kangaroo sausages with that?) and lattes served in porcelain-fine, ruby-colored mugs. The Three Sisters command the view from Echo Point Lookout in the Blue Mountains. Farther west, the 40-room Emirates One&Only Wolgan Valley sits proudly on a 7,000-acre nature reserve. Built around an 1832 homestead, this remote resort is a multiple award-winner with conservation credentials. Wallabies, kangaroos, and wombats graze at dawn and dusk against a picture-ready backdrop of eucalyptus woodlands and sandstone escarpments. Nature is a balm at Emirates One&Only Wolgan Valley spa. This article is sponsored by Destination New South Wales.
"""Pygame-based GUI for a Monopoly game: popup windows, a trade dialog,
a per-player status panel, and the main game window with its render loop."""
from gui import guiButton
import time, sys, os
from constants import *
from random import randrange
import pygame
from pygame.locals import QUIT
from threading import Thread

# Left x-coordinate of the status panel blits (see StatusWindow.draw).
X = 550
# Default popup placement/size (top, left) and (width, height).
POPUP_TOP=100
POPUP_LEFT=200
POPUP_SIZE=(700,500)


class PopupWindow():
    """A modal-style overlay: a framed surface with a message, optional
    image, optional text labels, and a list of buttons.

    Note: `massage` is the (misspelled) message string shown in the title
    area; kept as-is since callers pass it positionally.
    """
    def __init__(self,massage,buttons,image=None,texts=None,measures=((POPUP_TOP,POPUP_LEFT),POPUP_SIZE)):
        #self.gameWindow=gameWindow
        # measures = ((top, left), (width, height))
        self.top=measures[0][0]
        self.left=measures[0][1]
        self.width=measures[1][0]
        self.height=measures[1][1]
        self.image=image
        self.texts=texts
        self.massage=massage
        self.buttons=buttons
        # Shift button positions from popup-local to screen coordinates.
        for button in self.buttons:
            button.position = (self.left+button.position[0],self.top+button.position[1])
        self.background=pygame.Surface((self.width,self.height))
        pygame.font.init()
        self.fnt = pygame.font.Font("fonts//Kabel.ttf", 20)

    def draw(self,surf):
        """Render frame, labels, message, optional image and enabled buttons
        onto `surf`."""
        self.background.fill((25,25,25))
        # Lighter inner frame inset by 5px on each side.
        frame = pygame.Surface((self.width-10,self.height-10))
        frame.fill((220,220,220))
        self.background.blit(frame, (5,5))
        if self.texts!=None:
            # texts: iterable of (string, (x, y)) pairs in popup-local coords.
            for text in self.texts:
                t=self.fnt.render(text[0],True,BLACK)
                self.background.blit(t, text[1])
        surf.blit(self.background,(self.left,self.top))
        m=self.fnt.render(self.massage,True,BLACK)
        if self.image!=None:
            # Image position is hard-coded in screen coordinates.
            surf.blit(self.image,(330,140))
        surf.blit(m,(self.left+30,self.top+15))
        for button in self.buttons:
            # guiButton appears to be blittable (Surface-like); `_enable`
            # gates drawing — defined in the gui module.
            if button._enable:
                surf.blit(button,button.position)

    def handle_event(self,event):
        # Forward events to every button.
        for button in self.buttons:
            button.handle_event(event)

    def close(self):
        # NOTE(review): `del[self]` only unbinds the local name; it does not
        # destroy the window object — callers also clear their references.
        del[self]


class TradeWindow(PopupWindow):
    """Two-player trading dialog: asset buttons for each player (top row to
    offer, bottom row to retract) plus +/- money controls, backed by a
    `trader` object that accumulates the proposed deal."""
    def __init__(self,buttons,trader,players):
        self.buttons=buttons
        self.trader=trader
        self.players=players
        # Money increment per +/- click, per player.
        self.money_interval=[50,50]
        self.money1=0
        self.money2=0
        margin=5
        curr_x=20
        curr_y=50
        # Cell size used when laying out asset buttons in a grid.
        block_size=(50,75)
        # Header labels; 800x600 matches the window size passed to
        # PopupWindow.__init__ below.
        headers=[(players[0].name+ ' assets',(800//4-1,20)),(players[1].name+ ' assets',(3*800//4,20)),
                 ('money :$'+str(players[0].money),(800//4-170,20)),('money :$'+str(players[1].money),(-170+3*800//4,20)),
                 ('Trade assets',(POPUP_SIZE[0]//4-100,260)),('Trade assets',(3*POPUP_SIZE[0]//4-100,260))]
        # Player 1 assets: 'add' button on top, paired 'rem' button 260px below
        # (initially disabled until the asset is offered).
        for asset in players[0].assets_list():
            self.buttons.append(guiButton('',(curr_x,curr_y),action=self.add1_asset,parameter=asset,image=get_asset_image(asset),name='add'+asset.name,sizing=0.5,y_sizing=0.5))
            self.buttons.append(guiButton('',(curr_x,curr_y+260),action=self.rem1_asset,parameter=asset,image=get_asset_image(asset),name='rem'+asset.name,enabled=False,sizing=0.5,y_sizing=0.5))
            if curr_x+block_size[0]<POPUP_SIZE[0]//2-margin:
                curr_x=curr_x+block_size[0]
            else:
                curr_x=50
                curr_y+=block_size[1]
        curr_x=20
        curr_y=50
        # Player 2 assets: same layout shifted to the right half of the window.
        for asset in players[1].assets_list():
            self.buttons.append(guiButton('',(POPUP_SIZE[0]//2+curr_x,curr_y),action=self.add2_asset,parameter=asset,image=get_asset_image(asset),name='add'+asset.name,sizing=0.5,y_sizing=0.5))
            self.buttons.append(guiButton('',(POPUP_SIZE[0]//2+curr_x,curr_y+260),action=self.rem2_asset,parameter=asset,image=get_asset_image(asset),name='rem'+asset.name,enabled=False,sizing=0.5,y_sizing=0.5))
            if curr_x+block_size[0]<POPUP_SIZE[0]//2-margin:
                curr_x=curr_x+block_size[0]
            else:
                curr_x=100
                curr_y+=block_size[1]
        # Money +/- buttons for each side.
        self.buttons.append(guiButton('+',(50,500),action=self.add1_money))
        self.buttons.append(guiButton('-',(120,500),action=self.rem1_money))
        self.buttons.append(guiButton('+',(450,500),action=self.add2_money))
        self.buttons.append(guiButton('-',(520,500),action=self.rem2_money))
        PopupWindow.__init__(self,'',buttons,texts=headers,measures=((100,50),(800,600)))
        # Layout offsets; not referenced elsewhere in this module.
        self.top_dx=[40,40]
        self.top_dy=[100,100]
        self.down_dx=[40,40]
        self.down_dy=[400,400]

    def add1_money(self):
        # Increase player 1's offered money, capped below their balance.
        new_v=self.trader.player1_money+self.money_interval[0]
        if new_v<self.players[0].money:
            self.trader.set_money1(new_v)
            self.money1=new_v

    def add2_money(self):
        # Increase player 2's offered money, capped below their balance.
        new_v=self.trader.player2_money+self.money_interval[1]
        if new_v<self.players[1].money:
            self.trader.set_money2(new_v)
            self.money2=new_v

    def rem1_money(self):
        # Decrease player 1's offered money, never below zero.
        new_v=self.trader.player1_money-self.money_interval[0]
        if new_v>=0:
            self.trader.set_money1(new_v)
            self.money1=new_v

    def rem2_money(self):
        # Decrease player 2's offered money, never below zero.
        new_v=self.trader.player2_money-self.money_interval[1]
        if new_v>=0:
            self.trader.set_money2(new_v)
            self.money2=new_v

    def enable_asset(self,name,enabled):
        """Enable/disable the button whose name matches `name`."""
        for button in self.buttons:
            if button.name==name:
                button.set_enabled(enabled)

    def add1_asset(self,asset):
        # Offer the asset: disable its 'add' button, enable its 'rem' button.
        self.trader.add_asset_1(asset)
        self.enable_asset('add'+asset.name,False)
        self.enable_asset('rem'+asset.name,True)

    def rem1_asset(self,asset):
        # Retract the asset: re-enable 'add', disable 'rem'.
        self.trader.remove_asset_1(asset)
        self.enable_asset('add'+asset.name,True)
        self.enable_asset('rem'+asset.name,False)

    def add2_asset(self,asset):
        self.trader.add_asset_2(asset)
        self.enable_asset('add'+asset.name,False)
        self.enable_asset('rem'+asset.name,True)

    def rem2_asset(self,asset):
        self.trader.remove_asset_2(asset)
        self.enable_asset('add'+asset.name,True)
        self.enable_asset('rem'+asset.name,False)

    def draw(self,surf):
        """Draw the base popup, then overlay each side's offered money."""
        PopupWindow.draw(self,surf)
        t=self.fnt.render('Trade money $'+str(self.money2),True,BLACK)
        surf.blit(t, (self.left-50+3*700//4,self.top+550))
        t=self.fnt.render('Trade money $'+str(self.money1),True,BLACK)
        surf.blit(t, (self.left+700//4-100,self.top+550))

    def update(self):
        pass


class StatusWindow():
    """Side panel listing each player's name, token, money and assets."""
    players = []

    def __init__(self):
        pass

    def start(self, players):
        """Bind the player list and load fonts and the panel background."""
        self.players = players
        # setting fonts
        pygame.font.init()
        self.fnt_name = pygame.font.Font("fonts//Kabel.ttf", 28)
        self.fnt_money = pygame.font.Font("fonts//Kabel.ttf", 24)
        self.fnt_asset = pygame.font.Font("fonts//Kabel.ttf", 13)
        self.img = pygame.image.load("images//gui//status.png")

    def draw(self, background):
        """Render one 270px-tall status box per player onto `background`
        and return it."""
        self.img = self.img.convert_alpha()
        l = 0
        for p in self.players:
            height = l * 270
            background.blit(self.img, (X,height+5))
            txt_name = self.fnt_name.render(p.name, True, P_COLORS[l])
            textpos = txt_name.get_rect().move(X+15,15+height)
            background.blit(txt_name, textpos)
            # Player token image is reloaded from disk on every frame.
            background.blit(pygame.image.load(TOKENS[p.token_index]).convert_alpha(), (X+250,15+height))
            txt_money = self.fnt_money.render("$"+str(p.money), True, (10, 10, 10))
            textpos = txt_money.get_rect().move(X+320,25+height)
            background.blit(txt_money, textpos)
            i = 0
            # One line per color group: asset names joined with " | ".
            for c in p.assets:
                color = COLORS[c]
                text = ""
                for asset in p.assets[c]:
                    text = text + asset.name + " | "
                txt_money = self.fnt_asset.render(text, True, color)
                textpos = txt_money.get_rect().move(X+10,68+height+(i*20))
                background.blit(txt_money, textpos)
                i += 1
            l += 1
        return background


def get_asset_image(asset):
    """Build and return a 90x135 card surface for `asset`: colored title bar,
    two-word title, and either the rent table (properties) or a fixed
    description (utilities / railway stations)."""
    #init fonts
    fnt_title = pygame.font.Font("fonts//Kabel.ttf", 10)
    fnt_des = pygame.font.Font("fonts//Kabel.ttf", 9)
    #creating the image
    surf=pygame.Surface((90,135))
    surf.fill((255,255,255))
    #filling the top
    surf.fill(COLORS[asset.color],pygame.Rect(0,0,90,30))
    #draw title
    # Assumes the asset name has at least two space-separated words.
    text=asset.name.split(' ')
    title = fnt_title.render(text[0], True, BLACK)
    pos = title.get_rect().move(1,2)
    surf.blit(title,pos)
    title = fnt_title.render(text[1], True, BLACK)
    pos = title.get_rect().move(1,15)
    surf.blit(title,pos)
    #draw rent
    if asset.color!=UTILITY and asset.color!=RW_STATION:
        rent=fnt_des.render("Rent $"+str(asset.rent_list[0]), True, BLACK)
        pos = rent.get_rect().move(5,30)
        surf.blit(rent,pos)
        for num in range (1,5):
            rent=fnt_des.render(str(num)+" houses $"+str(asset.rent_list[num]), True, BLACK)
            pos = rent.get_rect().move(5,30+num*11)
            surf.blit(rent,pos)
        rent=fnt_des.render("hotel $"+str(asset.rent_list[5]), True, BLACK)
        pos = rent.get_rect().move(5,30+62)
        surf.blit(rent,pos)
        mortage=fnt_des.render("mortage $"+str(asset.price//2), True, BLACK)
        pos = mortage.get_rect().move(5,30+72)
        surf.blit(mortage,pos)
        # NOTE(review): `price` is rendered and positioned but never blitted
        # onto `surf` — the house-price line never appears on the card.
        # Looks like a missing surf.blit(price,pos); confirm intent.
        price=fnt_des.render("house price $"+str(asset.house_price), True, BLACK)
        pos = price.get_rect().move(5,30+82)
    else:
        if asset.color==UTILITY:
            descripton=[' Rent', 'own 1', 'dice roll X 4', '', 'own 2', 'dice roll X 10']
        else:
            descripton=[' Rent', 'own 1 $25', 'own 2 $50', 'own 3 $100', 'own 4 $200']
        for line in descripton:
            tline=fnt_des.render(line, True, BLACK)
            pos = tline.get_rect().move(5,40+descripton.index(line)*11)
            surf.blit(tline,pos)
    return surf


class GameWindow():
    """Top-level window: owns the render loop (run on a daemon thread),
    popup lifecycle, and blocking user-choice helpers."""
    #get the board and the players
    def __init__(self,board,players,console):
        self.console=console
        self.board=board
        self.players=players
        self.quit=False
        self.statusWin=StatusWindow()
        self.statusWin.start(self.players)
        self.buttonPad = buttonPad()
        # When popup is True, the loop renders only popupWindow.
        self.popup=False
        self.popupWindow=None

    #creating a thread and run its draw function on it
    def run(self):
        self.thread = Thread(target=self.draw)
        self.thread.daemon = True
        self.thread.start()

    def open_popup(self,popup):
        # Switch the render loop into popup mode.
        self.popup=True
        self.popupWindow=popup

    def draw(self):
        """Main render loop (runs on the thread started by run())."""
        # Initialise screen
        pygame.init()
        os.environ['SDL_VIDEO_WINDOW_POS'] = "{},{}".format(50,20) # x,y position of the screen
        screen = pygame.display.set_mode((1025, 700)) #witdth and height
        pygame.display.set_caption('Monopoly')
        # Fill background
        background = pygame.Surface(screen.get_size())
        background = background.convert()
        clock = pygame.time.Clock()
        #initate the tokens for players
        token_list = []
        for p in self.players:
            token_list.append(pygame.image.load(TOKENS[p.token_index]).convert_alpha())
        # Event loop
        while 1:
            clock.tick(60) #FPS
            if not self.popup:
                # Board image is reloaded from disk every frame.
                brd_img = pygame.image.load("images//monopoly.png")
                brd_img = brd_img.convert()
                for event in pygame.event.get():
                    self.buttonPad.handle_event(event)
                    if event.type == QUIT or self.quit:
                        pygame.quit()
                        # NOTE(review): on POSIX, signal 0 only checks the
                        # process exists — it does not terminate it; probably
                        # intended to kill the process. Confirm.
                        os.kill(os.getpid(),0)
                background.fill((50, 50, 50))
                background = self.console.draw(background) # console
                self.buttonPad.draw(background)
                background = self.statusWin.draw(background) #status window
                for block in self.board.blocks:
                    # Only regular properties can carry houses/hotels.
                    if not (block.color == RW_STATION or block.color == UTILITY or block.color == -1):
                        if block.hotel: #draw hotel
                            h = pygame.image.load(BUILDINGS[0])
                            brd_img.blit(h, (block.position[0]-8,block.position[1]-5))
                        elif block.houses>=1: #draw houses
                            h = pygame.image.load(BUILDINGS[block.houses])
                            brd_img.blit(h, (block.position[0]-8,block.position[1]-5))
                #get players location on board
                player_pos = []
                for p in self.players:
                    player_pos.append(self.board.blocks[p.location].position)
                #draw players
                i = 0
                check = []
                # Offset tokens that share a square so they stay visible.
                for pos in player_pos:
                    for c in check:
                        if pos==c:
                            pos = (pos[0],pos[1]+25)
                    brd_img.blit(token_list[i], (pos[0]-15,pos[1]-10))
                    check.append(pos)
                    i += 1
                background.blit(brd_img, (5,5))
                screen.blit(background, (0, 0))
                pygame.display.flip()
            #popup
            else:
                for event in pygame.event.get():
                    if self.popupWindow!=None:
                        self.popupWindow.handle_event(event)
                    if event.type == QUIT or self.quit:
                        pygame.quit()
                        os.kill(os.getpid(),0)
                if self.popupWindow!=None:
                    self.popupWindow.draw(screen)
                    pygame.display.flip()

    def stop(self):
        self.quit = True

    def move_pawn(self,player,target_move):
        """Step the player's location forward one square at a time (wrapping
        around the board) until target_move is reached; blocks the caller."""
        while player.location!=target_move:
            player.location=(player.location+1)%len(self.board.blocks)
            time.sleep(0.25)

    def prompt_commands(self, list_cmds):
        # Delegate to the button pad; blocks until the user picks a command.
        return self.buttonPad.create_selection_menu(list_cmds)

    def choose_from_options(self,actions,image=None):
        """Show a popup with one button per entry of `actions`
        (name -> callback) and block until any button is clicked."""
        i=0
        self.buttons=[]
        for name in actions.keys():
            self.buttons.append(guiButton(name,(250+i//3*100, 65+(i%3)*50),actions[name],sizing=1.5))
            i+=1
        def check_click():
            for control in self.buttons:
                if control.clicked:
                    return True
            return False
        popup=PopupWindow('Choose',self.buttons,image,measures=((POPUP_TOP,POPUP_LEFT),(400,200)))
        self.open_popup(popup)
        # Busy-wait: the render thread draws the popup; we poll for a click.
        while not check_click():
            time.sleep(0.2)
        self.popup=False
        self.popupWindow=None
        popup.close()

    def create_trade_menu(self,players,image=None):
        """Open a TradeWindow between players[0] and players[1]; block until
        'pass' or 'finish' (which commits the trade) is clicked."""
        # Imported here, presumably to avoid a circular import with gameClasses.
        from gameClasses import Trader
        trader=Trader(players[0],players[1])
        self.buttons=[]
        passb=guiButton('pass',(700//2+40,600-50),action=passf)
        finishb=guiButton('finish',(700//2-40,600-50),action=trader.make_trade)
        self.buttons.append(passb)
        self.buttons.append(finishb)
        def check_click():
            if passb.clicked or finishb.clicked:
                return True
            return False
        popup=TradeWindow(self.buttons,trader,players)
        self.open_popup(popup)
        while not check_click():
            time.sleep(0.2)
        self.popup=False
        self.popupWindow=None
        popup.close()

    def choose_from_actions(self,actionsList,image=None,text='Choose',atexts=None):
        """Show a popup with one button per action (text or picture) plus a
        'pass' button; block until one is clicked."""
        try:
            i=0
            self.buttons=[]
            margin=5
            curr_x=20
            curr_y=50
            block_size=(95,150)
            for action in actionsList:
                if action.pic==None:
                    self.buttons.append(guiButton(action.name,(curr_x,curr_y),action=action.do_action))
                else:
                    self.buttons.append(guiButton('',(curr_x,curr_y),action=action.do_action,image=action.pic))
                if curr_x+block_size[0]<POPUP_SIZE[0]-margin:
                    curr_x=curr_x+block_size[0]
                else:
                    curr_x=20
                    curr_y+=block_size[1]
            self.buttons.append(guiButton('pass',(POPUP_SIZE[0]//2-40,POPUP_SIZE[1]-50),action=passf))
            def check_click():
                for control in self.buttons:
                    if control.clicked:
                        return True
                return False
            popup=PopupWindow(text,self.buttons,image,texts=atexts)
            self.open_popup(popup)
            while not check_click():
                time.sleep(0.2)
            self.popup=False
            self.popupWindow=None
            popup.close()
        except:
            # Bare except kept as-is: logs the error type and re-raises.
            print("Unexpected error:", sys.exc_info()[0])
            raise


class buttonPad():
    """Bottom-right command pad: builds a 3-row grid of buttons and blocks
    until one sets `value`."""
    def __init__(self):
        self.value=0
        self.controls=[]

    #replace prompt commands and prompt commands index
    def create_selection_menu(self,options):
        """Build one button per option and busy-wait until a click stores the
        chosen option in self.value; returns it.

        NOTE(review): options that are falsy (e.g. 0) would never break the
        `while self.value==0` loop — confirm callers never pass such values.
        """
        def set_value(value):
            self.value=value
        i=0
        self.controls=[]
        for option in options:
            # 3 buttons per column, 135px column pitch, 45px row pitch.
            x=620+(i//3)*135
            y=560+(i%3)*45
            if len(str(option))>10:
                # Long labels get an extra sizing argument.
                self.controls.append(guiButton(str(option),(x,y),set_value,option,1.75,7))
            else:
                self.controls.append(guiButton(str(option),(x,y),set_value,option,1.75))
            i+=1
        self.value=0
        while (self.value==0):
            time.sleep(0.1)
        print (self.value)
        return self.value

    def draw(self,surface):
        # Blit every control and hand the surface back to the caller.
        if len(self.controls)>0:
            for control in self.controls:
                surface.blit(control,control.position)
        return surface

    def set_enabled(self, enable):
        for control in self.controls:
            control.set_enabled(enable)

    #passing events from the main pygame thread(currently in gameWindow)
    def handle_event(self,event):
        for control in self.controls:
            control.handle_event(event)


def passf():
    """No-op callback used by 'pass' buttons."""
    pass
Fresh on the heels of her much-talked-about 60 Minutes interview, Angelina Jolie is appearing on the next cover of Newsweek. In the magazine, the actress discusses her upcoming film In the Land of Blood and Honey, which she directed. The film is set in wartime 1990s Bosnia. In her 60 Minutes interview on Sunday, Jolie revealed that she plans to make another war film, this one focusing on Afghanistan. In the Land of Blood and Honey opens December 23. Jolie's Newsweek cover hits newsstands Monday.
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): this chunk arrived whitespace-mangled (newlines stripped);
# reconstructed below with conventional formatting. No logic changed.

"""API over the nova service.
"""
from django.http import HttpResponse
from django.template.defaultfilters import slugify
from django.utils import http as utils_http
from django.views import generic

from novaclient import exceptions

from openstack_dashboard import api
from openstack_dashboard.api.rest import json_encoder
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils


@urls.register
class Keypairs(generic.View):
    """API for nova keypairs.
    """
    url_regex = r'nova/keypairs/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of keypairs associated with the current logged-in
        account.

        The listing result is an object with property "items".
        """
        result = api.nova.keypair_list(request)
        return {'items': [u.to_dict() for u in result]}

    @rest_utils.ajax(data_required=True)
    def post(self, request):
        """Create a keypair.

        Create a keypair using the parameters supplied in the POST
        application/json object. The parameters are:

        :param name: the name to give the keypair
        :param public_key: (optional) a key to import

        This returns the new keypair object on success.
        """
        if 'public_key' in request.DATA:
            new = api.nova.keypair_import(request, request.DATA['name'],
                                          request.DATA['public_key'])
        else:
            new = api.nova.keypair_create(request, request.DATA['name'])
        return rest_utils.CreatedResponse(
            '/api/nova/keypairs/%s' % utils_http.urlquote(new.name),
            new.to_dict()
        )


@urls.register
class Keypair(generic.View):
    url_regex = r'nova/keypairs/(?P<keypair_name>.+)/$'

    def get(self, request, keypair_name):
        """Creates a new keypair and associates it to the current project.

        * Since the response for this endpoint creates a new keypair and
          is not idempotent, it normally would be represented by a POST HTTP
          request. However, this solution was adopted as it would support
          automatic file download across browsers.

        :param keypair_name: the name to associate the keypair to
        :param regenerate: (optional) if set to the string 'true',
            replaces the existing keypair with a new keypair

        This returns the new keypair object on success.
        """
        try:
            regenerate = request.GET.get('regenerate') == 'true'
            if regenerate:
                api.nova.keypair_delete(request, keypair_name)
            keypair = api.nova.keypair_create(request, keypair_name)
        except exceptions.Conflict:
            return HttpResponse(status=409)
        except Exception:
            # NOTE(review): broad catch deliberately maps any backend failure
            # to a plain 500 so the browser download fails cleanly.
            return HttpResponse(status=500)
        else:
            response = HttpResponse(content_type='application/binary')
            response['Content-Disposition'] = ('attachment; filename=%s.pem'
                                               % slugify(keypair_name))
            response.write(keypair.private_key)
            response['Content-Length'] = str(len(response.content))
            return response


@urls.register
class Services(generic.View):
    """API for nova services.
    """
    url_regex = r'nova/services/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of nova services.

        Will return HTTP 501 status code if the service_list extension
        is not supported.
        """
        if api.base.is_service_enabled(request, 'compute') \
           and api.nova.extension_supported('Services', request):
            result = api.nova.service_list(request)
            return {'items': [u.to_dict() for u in result]}
        else:
            raise rest_utils.AjaxError(501, '')


@urls.register
class AvailabilityZones(generic.View):
    """API for nova availability zones.
    """
    url_regex = r'nova/availzones/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of availability zones.

        The following get parameters may be passed in the GET
        request:

        :param detailed: If this equals "true" then the result will
            include more detail.

        The listing result is an object with property "items".
        """
        detailed = request.GET.get('detailed') == 'true'
        result = api.nova.availability_zone_list(request, detailed)
        return {'items': [u.to_dict() for u in result]}


@urls.register
class Limits(generic.View):
    """API for nova limits.
    """
    url_regex = r'nova/limits/$'

    @rest_utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
    def get(self, request):
        """Get an object describing the current project limits.

        Note: the Horizon API doesn't support any other project (tenant) but
        the underlying client does...

        The following get parameters may be passed in the GET
        request:

        :param reserved: This may be set to "true" but it's not
            clear what the result of that is.

        The result is an object with limits as properties.
        """
        reserved = request.GET.get('reserved') == 'true'
        result = api.nova.tenant_absolute_limits(request, reserved)
        return result


@urls.register
class Servers(generic.View):
    """API over all servers.
    """
    url_regex = r'nova/servers/$'

    _optional_create = [
        'block_device_mapping', 'block_device_mapping_v2', 'nics', 'meta',
        'availability_zone', 'instance_count', 'admin_pass', 'disk_config',
        'config_drive'
    ]

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of servers.

        The listing result is an object with property "items". Each item is
        a server.

        Example GET:
        http://localhost/api/nova/servers
        """
        servers = api.nova.server_list(request)[0]
        return {'items': [s.to_dict() for s in servers]}

    @rest_utils.ajax(data_required=True)
    def post(self, request):
        """Create a server.

        Create a server using the parameters supplied in the POST
        application/json object. The required parameters as specified by
        the underlying novaclient are:

        :param name: The new server name.
        :param source_id: The ID of the image to use.
        :param flavor_id: The ID of the flavor to use.
        :param key_name: (optional extension) name of previously created
            keypair to inject into the instance.
        :param user_data: user data to pass to be exposed by the metadata
            server this can be a file type object as well or a string.
        :param security_groups: An array of one or more objects with a "name"
            attribute.

        Other parameters are accepted as per the underlying novaclient:
        "block_device_mapping", "block_device_mapping_v2", "nics", "meta",
        "availability_zone", "instance_count", "admin_pass", "disk_config",
        "config_drive"

        This returns the new server object on success.
        """
        try:
            args = (
                request,
                request.DATA['name'],
                request.DATA['source_id'],
                request.DATA['flavor_id'],
                request.DATA['key_name'],
                request.DATA['user_data'],
                request.DATA['security_groups'],
            )
        except KeyError as e:
            raise rest_utils.AjaxError(400, 'missing required parameter '
                                            "'%s'" % e.args[0])
        kw = {}
        for name in self._optional_create:
            if name in request.DATA:
                kw[name] = request.DATA[name]

        new = api.nova.server_create(*args, **kw)
        return rest_utils.CreatedResponse(
            '/api/nova/servers/%s' % utils_http.urlquote(new.id),
            new.to_dict()
        )


@urls.register
class Server(generic.View):
    """API for retrieving a single server
    """
    url_regex = r'nova/servers/(?P<server_id>[^/]+|default)$'

    @rest_utils.ajax()
    def get(self, request, server_id):
        """Get a specific server

        http://localhost/api/nova/servers/1
        """
        return api.nova.server_get(request, server_id).to_dict()


@urls.register
class ServerMetadata(generic.View):
    """API for server metadata.
    """
    url_regex = r'nova/servers/(?P<server_id>[^/]+|default)/metadata$'

    @rest_utils.ajax()
    def get(self, request, server_id):
        """Get a specific server's metadata

        http://localhost/api/nova/servers/1/metadata
        """
        return api.nova.server_get(request, server_id).to_dict().get('metadata')

    @rest_utils.ajax()
    def patch(self, request, server_id):
        """Update metadata items for a server

        http://localhost/api/nova/servers/1/metadata
        """
        updated = request.DATA['updated']
        removed = request.DATA['removed']
        if updated:
            api.nova.server_metadata_update(request, server_id, updated)
        if removed:
            api.nova.server_metadata_delete(request, server_id, removed)


@urls.register
class Extensions(generic.View):
    """API for nova extensions.
    """
    url_regex = r'nova/extensions/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of extensions.

        The listing result is an object with property "items". Each item is
        an image.

        Example GET:
        http://localhost/api/nova/extensions
        """
        result = api.nova.list_extensions(request)
        return {'items': [e.to_dict() for e in result]}


@urls.register
class Flavors(generic.View):
    """API for nova flavors.
    """
    url_regex = r'nova/flavors/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of flavors.

        The listing result is an object with property "items". Each item is
        a flavor. By default this will return the flavors for the user's
        current project. If the user is admin, public flavors will also be
        returned.

        :param is_public: For a regular user, set to True to see all public
            flavors. For an admin user, set to False to not see public flavors.
        :param get_extras: Also retrieve the extra specs.

        Example GET:
        http://localhost/api/nova/flavors?is_public=true
        """
        is_public = request.GET.get('is_public')
        is_public = (is_public and is_public.lower() == 'true')
        get_extras = request.GET.get('get_extras')
        get_extras = bool(get_extras and get_extras.lower() == 'true')
        flavors = api.nova.flavor_list(request, is_public=is_public,
                                       get_extras=get_extras)
        result = {'items': []}
        for flavor in flavors:
            d = flavor.to_dict()
            if get_extras:
                d['extras'] = flavor.extras
            result['items'].append(d)
        return result

    @rest_utils.ajax(data_required=True)
    def post(self, request):
        # A flavor restricted to specific projects is non-public.
        flavor_access = request.DATA.get('flavor_access', [])
        flavor_id = request.DATA['id']
        is_public = not flavor_access

        flavor = api.nova.flavor_create(request,
                                        name=request.DATA['name'],
                                        memory=request.DATA['ram'],
                                        vcpu=request.DATA['vcpus'],
                                        disk=request.DATA['disk'],
                                        ephemeral=request
                                        .DATA['OS-FLV-EXT-DATA:ephemeral'],
                                        swap=request.DATA['swap'],
                                        flavorid=flavor_id,
                                        is_public=is_public
                                        )
        for project in flavor_access:
            api.nova.add_tenant_to_flavor(
                request, flavor.id, project.get('id'))

        return rest_utils.CreatedResponse(
            '/api/nova/flavors/%s' % flavor.id,
            flavor.to_dict()
        )


@urls.register
class Flavor(generic.View):
    """API for retrieving a single flavor
    """
    url_regex = r'nova/flavors/(?P<flavor_id>[^/]+)/$'

    @rest_utils.ajax()
    def get(self, request, flavor_id):
        """Get a specific flavor

        :param get_extras: Also retrieve the extra specs.

        Example GET:
        http://localhost/api/nova/flavors/1
        """
        get_extras = self.extract_boolean(request, 'get_extras')
        get_access_list = self.extract_boolean(request, 'get_access_list')
        flavor = api.nova.flavor_get(request, flavor_id, get_extras=get_extras)

        result = flavor.to_dict()
        # Bug: nova API stores and returns empty string when swap equals 0
        # https://bugs.launchpad.net/nova/+bug/1408954
        if 'swap' in result and result['swap'] == '':
            result['swap'] = 0
        if get_extras:
            result['extras'] = flavor.extras

        if get_access_list and not flavor.is_public:
            access_list = [item.tenant_id for item in
                           api.nova.flavor_access_list(request, flavor_id)]
            result['access-list'] = access_list
        return result

    @rest_utils.ajax()
    def delete(self, request, flavor_id):
        api.nova.flavor_delete(request, flavor_id)

    @rest_utils.ajax(data_required=True)
    def patch(self, request, flavor_id):
        flavor_access = request.DATA.get('flavor_access', [])
        is_public = not flavor_access

        # Grab any existing extra specs, because flavor edit is currently
        # implemented as a delete followed by a create.
        extras_dict = api.nova.flavor_get_extras(request, flavor_id, raw=True)
        # Mark the existing flavor as deleted.
        api.nova.flavor_delete(request, flavor_id)
        # Then create a new flavor with the same name but a new ID.
        # This is in the same try/except block as the delete call
        # because if the delete fails the API will error out because
        # active flavors can't have the same name.
        flavor = api.nova.flavor_create(request,
                                        name=request.DATA['name'],
                                        memory=request.DATA['ram'],
                                        vcpu=request.DATA['vcpus'],
                                        disk=request.DATA['disk'],
                                        ephemeral=request
                                        .DATA['OS-FLV-EXT-DATA:ephemeral'],
                                        swap=request.DATA['swap'],
                                        flavorid=flavor_id,
                                        is_public=is_public
                                        )
        for project in flavor_access:
            api.nova.add_tenant_to_flavor(
                request, flavor.id, project.get('id'))

        if extras_dict:
            api.nova.flavor_extra_set(request, flavor.id, extras_dict)

    def extract_boolean(self, request, name):
        # "true" (any case) -> True; anything else (or absent) -> False.
        bool_string = request.GET.get(name)
        return bool(bool_string and bool_string.lower() == 'true')


@urls.register
class FlavorExtraSpecs(generic.View):
    """API for managing flavor extra specs
    """
    url_regex = r'nova/flavors/(?P<flavor_id>[^/]+)/extra-specs/$'

    @rest_utils.ajax()
    def get(self, request, flavor_id):
        """Get a specific flavor's extra specs

        Example GET:
        http://localhost/api/nova/flavors/1/extra-specs
        """
        return api.nova.flavor_get_extras(request, flavor_id, raw=True)

    @rest_utils.ajax(data_required=True)
    def patch(self, request, flavor_id):
        """Update a specific flavor's extra specs.

        This method returns HTTP 204 (no content) on success.
        """
        if request.DATA.get('removed'):
            api.nova.flavor_extra_delete(
                request, flavor_id, request.DATA.get('removed')
            )
        api.nova.flavor_extra_set(
            request, flavor_id, request.DATA['updated']
        )


@urls.register
class AggregateExtraSpecs(generic.View):
    """API for managing aggregate extra specs
    """
    url_regex = r'nova/aggregates/(?P<aggregate_id>[^/]+)/extra-specs/$'

    @rest_utils.ajax()
    def get(self, request, aggregate_id):
        """Get a specific aggregate's extra specs

        Example GET:
        http://localhost/api/nova/flavors/1/extra-specs
        """
        return api.nova.aggregate_get(request, aggregate_id).metadata

    @rest_utils.ajax(data_required=True)
    def patch(self, request, aggregate_id):
        """Update a specific aggregate's extra specs.

        This method returns HTTP 204 (no content) on success.
        """
        updated = request.DATA['updated']
        if request.DATA.get('removed'):
            # Setting a key to None removes it from the aggregate metadata.
            for name in request.DATA.get('removed'):
                updated[name] = None
        api.nova.aggregate_set_metadata(request, aggregate_id, updated)
Adidas Superstar pink shoes are available in our online store. Get cheap Adidas Superstar pink shoes from the Adidas Superstar sale now. As we all know, the Adidas Superstar series is very popular. Different kinds of sneakers express different characters; Adidas Superstar pink shoes can reflect your own taste, and their advanced technology will protect your feet even while you play sports.
#!/usr/bin/env python3
"""For files having the same size, print the file names longer than the shortest one.

NOTE(review): source arrived whitespace-mangled; reconstructed and restructured
into helpers with a ``__main__`` guard so the module is importable (the original
ran argparse at import time). Output is now sorted for determinism.
"""

import argparse
import collections
import pathlib  # available since Python 3.4 (else install it using pip/pipenv/…)
import sys


def collect_regular_files(names):
    """Return the set of regular files named directly or found (recursively) under named directories.

    Names that are neither files nor directories are silently ignored,
    matching the original behavior.
    """
    paths = set()
    for pathname in names:
        path = pathlib.Path(pathname)
        if path.is_file():
            paths.add(path)
        elif path.is_dir():
            paths.update(p for p in path.glob("**/*") if p.is_file())
    return paths


def find_redundant_paths(names):
    """Group files by size and return (sorted) the paths whose file name is
    longer than the shortest name in their size group."""
    files_by_size = collections.defaultdict(set)
    min_name_length = collections.defaultdict(lambda: sys.maxsize)
    for path in collect_regular_files(names):
        size = path.stat().st_size
        files_by_size[size].add(path)
        min_name_length[size] = min(min_name_length[size], len(path.name))

    redundant = []
    for size, paths in files_by_size.items():
        redundant.extend(p for p in paths if len(p.name) > min_name_length[size])
    # Sorted so repeated runs produce identical output (sets are unordered).
    return sorted(redundant)


def main():
    """Parse command-line arguments and print the redundant file paths."""
    parser = argparse.ArgumentParser(
        prog="long-file-names",
        description="For files having same size, print the file names longer than the shortest one.",
    )
    parser.add_argument("--version", action="version", version="%(prog)s v2")
    parser.add_argument("file_or_directory", type=str, nargs="+")
    args = parser.parse_args()

    for path in find_redundant_paths(args.file_or_directory):
        print(path)


if __name__ == "__main__":
    main()
iaWellness provides a corporate wellness solution that is a step above the rest. Through a team of registered dietitians, sports science specialists, registered nurses, and ACSM-certified wellness coaches, iaWellness is able to bring a unique dynamic to corporate wellness. Recent studies show that most employees are willing to take responsibility for their own health, but a high percentage of them don't know how. We are able to provide custom programs with several options to fit your company's needs. Alongside your team of benefits advisors, we can also set up an incentive structure that fits your company's goals. iaWellness is able to provide you the knowledge and tools necessary to make appropriate choices in nutrition, exercise, and lifestyle in an effort for you to become "healthy by choice."
######################################################################## # Copyright (C) 2007,8,9 Ehud Ben-Reuven # udi@benreuven.com # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation version 2. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ######################################################################## """ ofxml2flat OFX-file flat-file convert XML OFX file to flat lines. """ # TODO support INVOOLIST from xml.dom.minidom import parse import sys from n3tagvalue import stagvalue,n3header,n3secid import re fout =sys.stdout # Where to write to. Modified by flat hashing=False def p(pnode,pre=''): """Debug utility""" #print >>fout,"%s:%s\n" % (pre,pnode.nodeType) nodelist=pnode.childNodes for node in nodelist: if node.nodeType == node.TEXT_NODE: print "%s%s\n" % (pre,node.data.strip()) elif node.nodeType == node.ELEMENT_NODE: print "%s%s:\n" % (pre,node.tagName) p(node,'+'+pre) def get(s,tag,mandatory=False): TAG=tag.upper().strip() try: VAL=s.getElementsByTagName(TAG)[0].firstChild.data.strip() except: if mandatory: raise Exception('Missing mandatory tag %s'%tag) return return VAL def sprt_tag(s,tag,mandatory=True,scale=1,ntag=None): VAL=get(s,tag,mandatory) if ntag: ntag=ntag.upper() else: ntag=tag.upper() if VAL: if scale!=1: return stagvalue(ntag,"%f"%(float(VAL)*scale)) else: return stagvalue(ntag,VAL) def prt_tag(s,tags,mandatory=True,ntag=None,scale=1,scale_assetclass=None): mandatory=False # TODO, remove this!!! 
if isinstance(tags,basestring): tags=[tags] if scale_assetclass in ['DEBT']: scale=scale/100.0 if scale_assetclass in ['OPT']: scale=scale*100.0 for tag in tags: v=sprt_tag(s,tag,mandatory=mandatory,scale=scale,ntag=ntag) if v: print >>fout,v, def prt_secid(s,acctid=None): uniqueid=s.getElementsByTagName('UNIQUEID')[0].firstChild.data.strip() uniqueidtype=s.getElementsByTagName('UNIQUEIDTYPE')[0].firstChild.data.strip() print >>fout,n3secid(uniqueidtype,uniqueid), pacctid(acctid) def pacctid(acctid): if acctid: print >>fout,stagvalue('ACCTID',acctid,hashing=hashing), def prt_cashacc(acctid,currency,p=None,accttype='CHECKING'): if p: try: accttype=get(p,'SUBACCTFUND',mandatory=True) except: accttype=get(p,'ACCTTYPE',mandatory=True) print >>fout,n3secid('CHECKING',currency), pacctid(acctid) print >>fout,stagvalue("ACCTTYPE",accttype), if accttype in ['CHECKING', 'CASH']: print >>fout,stagvalue('ASSETCLASS','CHECKING'), def flat(fin,_fout,context=None,_hashing=False): global fout,hashing fout=_fout hashing=_hashing if isinstance(fin,str): fin=open(fin) else: fin.seek(0) n3header(fout,"$Id$"[1:-1],context) dom = parse(fin) # parse an XML file by name # Start with information on assets. In the examples I have of OFX files, this information # appeared in the end of the file, but it should be processed first so latter on when processing # holdings it will be easier to locate the relevant asset. 
seclist=dom.getElementsByTagName('SECLIST') if seclist: for s in seclist[0].childNodes: print >>fout,"a3:flatLine [ ", if s.nodeType != s.ELEMENT_NODE: continue # Skip comments prt_secid(s) ticker=get(s,'TICKER') if ticker: ticker=ticker.split()[0] # Some banks put their FIID after the ticker print >>fout,stagvalue('TICKER',ticker), v=get(s,'FIID') if v: print >>fout,stagvalue('FIID',v), v = get(dom,'BROKERID') if v: print >>fout,stagvalue('BROKERID',v) else: v=get(dom,'ORG') print 'FIID found but cant find BROKERID, using ORG instead' if v: print >>fout,stagvalue('ORG',v) if get(s,'YIELD'): if get(s,'DTYIELDASOF')!=get(s,'DTASOF'): raise Exception("price and yield dates dont match") prt_tag(s,['SECNAME','DTASOF']) assetclass=s.tagName[:-4] print >>fout,stagvalue("ASSETCLASS",assetclass), prt_tag(s,'UNITPRICE',scale_assetclass=assetclass) # TODO check if it is better to scale the UNITS instead (looks like DEBT->UNITPRICE OPT->UNITS) prt_tag(s,['ASSETCLASS','PARVALUE','DEBTTYPE','DEBTCLASS','DTMAT', 'COUPONRT','COUPONFREQ','MFTYPE','STOCKTYPE','YIELD'], mandatory=False) print >>fout,"];" # Credit card accounts stmttrnrs=dom.getElementsByTagName('CCSTMTTRNRS') if stmttrnrs: for stmtrs in stmttrnrs: print >>fout,"a3:flatLine [ ", acctid=get(stmtrs,'ACCTID') curdef=get(stmtrs,'CURDEF') # bind togther all the information about the cash holding. prt_cashacc(acctid,curdef,accttype='CREDITCRD') # info on the account # bind togther all the information about the account. 
# The DTSTART, DTEND are bounded to the unique pair <ACCTID,FILE> but the FILE is not # specified because it is implicitly assumed everywhere in the line file prt_tag(stmtrs,['CURDEF', #'DTSTART','DTEND' # FIXME there are too many transaction outside range or the range is too big to be full ]) prt_tag(dom,['ORG','FID'],mandatory=False) # info on the cash holding l=stmtrs.getElementsByTagName('LEDGERBAL')[0] prt_tag(l,'DTASOF') prt_tag(l,'BALAMT',ntag='UNITS') # Ignore AVAILBAL print >>fout,"];" # Generate information on all transactions for t in stmtrs.getElementsByTagName('STMTTRN'): print >>fout,"a3:flatLine [ ", # bind all information on transaction with the cash holding. prt_cashacc(acctid,curdef,accttype='CREDITCRD') prt_tag(t,['TRNTYPE','FITID']) prt_tag(t,'TRNAMT',ntag='UNITS') prt_tag(t,'DTPOSTED',ntag='DTSETTLE') prt_tag(t,'DTAVAIL',mandatory=False) prt_tag(t,['DTUSER','CHECKNUM','REFNUM','NAME','MEMO'],mandatory=False) print >>fout,"];" # Checking accounts stmttrnrs=dom.getElementsByTagName('STMTTRNRS') if stmttrnrs: for stmtrs in stmttrnrs: print >>fout,"a3:flatLine [ ", acctid=get(stmtrs,'ACCTID') curdef=get(stmtrs,'CURDEF') # bind togther all the information about the cash holding. prt_cashacc(acctid,curdef,stmtrs) # info on the account # bind togther all the information about the account. # The DTSTART, DTEND are bounded to the unique pair <ACCTID,FILE> but the FILE is not # specified because it is implicitly assumed everywhere in the line file prt_tag(stmtrs,['CURDEF', #'DTSTART','DTEND' # FIXME check if/when this can be done ]) prt_tag(dom,['ORG','FID'],mandatory=False) # info on the cash holding l=stmtrs.getElementsByTagName('LEDGERBAL')[0] prt_tag(l,'DTASOF') prt_tag(l,'BALAMT',ntag='UNITS') # Ignore AVAILBAL print >>fout,"];" # Generate information on all transactions for t in stmtrs.getElementsByTagName('STMTTRN'): print >>fout,"a3:flatLine [ ", # bind all information on transaction with the cash holding. 
prt_cashacc(acctid,curdef,stmtrs) prt_tag(t,['TRNTYPE','FITID']) prt_tag(t,'TRNAMT',ntag='UNITS') prt_tag(t,'DTPOSTED',ntag='DTSETTLE') prt_tag(t,'DTAVAIL',mandatory=False) prt_tag(t,['DTUSER','CHECKNUM','REFNUM','NAME','MEMO'],mandatory=False) print >>fout,"];" # Investment accounts invstmttrnrs=dom.getElementsByTagName('INVSTMTTRNRS') if invstmttrnrs: for invstmtrs in invstmttrnrs: print >>fout,"a3:flatLine [ ", # Every line should show the ACCTID acctid=get(invstmtrs,'ACCTID') curdef=get(invstmtrs,'CURDEF') # bind togther all the information about the account. # The DTSTART, DTEND are bounded to the unique pair <ACCTID,FILE> but the FILE is not # specified because it is implicitly assumed everywhere in the line file prt_tag(invstmtrs,['CURDEF','ACCTID', #'DTSTART','DTEND' # Fixme check when it can be done ]) prt_tag(dom,['ORG','FID'],mandatory=False) print >>fout,"];" # generate statement line for CASH account print >>fout,"a3:flatLine [ ", prt_cashacc(acctid,curdef,accttype='CASH') # Make this match the CASH accounts used in investment transactions prt_tag(invstmtrs,'AVAILCASH',ntag='UNITS') prt_tag(invstmtrs,'DTASOF') print >>fout,"];" # Dump current portfolio of the account for p in invstmtrs.getElementsByTagName('INVPOS'): print >>fout,"a3:flatLine [ ", prt_secid(p,acctid) prt_tag(p,'DTPRICEASOF',ntag='DTASOF') assetclass=p.parentNode.tagName[3:] print >>fout,stagvalue("ASSETCLASS",assetclass), prt_tag(p,'UNITPRICE',scale_assetclass=assetclass) prt_tag(p,['POSTYPE','UNITS','MKTVAL']) prt_tag(p,'MEMO',ntag='POSMEMO',mandatory=False) # POSMEMO in order not to confuse with transaction's MEMO print >>fout,"];" # Dump transactions for trn in ['INVBUY','INVSELL']: for p in invstmtrs.getElementsByTagName(trn): print >>fout,"a3:flatLine [ ", prt_secid(p,acctid) prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','UNITS','COMMISSION','FEES','TOTAL']) if trn=='INVBUY': print >>fout,stagvalue("TRNTYPE","BUY"), assetclass = p.parentNode.tagName[3:] else: print 
>>fout,stagvalue("TRNTYPE","SELL"), assetclass = p.parentNode.tagName[4:] print >>fout,stagvalue("ASSETCLASS",assetclass), prt_tag(p,'UNITPRICE',scale_assetclass=assetclass) prt_tag(p,['MARKUP','MARKDOWN'],mandatory=False, scale_assetclass=assetclass) accrdint=get(p.parentNode,'ACCRDINT') # ACCRDINT is outside the INVBUY/SELL structure. if accrdint: print >>fout,stagvalue('ACCRDINT',accrdint), print >>fout,"];" # generate line for current account print >>fout,"a3:flatLine [ ", prt_cashacc(acctid,curdef,p) prt_tag(p,'FITID',ntag='RELFITID') prt_tag(p,'DTSETTLE') prt_tag(p,'TOTAL',ntag='UNITS') if trn=='INVBUY': print >>fout,stagvalue('TRNTYPE','DEBIT'), else: print >>fout,stagvalue('TRNTYPE','CREDIT'), print >>fout,"];" # ACCRDINT is real money when you buy/sell a debit but it does not appear in TOTAL if accrdint: print >>fout,"a3:flatLine [ ", # generate line for current account prt_cashacc(acctid,curdef,p) prt_tag(p,'FITID',ntag='RELFITID') prt_tag(p,'DTSETTLE') print >>fout,stagvalue('UNITS',accrdint), if trn=='INVBUY': print >>fout,stagvalue('TRNTYPE','DEBIT'), else: print >>fout,stagvalue('TRNTYPE','CREDIT'), print >>fout,stagvalue('PAYEEID','ACCRDINT'), # The money is not coming from the Asset issuer but from the side selling/buying the asset to us. 
print >>fout,"];" for p in invstmtrs.getElementsByTagName('INCOME'): print >>fout,"a3:flatLine [ ", prt_secid(p,acctid) prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','TOTAL']) prt_tag(p,'INCOMETYPE',ntag='TRNTYPE') print >>fout,"];" # generate line for current account print >>fout,"a3:flatLine [ ", prt_cashacc(acctid,curdef,p) prt_tag(p,'FITID',ntag='RELFITID') prt_tag(p,'DTSETTLE') prt_tag(p,'TOTAL',ntag='UNITS') print >>fout,stagvalue('TRNTYPE','CREDIT'), print >>fout,"];" for p in invstmtrs.getElementsByTagName('INVEXPENSE'): print >>fout,"a3:flatLine [ ", prt_secid(p,acctid) prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO']) prt_tag(p,'TOTAL',scale=-1) memo=get(p,'MEMO') if re.search(r"\bTAX\b",memo,re.IGNORECASE): prt_tag(p,'TOTAL',ntag='TAXES') print >>fout,stagvalue("TRNTYPE","TAX"), elif re.search(r"\bFEE\b",memo,re.IGNORECASE): prt_tag(p,'TOTAL',ntag='FEES') print >>fout,stagvalue("TRNTYPE","FEE"), else: print "Unknown expense",memo prt_tag(p,'TOTAL',ntag='COMMISSION') print >>fout,stagvalue("TRNTYPE","EXPENSE"), print >>fout,"];" # generate line for current account print >>fout,"a3:flatLine [ ", prt_cashacc(acctid,curdef,p) prt_tag(p,'FITID',ntag='RELFITID') prt_tag(p,'DTSETTLE') prt_tag(p,'TOTAL',ntag='UNITS',scale=-1) print >>fout,stagvalue('TRNTYPE','DEBIT'), print >>fout,"];" for p in invstmtrs.getElementsByTagName('TRANSFER'): print >>fout,"a3:flatLine [ ", prt_secid(p,acctid) prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','UNITS']) prt_tag(p,'TFERACTION',ntag='TRNTYPE') print >>fout,"];" # note that TRANSFER does not have a SUBACCTFUND to balance with for p in invstmtrs.getElementsByTagName('INVBANKTRAN'): print >>fout,"a3:flatLine [ ", prt_cashacc(acctid,curdef,p) prt_tag(p,['TRNTYPE','FITID','NAME','MEMO']) prt_tag(p,'DTPOSTED',ntag='DTSETTLE') prt_tag(p,'TRNAMT',ntag='UNITS') print >>fout,"];" dom.unlink() print >>fout,"."
"Adrift in Cedar Key" is a fully equipped two bedroom, two bath condominium overlooking the Gulf of Mexico. Amenities include a large screen TV, wi-fi, wine cooler, large deck, washer and dryer, and dishwasher. The heated pool and hot tub are just steps away. Park your car under Building 6 and walk or ride your bike to the many fine restaurants, shops, and bars. Our white sand beach is just a few minutes walk. Kayak and golf cart rentals are available nearby. A fishing pier and fish cleaning facility are on the grounds.
from django.contrib.contenttypes.models import ContentType
from .models import Follow


class UserFollowMixin(object):
    """Follow/unfollow helpers for a user-like model, backed by `Follow` rows."""

    def get_follow_set(self, model=None):
        """Return the objects this user follows.

        When *model* is given, only targets of that model class are returned.
        """
        follows = Follow.objects.filter(user=self).prefetch_related('target')
        if model:
            follows = follows.filter(
                target_content_type=ContentType.objects.get_for_model(model))
        return [entry.target for entry in follows]

    def follow(self, item):
        """Start following *item*; a no-op if the follow already exists."""
        content_type = ContentType.objects.get_for_model(item)
        Follow.objects.get_or_create(
            user=self,
            target_content_type=content_type,
            target_object_id=item.pk,
        )

    def unfollow(self, item):
        """Stop following *item*; a no-op if no such follow exists."""
        content_type = ContentType.objects.get_for_model(item)
        Follow.objects.filter(
            user=self,
            target_content_type=content_type,
            target_object_id=item.pk,
        ).delete()

    def is_following(self, item):
        """Return True if this user currently follows *item*."""
        content_type = ContentType.objects.get_for_model(item)
        return Follow.objects.filter(
            user=self,
            target_content_type=content_type,
            target_object_id=item.pk,
        ).exists()


class TargetFollowMixin(object):
    """Follower-listing helper for models that can be followed."""

    def get_follower_set(self):
        """Return the users following this object."""
        follows = Follow.objects.filter(
            target_content_type=ContentType.objects.get_for_model(self),
            target_object_id=self.id,
        ).prefetch_related('user')
        return [entry.user for entry in follows]
We do not frame our oil painting reproductions. Hand-Painted Art Reproduction is an expensive product, and the risks of damaging a painting stretched on a frame during transportation are too high. The English Ship 'Royal Sovereign' With a Royal Yacht in a Light Air by Willem van de Velde is, therefore, not framed, and will be sent to you rolled up and packaged in a strong and secure postal tube.
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Unassign machines from users in connection broker.

Usage: orchestrate broker machines unassign <DEPLOYMENT> <MACHINE>
"""

import logging
import optparse

from orchestrate import base
from orchestrate.systems.teradici import camapi

log = logging.getLogger(__name__)


class Command(base.OrchestrateCommand):
  """Unassign machines from users in connection broker.
  """

  @property
  def description(self):
    # Fixed typo in user-facing help text ("macines" -> "machines").
    return """
Unassign all users from given machines in connection broker.

Usage: orchestrate broker machines unassign <DEPLOYMENT> <MACHINE1> [ <MACHINE2>[ ...]]
""".lstrip()

  @property
  def defaults(self):
    """Returns default option values."""
    return dict(
        deployment=None,
        )

  @property
  def options(self):
    """Returns command parser options."""
    options = [
        optparse.Option('--deployment', help=(
            'Deployment name. Uses project name by default if not explicitly'
            ' provided')),
        ]
    return options

  def run(self, options, arguments):
    """Executes command.

    Args:
      options: Command-line options.
      arguments: Command-line positional arguments

    Returns:
      True if successful. False, otherwise.
    """
    log.debug('broker machines unassign %(options)s %(arguments)s', dict(
        options=options, arguments=arguments))

    if len(arguments) < 1:
      log.error('Expected at least one machine name.')
      return False

    machine_names = arguments
    deployment_name = options.deployment or options.project

    # BUGFIX: propagate unassign()'s result so run() actually returns
    # True/False as its docstring promises, instead of always returning None.
    return self.unassign(options.project, deployment_name, machine_names)

  def unassign(self, project, deployment_name, machine_names):
    """Unassign all users from given machines.

    Args:
      project: GCP project.
      deployment_name: Deployment.
      machine_names: Machine names.

    Returns:
      True if it succeeded. False otherwise.
    """
    log.debug('Locating deployment: %s', deployment_name)
    cam = camapi.CloudAccessManager(project=project,
                                    scope=camapi.Scope.DEPLOYMENT)
    deployment = cam.deployments.get(deployment_name)

    # Get machine ids. Missing machines are logged and skipped, not fatal.
    all_machines = []
    for machine_name in machine_names:
      log.debug('Locating machine in CAM: %s', machine_name)
      machines = cam.machines.get(deployment, machineName=machine_name)
      if machines:
        machine = machines[0]
        log.debug('Found machine %s with ID %s', machine_name,
                  machine['machineId'])
        all_machines.append(machine)
      else:
        message = (
            'Could not locate machine {machine_name}. Check whether it exists'
            ' and that it was assigned to users. Skipping for now.'
            ).format(machine_name=machine_name)
        log.warning(message)

    # Find all entitlements for all machine ids collected and remove them.
    for machine in all_machines:
      log.info(
          'Locating entitlements for machine %(machineName)s %(machineId)s',
          machine)
      entitlements = cam.machines.entitlements.get(
          deployment, machineName=machine['machineName'])
      for entitlement in entitlements:
        log.info('Removing entitlement %(entitlementId)s', entitlement)
        cam.machines.entitlements.delete(entitlement)

    return True
The aluminium alloy ingot import trade sector contributes significantly to the overall GDP percentage of India. No wonder, the port is booming in this sector and at Seair, we better understand how to benefit you from this welcome opportunity. We comprehend the fact that the majority of import firms in India are active in sourcing distinct ranges of products including raw materials, machinery and consumer goods etc. Hence, we provide comprehensive import data solutions for broad categories of import trading firms. Our aluminium alloy ingot import data solutions meet your actual import requirements in quality, volume, seasonality, and geography. Alongside we help you get detailed information on the vital import fields that encompass HS codes, product description, duty, quantity, price etc. The import data from Seair paves the way for successful partnerships that generate profit for business from both the local and global precincts.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from .EnvironmentProperties import EnvironmentProperties

__author__ = 'Shamal Faily'


class RoleEnvironmentProperties(EnvironmentProperties):
  """Per-environment properties of a role: its responses, countermeasures,
  goals and requirements."""

  def __init__(self, environmentName, responses, countermeasures, goals, requirements):
    """Store the role's environment-specific associations.

    Args:
      environmentName: name of the environment these properties apply to.
      responses: responses associated with the role in this environment.
      countermeasures: countermeasures associated with the role.
      goals: goals associated with the role.
      requirements: requirements associated with the role.
    """
    EnvironmentProperties.__init__(self, environmentName)
    self.theResponses = responses
    self.theCountermeasures = countermeasures
    self.theGoals = goals
    self.theRequirements = requirements

  def responses(self):
    """Return the role's responses for this environment."""
    return self.theResponses

  def countermeasures(self):
    """Return the role's countermeasures for this environment."""
    return self.theCountermeasures

  def goals(self):
    """Return the role's goals for this environment."""
    return self.theGoals

  def requirements(self):
    """Return the role's requirements for this environment."""
    return self.theRequirements
indieguerillas is an artist duo from Yogyakarta, Indonesia, comprising husband and wife Dyatmiko Lancur Bawono (b. 1975) and Sant Ariestyowant (b. 1977). Founded in 1999 as a graphic design firm, indieguerillas’s philosophy of “constantly in guerrilla to find new possibilities” led them to become full-time artists in 2007. Nevertheless, design still plays a very important role in their practice as it allows them to explore the use of unconventional mediums and techniques as part of their artistic statement. In addition to their proficiency at visual effects and inter-media experimentation, their works are also recognised for their folklore influences. The unique intertwinement between traditional values and contemporary culture has brought indieguerillas to numerous important exhibitions around the globe, most recently “Datang Untuk Kembali”, a performance made in collaboration with fashion designer Lulu Lutfi Labibi and sound director Ari Wulu, as a part of CITIES FOR PEOPLE NTU CCA Idea Fest 2016/17 at NTU Centre for Contemporary Art Singapore. Some of their selected solo exhibitions include _hyP3<y<lu5_, Mizuma Gallery, Singapore (2017), The Prefabricated Faith, Project Room, Gillman Barracks, Singapore (2012), Indie what? Indie who?, Garis Art Space, Jakarta, Indonesia (2010), and Happy Victims (Valentine Willie Fine Art, Artspace at Helutrans, Singapore, 2010). They have also participated in group exhibitions extensively in Indonesia, Singapore, Australia, United Kingdom, South Korea, Italy, and the United States. Their works are in the collections of OHD Museum, Magelang, Indonesia; Singapore Art Museum, Singapore; and Guangdong Museum of Art, Guangzhou, China. indieguerillas lives and works in Yogyakarta, Indonesia.
from django.db import models
from django.contrib.auth.models import User
from django.shortcuts import resolve_url as r

from spark.activities.models import Activity


class Event(models.Model):
    """A user-created event that can accumulate LIKE activities."""

    user = models.ForeignKey(User)
    title = models.CharField('título', max_length=200)
    date_start = models.DateField('data')
    start = models.TimeField('início', null=True, blank=True)
    description = models.TextField('descrição', blank=True)
    address = models.TextField('local', null=True, blank=True)
    likes = models.IntegerField(default=0)

    class Meta:
        ordering = ('date_start',)
        verbose_name = 'evento'
        verbose_name_plural = 'eventos'

    def __str__(self):
        return self.title

    @staticmethod
    def get_events(from_event=None):
        """Return all events, or only those with pk <= from_event when given."""
        if from_event is None:
            return Event.objects.all()
        return Event.objects.filter(id__lte=from_event)

    @staticmethod
    def get_events_after(event):
        """Return events with pk greater than *event*."""
        return Event.objects.filter(id__gt=event)

    def calculate_likes(self):
        """Recount LIKE activities for this event, persist and return the total."""
        total = Activity.objects.filter(activity_type=Activity.LIKE,
                                        feed=self.pk).count()
        self.likes = total
        self.save()
        return self.likes

    def get_likes(self):
        """Return the LIKE activities attached to this event."""
        return Activity.objects.filter(activity_type=Activity.LIKE,
                                       feed=self.pk)

    def get_likers(self):
        """Return the users who liked this event."""
        return [like.user for like in self.get_likes()]


class Talk(models.Model):
    """A scheduled talk, possibly with several speakers."""

    title = models.CharField('título', max_length=200)
    date_start = models.DateField('data')
    start = models.TimeField('início', blank=True, null=True)
    description = models.TextField('descrição', blank=True)
    speakers = models.ManyToManyField(
        'Speaker', verbose_name='palestrantes', blank=True)

    class Meta:
        ordering = ['start']
        verbose_name = 'palestra'
        verbose_name_plural = 'palestras'

    def __str__(self):
        return self.title


class Speaker(models.Model):
    """A talk speaker with a public detail page keyed by slug."""

    name = models.CharField('nome', max_length=255)
    slug = models.SlugField('slug')
    photo = models.URLField('foto')
    website = models.URLField('website', blank=True)
    description = models.TextField('descrição', blank=True)

    class Meta:
        ordering = ('name',)
        verbose_name = 'palestrante'
        verbose_name_plural = 'palestrantes'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return r('speaker_detail', slug=self.slug)
For patients to write a testimonial about Direct Primary Care. Dr. Hancock's thoughtful, integrated approach to primary care allows him to look holistically for the root cause of apparently disconnected symptoms. An entirely different, refreshing approach to health care.
#!/usr/bin/env python # Copyright (C) 2011-2014 Swift Navigation Inc. # Contact: Fergus Noble <fergus@swift-nav.com> # # This source is subject to the license found in the file 'LICENSE' which must # be be distributed together with this source. All other rights reserved. # # THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, # EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE. import os import struct import sys import signal from piksi_tools import serial_link import sbp.client.handler from sbp.logging import SBP_MSG_PRINT from sbp.piksi import SBP_MSG_RESET from sbp.client.drivers.pyserial_driver import PySerialDriver from sbp.client.drivers.pyftdi_driver import PyFTDIDriver from sbp.ext_events import * from piksi_tools.version import VERSION as CONSOLE_VERSION # Shut chaco up for now import warnings warnings.simplefilter(action = "ignore", category = FutureWarning) def get_args(): """ Get and parse arguments.
""" import argparse parser = argparse.ArgumentParser(description='Swift Nav Console.') parser.add_argument('-p', '--port', nargs=1, default=[None], help='specify the serial port to use.') parser.add_argument('-b', '--baud', nargs=1, default=[serial_link.SERIAL_BAUD], help='specify the baud rate to use.') parser.add_argument("-v", "--verbose", help="print extra debugging information.", action="store_true") parser.add_argument("-l", "--log", action="store_true", help="serialize SBP messages to log file.") parser.add_argument("-o", "--log-filename", default=[serial_link.LOG_FILENAME], nargs=1, help="file to log output to.") parser.add_argument("-r", "--reset", action="store_true", help="reset device after connection.") parser.add_argument("-u", "--update", help="don't prompt about firmware/console updates.", action="store_false") parser.add_argument("-f", "--ftdi", help="use pylibftdi instead of pyserial.", action="store_true") parser.add_argument('-t', '--toolkit', nargs=1, default=[None], help="specify the TraitsUI toolkit to use, either 'wx' or 'qt4'.") parser.add_argument('-e', '--expert', action='store_true', help="Show expert settings.") return parser.parse_args() args = get_args() port = args.port[0] baud = args.baud[0] log_filename = args.log_filename[0] # Toolkit from traits.etsconfig.api import ETSConfig if args.toolkit[0] is not None: ETSConfig.toolkit = args.toolkit[0] else: ETSConfig.toolkit = 'qt4' # Logging import logging logging.basicConfig() from traits.api import Str, Instance, Dict, HasTraits, Int, Button, List from traitsui.api import Item, Label, View, HGroup, VGroup, VSplit, HSplit, Tabbed, \ InstanceEditor, EnumEditor, ShellEditor, Handler # When bundled with pyInstaller, PythonLexer can't be found. The problem is # pygments.lexers is doing some crazy magic to load up all of the available # lexers at runtime which seems to break when frozen.
# NOTE(review): the region above was collapsed by extraction and is kept
# byte-identical. It contains the Python 2 Piksi console module header and
# imports, get_args() (argparse CLI: port/baud/verbose/log/log-filename/reset/
# update/ftdi/toolkit/expert), argument parsing executed at import time, the
# TraitsUI toolkit selection (default 'qt4'), and basic logging setup.
# Third-party deps: piksi_tools, sbp, traits/traitsui, pygments.
# NOTE(review): collapsed-extraction region, kept byte-identical below.
# Contents: a pyInstaller workaround that force-registers pygments' PythonLexer;
# conditional qt4 pyface imports; basedir/icon resolution (sys._MEIPASS when
# frozen, module directory otherwise); imports of the console's view modules;
# CONSOLE_TITLE; ConsoleHandler (Traits UI handler that rewrites the window
# title when `device_serial` changes); and most of the SwiftConsole HasTraits
# class — its trait declarations, pause/clear SVG buttons, the full TraitsUI
# View layout, SBP_MSG_PRINT / external-event callbacks (Python 2 `print`
# statements), button handlers, and the start of __init__, which redirects
# stdout/stderr to the console widget and wires up every sub-view.
# # The horrible workaround is to load the PythonLexer class explicitly and then # manually insert it into the pygments.lexers module. from pygments.lexers.agile import PythonLexer import pygments.lexers pygments.lexers.PythonLexer = PythonLexer try: import pygments.lexers.c_cpp except ImportError: pass # These imports seem to be required to make pyinstaller work? # (usually traitsui would load them automatically) if ETSConfig.toolkit == 'qt4': import pyface.ui.qt4.resource_manager import pyface.ui.qt4.python_shell from pyface.image_resource import ImageResource if getattr(sys, 'frozen', False): # we are running in a |PyInstaller| bundle basedir = sys._MEIPASS os.chdir(basedir) else: # we are running in a normal Python environment basedir = os.path.dirname(__file__) icon = ImageResource('icon', search_path=['images', os.path.join(basedir, 'images')]) from output_stream import OutputStream from tracking_view import TrackingView from almanac_view import AlmanacView from solution_view import SolutionView from baseline_view import BaselineView from observation_view import ObservationView from sbp_relay_view import SbpRelayView from system_monitor_view import SystemMonitorView from settings_view import SettingsView from update_view import UpdateView from enable.savage.trait_defs.ui.svg_button import SVGButton CONSOLE_TITLE = 'Piksi Console, Version: ' + CONSOLE_VERSION class ConsoleHandler(Handler): """ Handler that updates the window title with the device serial number This Handler is used by Traits UI to manage making changes to the GUI in response to changes in the underlying class/data. """ def object_device_serial_changed(self, info): """ Update the window title with the device serial number. This is a magic method called by the handler in response to any changes in the `device_serial` variable in the underlying class.
""" if info.initialized: info.ui.title = CONSOLE_TITLE + ' : ' + info.object.device_serial class SwiftConsole(HasTraits): link = Instance(sbp.client.handler.Handler) console_output = Instance(OutputStream) python_console_env = Dict device_serial = Str('') a = Int b = Int tracking_view = Instance(TrackingView) solution_view = Instance(SolutionView) baseline_view = Instance(BaselineView) observation_view = Instance(ObservationView) sbp_relay_view = Instance(SbpRelayView) observation_view_base = Instance(ObservationView) system_monitor_view = Instance(SystemMonitorView) settings_view = Instance(SettingsView) update_view = Instance(UpdateView) paused_button = SVGButton( label='', tooltip='Pause console update', toggle_tooltip='Resume console update', toggle=True, filename=os.path.join(os.path.dirname(__file__), 'images', 'iconic', 'pause.svg'), toggle_filename=os.path.join(os.path.dirname(__file__), 'images', 'iconic', 'play.svg'), width=8, height=8 ) clear_button = SVGButton( label='', tooltip='Clear console buffer', filename=os.path.join(os.path.dirname(__file__), 'images', 'iconic', 'x.svg'), width=8, height=8 ) view = View( VSplit( Tabbed( Item('tracking_view', style='custom', label='Tracking'), Item('solution_view', style='custom', label='Solution'), Item('baseline_view', style='custom', label='Baseline'), VSplit( Item('observation_view', style='custom', show_label=False), Item('observation_view_base', style='custom', show_label=False), label='Observations', ), Item('settings_view', style='custom', label='Settings'), Item('update_view', style='custom', label='Firmware Update'), Tabbed( Item('system_monitor_view', style='custom', label='System Monitor'), Item('sbp_relay_view', label='SBP Relay', style='custom', show_label=False), Item( 'python_console_env', style='custom', label='Python Console', editor=ShellEditor()), label='Advanced', ), show_labels=False ), VGroup( HGroup( Item('', show_label=False), Item('paused_button', show_label=False), Item('clear_button',
show_label=False), Item('', label='Console Log', emphasized=True), ), Item( 'console_output', style='custom', editor=InstanceEditor(), height=0.3, show_label=False, ), ) ), icon = icon, resizable = True, width = 1000, height = 600, handler = ConsoleHandler(), title = CONSOLE_TITLE ) def print_message_callback(self, sbp_msg): try: self.console_output.write(sbp_msg.payload.encode('ascii', 'ignore')) except UnicodeDecodeError: print "Critical Error encoding the serial stream as ascii." def ext_event_callback(self, sbp_msg): e = MsgExtEvent(sbp_msg) print 'External event: %s edge on pin %d at wn=%d, tow=%d, time qual=%s' % ( "Rising" if (e.flags & (1<<0)) else "Falling", e.pin, e.wn, e.tow, "good" if (e.flags & (1<<1)) else "unknown") def _paused_button_fired(self): self.console_output.paused = not self.console_output.paused def _clear_button_fired(self): self.console_output.reset() def __init__(self, link, update): self.console_output = OutputStream() sys.stdout = self.console_output sys.stderr = self.console_output try: self.link = link self.link.add_callback(self.print_message_callback, SBP_MSG_PRINT) self.link.add_callback(self.ext_event_callback, SBP_MSG_EXT_EVENT) settings_read_finished_functions = [] self.tracking_view = TrackingView(self.link) self.solution_view = SolutionView(self.link) self.baseline_view = BaselineView(self.link) self.observation_view = ObservationView(self.link, name='Rover', relay=False) self.observation_view_base = ObservationView(self.link, name='Base', relay=True) self.sbp_relay_view = SbpRelayView(self.link) self.system_monitor_view = SystemMonitorView(self.link) self.update_view = UpdateView(self.link, prompt=update) settings_read_finished_functions.append(self.update_view.compare_versions) # Once we have received the settings, update device_serial with the Piksi # serial number which will be displayed in the window title def update_serial(): serial_string = self.settings_view.settings['system_info']['serial_number'].value
# NOTE(review): collapsed-extraction region, kept byte-identical below.
# Contents: the tail of SwiftConsole.__init__ — device_serial is formatted as
# 'PK%04d' from the 'system_info'/'serial_number' setting once settings have
# been read, SettingsView and the Python-console environment are wired up, and
# a broad except prints the traceback rather than crashing the GUI; the
# PortChooser dialog (shown when no --port was given; exits if none selected);
# SIGINT restored to SIG_DFL so Ctrl-C actually quits the Qt event loop; then
# the main entry: open the serial driver, SBP handler and optional logger,
# send SBP_MSG_RESET if --reset was passed, run the Traits UI, and finally
# os._exit(0) to force exit even if worker threads have not joined.
self.device_serial = 'PK%04d' % int(serial_string) settings_read_finished_functions.append(update_serial) self.settings_view = \ SettingsView(self.link, settings_read_finished_functions, hide_expert = not args.expert) self.update_view.settings = self.settings_view.settings self.python_console_env = { 'send_message': self.link.send, 'link': self.link, } self.python_console_env.update(self.tracking_view.python_console_cmds) self.python_console_env.update(self.solution_view.python_console_cmds) self.python_console_env.update(self.baseline_view.python_console_cmds) self.python_console_env.update(self.observation_view.python_console_cmds) self.python_console_env.update(self.sbp_relay_view.python_console_cmds) self.python_console_env.update(self.system_monitor_view.python_console_cmds) self.python_console_env.update(self.update_view.python_console_cmds) self.python_console_env.update(self.settings_view.python_console_cmds) except: import traceback traceback.print_exc() class PortChooser(HasTraits): ports = List() port = Str(None) traits_view = View( VGroup( Label('Select Piksi device:'), Item('port', editor=EnumEditor(name='ports'), show_label=False), ), buttons = ['OK', 'Cancel'], close_result=False, icon = icon, width = 250, title = 'Select serial device', ) def __init__(self): try: self.ports = [p for p, _, _ in serial_link.get_ports()] except TypeError: pass if not port: port_chooser = PortChooser() is_ok = port_chooser.configure_traits() port = port_chooser.port if not port or not is_ok: print "No serial device selected!" sys.exit(1) else: print "Using serial device '%s'" % port # Make sure that SIGINT (i.e.
Ctrl-C from command line) actually stops the # application event loop (otherwise Qt swallows KeyboardInterrupt exceptions) signal.signal(signal.SIGINT, signal.SIG_DFL) with serial_link.get_driver(args.ftdi, port, baud) as driver: with sbp.client.handler.Handler(driver.read, driver.write, args.verbose) as link: with serial_link.get_logger(args.log, log_filename) as logger: link.add_callback(logger) if args.reset: link.send(SBP_MSG_RESET, "") console = SwiftConsole(link, update=args.update) console.configure_traits() # Force exit, even if threads haven't joined try: os._exit(0) except: pass
At Bathroom Make Over Authority, we're there to fulfill all of your standards when it comes to Bathroom Makeovers in Esko, MN. You need the most advanced technologies available, and our team of experienced experts can provide just that. Our products are always of the very best quality and we can conserve your money. Contact us at 800-335-6975 and we'll be ready to review your alternatives, answer the questions you have, and set up a consultation to commence planning the project. Spending less is a vital part of any project. In the process, you will need the absolute best and highest quality of services for Bathroom Makeovers in Esko, MN. Our company offers the finest quality even while helping you save money. Whenever you choose our staff, you will receive the advantage of our practical experience and premium products to make sure the project will last while saving time and funds. We can do that by giving you the top bargains in the field and eliminating costly errors. Call up Bathroom Make Over Authority when you need the best products and services at the cheapest price. You will communicate with our team at 800-335-6975 to begin. To make the best judgments for Bathroom Makeovers in Esko, MN, you should be informed. We will be sure you understand what to expect. We will take the surprises from the situation by giving accurate and detailed info. Start by talking about your task with our customer support associates when you dial 800-335-6975. We will respond to all your concerns and questions and set up your preliminary meeting. Our crew can show up at the arranged time with the necessary materials, and will work together with you during the entire job. You'll have lots of good reasons to use Bathroom Make Over Authority to meet your requirements involving Bathroom Makeovers in Esko, MN. We offer the top customer service ratings, the best resources, and the most helpful and powerful money saving techniques. 
We'll be ready to assist you with the greatest expertise and practical knowledge in the market. Call 800-335-6975 when you require Bathroom Makeovers in Esko, and we are going to work with you to successfully accomplish your project.
"""Merge per-sample RSEM result files into count/TPM/FPKM matrices.

Usage: script.py sample1.genes.results sample2.genes.results ...

The expected_count matrix is printed to stdout as TSV; the TPM and FPKM
matrices are written next to the input files (or to the current directory
when the inputs have no directory component) as <prefix>TPM / <prefix>FPKM.
"""
import os
import re  # unused by the logic below; retained from the original imports
import sys

import pandas as pd

# Recognised RSEM result suffixes and the matrix prefix each one selects.
_SUFFIX_PREFIX = (
    ('.genes.results', 'GeneMat_'),
    ('.isoforms.results', 'IsoMat_'),
)


def sample_and_prefix(file_name):
    """Return (sample_name, matrix_prefix) for one RSEM result path.

    BUGFIX: the original used ``str.rstrip('.genes.results')``, which strips
    any trailing characters belonging to that *set* of characters rather than
    the exact suffix (e.g. 'stress.genes.results'.rstrip(...) mangles the
    sample name). The exact suffix is removed instead.
    """
    base = os.path.basename(file_name)
    for suffix, prefix in _SUFFIX_PREFIX:
        if base.endswith(suffix):
            return base[:-len(suffix)], prefix
    # Mirrors the original else-branch: anything not *.genes.results is
    # treated as an isoforms file; the name is left untouched if the
    # isoforms suffix is absent.
    return base, 'IsoMat_'


def main(files):
    """Build and emit the merged matrices for the given result files."""
    counts = tpm = fpkm = None
    prefix = 'IsoMat_'
    out_dir = ''

    for file_name in files:
        table = pd.read_csv(file_name, sep='\t', index_col=0)
        sample, prefix = sample_and_prefix(file_name)
        # Like the original, the directory of the *last* input decides where
        # the TPM/FPKM matrices are written.
        out_dir = os.path.dirname(file_name)

        col_counts = table['expected_count'].rename(sample)
        col_tpm = table['TPM'].rename(sample)
        col_fpkm = table['FPKM'].rename(sample)

        if counts is None:
            counts, tpm, fpkm = col_counts, col_tpm, col_fpkm
        else:
            counts = pd.concat([counts, col_counts], axis=1)
            tpm = pd.concat([tpm, col_tpm], axis=1)
            fpkm = pd.concat([fpkm, col_fpkm], axis=1)

    if counts is None:
        # No input files: nothing to print or write (original behaviour).
        return

    # Counts matrix goes to stdout, untruncated.
    with pd.option_context('display.max_rows', None,
                           'display.max_columns', None):
        print(counts.to_csv(sep='\t', index=True))

    if out_dir:
        tpm.to_csv(os.path.join(out_dir, prefix + 'TPM'), sep='\t', index=True)
        fpkm.to_csv(os.path.join(out_dir, prefix + 'FPKM'), sep='\t', index=True)
    else:
        tpm.to_csv(prefix + 'TPM', sep='\t', index=True)
        fpkm.to_csv(prefix + 'FPKM', sep='\t', index=True)


if __name__ == '__main__':
    main(sys.argv[1:])
Second hand Ford Ka | Used Ka cars | Buy a cheap Ka with usedcarshowroom.co.uk. Search to buy a used Ka car here at Used Car Showroom, as we have a huge database of used Ka cars from used Ford car dealerships. You can refine your search criteria on a Ka using the form below. Dealers are offering the most competitive prices to sell a second-hand Ka to you. You can be sure to get the best price to purchase your Ford Ka at usedcarshowroom.
"""Helpers for tabulating antimicrobial-resistance gene hits reported by
abricate (``*tab`` files in the working directory) and SRST2
(``srst2/*results.txt``), plus Jupyter widgets to browse them."""
from __future__ import print_function

from collections import defaultdict

from clustergrammer_widget import *
#from antibiotic_res import *
from ipywidgets import interact, interactive, fixed, interact_manual
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import glob
import qgrid
import numpy as np


def concat_abricate_files(filenames):
    """Read every abricate report matching the *filenames* glob into one
    DataFrame, tagging each row with the file it came from."""
    paths = glob.glob(filenames)
    frames = [pd.read_table(path, header=1) for path in paths]
    for frame, path in zip(frames, paths):
        frame['filename'] = path
    return pd.concat(frames, ignore_index=True)


def concat_srst2_txt(filenames):
    """Read every SRST2 result file matching the *filenames* glob into one
    DataFrame, tagging each row with the file it came from."""
    paths = glob.glob(filenames)
    frames = [pd.read_table(path, header=0) for path in paths]
    for frame, path in zip(frames, paths):
        frame['filename'] = path
    return pd.concat(frames, ignore_index=True, sort=True)


def calc_total_genes_abricate():
    """Total abricate gene hits per input file, most hits first."""
    combined_df = concat_abricate_files('*tab')
    counts = combined_df.groupby('filename').GENE.count().to_frame()
    return counts.sort_values('GENE', ascending=False)


def calculate_unique_genes_abricate():
    """Number of distinct abricate genes per input file, most genes first."""
    combined_df = concat_abricate_files("*tab")
    counts = combined_df.groupby('filename').GENE.nunique().to_frame()
    return counts.sort_values('GENE', ascending=False)


def calc_total_genes_srst2():
    """Total SRST2 gene hits per input file, most hits first."""
    combined_df = concat_srst2_txt('srst2/*results.txt')
    counts = combined_df.groupby('filename').gene.count().to_frame()
    return counts.sort_values('gene', ascending=False)


def calculate_unique_genes_srst2():
    """Number of distinct SRST2 genes per input file, most genes first."""
    combined_df = concat_srst2_txt('srst2/*results.txt')
    counts = combined_df.groupby('filename').gene.nunique().to_frame()
    return counts.sort_values('gene', ascending=False)


def _presence_absence_table(combined_df, gene_col):
    """Build a boolean gene x filename presence/absence DataFrame from
    *combined_df*, keeping only genes seen in more than one file."""
    grouped = combined_df.groupby(gene_col)
    gene_filenames = defaultdict(list)
    for gene in set(combined_df[gene_col]):
        group = grouped.get_group(gene)
        # Only genes shared by at least two files are interesting here.
        if len(group['filename']) > 1:
            gene_filenames[gene].extend(group['filename'].tolist())
    filenames = set()
    for files in gene_filenames.values():
        filenames.update(files)
    filenames = list(filenames)
    data = {gene: [f in files for f in filenames]
            for gene, files in gene_filenames.items()}
    return pd.DataFrame.from_dict(data, orient='index', columns=filenames)


def create_abricate_presence_absence_gene_table():
    """Presence/absence table of abricate genes across all input files."""
    combined_df = concat_abricate_files('*tab')
    # Drop rows containing any NaN before grouping.
    combined_df.dropna(axis=0, inplace=True)
    return _presence_absence_table(combined_df, 'GENE')


def create_srst2_presence_absence_gene_table():
    """Presence/absence table of SRST2 genes across all input files."""
    combined_df = concat_srst2_txt('srst2/*results.txt')
    # Drop rows with no gene call before grouping.
    combined_df.dropna(axis=0, subset=['gene'], inplace=True)
    return _presence_absence_table(combined_df, 'gene')


def interactive_table_abricate():
    """qgrid widget of the abricate presence/absence table."""
    dense_df = create_abricate_presence_absence_gene_table()
    return qgrid.show_grid(dense_df, show_toolbar=True)


def interactive_table_srst2():
    """qgrid widget of the SRST2 presence/absence table."""
    dense_df = create_srst2_presence_absence_gene_table()
    return qgrid.show_grid(dense_df, show_toolbar=True)


def _clustergrammer_map(dense_df):
    """Cluster *dense_df* with clustergrammer and return the widget."""
    net = Network(clustergrammer_widget)
    net.load_df(dense_df)
    net.cluster(enrichrgram=False)
    return net.widget()


def interactive_map_abricate():
    """Clustered heatmap of the abricate presence/absence table."""
    return _clustergrammer_map(create_abricate_presence_absence_gene_table())


def interactive_map_srst2():
    """Clustered heatmap of the SRST2 presence/absence table.

    Fixed: this previously built the *abricate* table instead of the
    SRST2 one.
    """
    return _clustergrammer_map(create_srst2_presence_absence_gene_table())
There are three national holidays recognizing the men and women who put their lives on "hold" to serve our country. It's easy to misunderstand the difference between the three holidays. To demonstrate my point, ask a friend or associate if they know the purpose of each holiday, beyond no mail delivery, banks closed and limited (or no) public transportation. • Armed Forces Day. Celebrating current active duty service personnel. Created in August 1949 to replace the separate Army, Navy, Marine Corps, and US Air Force Days, it also includes the US Coast Guard, the oldest branch of our military, established in 1790, as a celebration of the unification of the Armed Forces under the Department of Defense (DoD). This year, Armed Forces Day will be observed, as always, on the third Saturday of the month, May 19th. About 50 miles south of me, Bremerton (Washington) claims to have the longest running (71 years) parade and celebration, with a large — and popular — contingent from Naval Base Kitsap. This year's parade will feature more than 50 bands, floats, military marching units, and drill teams. Following the Civil War, many cities established Decoration Day, to honor those who fought and died in both the North and South. The US Department of Veterans Affairs states that the official birthplace was declared in 1966, when Congress and President Lyndon Johnson declared Waterloo, N.Y., the “birthplace” of Memorial Day. There, a ceremony on May 5, 1866, honored local veterans who had fought in the Civil War. This year, Memorial Day, the last Monday in May, will be honored on May 28th. To ensure the sacrifices of America's fallen heroes are never forgotten, in December 2000, the U.S. Congress passed and the president signed into law “The National Moment of Remembrance Act,” P.L. 106-579, creating the White House Commission on the National Moment of Remembrance.
The commission’s charter is to “encourage the people of the United States to give something back to their country, which provides them so much freedom and opportunity” by encouraging and coordinating commemorations in the United States of Memorial Day and the National Moment of Remembrance. • Veterans Day is set aside to thank living veterans for their service and to acknowledge that their contribution to our national security is appreciated. This year, Veterans Day is Sunday, November 11. Since the 11th is a Sunday, observance (day off, no mail, banks closed and, in many areas, no public transportation) will be Monday, November 12. First, England, Scotland and Australia recognize their Armed Forces on the same day, the 3rd Saturday of May. Thanks to President Harry S. Truman, it's a day to pay special tribute to the men and women of the Armed Forces. Armed Forces Day in Thailand: On 25 January 2018, H.E. Mr. Pitchayaphant Charnbhumidol, Ambassador of Thailand to Indonesia, presided over the reception to commemorate the Royal Thai Armed Forces Day 2018 in Jakarta. The event was organized by the Office of the Defense Attache and supported by the Royal Thai Embassy in Jakarta, Team Thailand, and the Thai community in Indonesia. From the 69th anniversary of the Armed Forces Day in 2017. Observed by South Korea October 1st; the 70th anniversary is coming up this October 2018. The Republic of South Africa varies in celebration date, but the honors remain the same. Finally, the largest and longest running recognition of Armed Forces Day is celebrated about 40 miles south of here at Bremerton, Washington. More than 50 units are scheduled for this year's parade. Located on Sinclair Inlet, Puget Sound Naval Shipyard employs hundreds of civilian and Naval personnel.
# This file is part of snowboard.
#
# snowboard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# snowboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with snowboard.  If not, see <http://www.gnu.org/licenses/>.

'''
Stores user privileges for a specific channel.

See https://github.com/dwhagar/snowboard/wiki/Class-Docs for documentation.
'''

from .userFlags import UserFlags


class UserChannel:
    '''Complete per-channel privilege record for a single user.'''

    def __init__(self):
        self.name = None          # channel name, stored lowercased
        self.level = 0            # numeric access level on this channel
        self.flags = UserFlags()  # approval / denial flags

    def checkApproved(self, flag):
        '''True when the user's flags approve *flag* at the current level.'''
        return self.flags.checkApproved(flag, self.level)

    def checkDenied(self, flag):
        '''True when the user's flags explicitly deny *flag*.'''
        return self.flags.checkDenied(flag)

    def toData(self, data):
        '''Populate this object from an encoded "name/level/flags" string.'''
        fields = data.split('/')
        self.name = fields[0].lower()
        self.level = int(fields[1])
        self.flags.toData(fields[2].lower())

    def toString(self):
        '''Encode the object back into a "name/level/flags" string.'''
        return "/".join([self.name, str(self.level), self.flags.toString()])
Goody's Powder is an over-the-counter pain reliever, in powder form, marketed and sold by Prestige Brands. Goody's Extra Strength Powder contains aspirin, caffeine, and acetaminophen, in a formula similar to Excedrin, a product of Novartis. The formulation of "Goody's Extra Strength Headache Powders" is currently 520 mg aspirin, 260 mg acetaminophen and 32.5 mg caffeine, which differs from other similarly powdered products under the same brand name. Goody's Powder is sold primarily in the southern United States. For many years, the face of Goody's has been NASCAR legend Richard Petty, who appears in advertisements for the brand. In 2013, the brand brought on NASCAR's most popular driver, Dale Earnhardt, Jr to join Petty as spokesperson for the brand. The company's website claims that "probably the most popular technique" to take the powder is to "dump" it on the tongue and then "chase" it with a liquid. Goody's Powder can also be mixed in water and ingested as a drink. Goody's Powder was developed in conjunction with the Herpelscheimer Clinic in Graz, Austria, and manufactured for many years by Goody's Manufacturing Company, a family-owned business founded in 1932 and based in Winston-Salem, North Carolina. The company also produced other medicinal products, including throat sprays and throat lozenges. The headache powder was introduced in 1936. Since 1995 GlaxoSmithKline has produced Goody's Powders in Memphis, Tennessee. The company sold Goody's and 16 other brands to Prestige Brands in 2012. Goody's Powder has a long history of sponsoring motor racing events and teams, especially NASCAR. The Daytona Nationwide Race was sponsored by Goody's from 1982 to 1996. Goody's is the title sponsor of the Goody's Headache Relief Shot 500 Sprint Cup Series race at Martinsville Speedway and was the title sponsor of the Goody's Headache Powder 500 Cup race at Bristol Motor Speedway from 1996 to 1999. 
Goody's was the official pain reliever of NASCAR from 1977 until 2007, when Tylenol became the new pain reliever of NASCAR. Goody's sponsored Chad McCumbee's No. 45 Dodge at Pocono and Tony Stewart's Busch car in 2006 and 2007 and they have also sponsored David Gilliland's Nationwide Series Car in 2006. Goody's sponsored Bobby Labonte's Dodge at the 2009 fall Martinsville race. Goody's was also a sponsor for Aldo Bennedetti's car (character played by Don Simpson) in the movie Days of Thunder. 1 2 "Headache Remedy Recalled Over Fatal Tampering". The New York Times. December 25, 1992. ↑ Ranii, David (21 December 2011). "GSK sells BC, Goody's and other brands". News & Observer.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 KenV99
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
from collections import deque
import queue

try:
    import xbmc
except ImportError:
    # Outside Kodi: fall back to multiprocessing primitives.
    # NOTE(review): with multiprocessing.Process the instance attributes are
    # NOT shared between parent and worker process, so the statistics are only
    # reliable under the threading branch — confirm intended usage.
    from time import sleep
    import multiprocessing as foo
    bar = foo.Process
    foobar = foo.ProcessError
else:
    import threading as foo
    bar = foo.Thread
    foobar = foo.ThreadError


class RollingStats(bar):
    """Background worker computing a running mean/variance via Welford's
    online algorithm, optionally over a fixed-size sliding window.

    Values are fed in with add_value(); the worker drains them from an
    internal queue.  Read results with get_mean() / get_variance().
    """

    def __init__(self, expected_mean=0.0, windowsize=0, sleepinsecs=0.0001):
        """
        :param expected_mean: starting value for the running mean
        :param windowsize: > 0 for a sliding window of that size,
            0 for cumulative statistics over all values seen
        :param sleepinsecs: idle poll interval of the worker loop (seconds)
        """
        super(RollingStats, self).__init__(name='RollingStats')
        self.lock = foo.Lock()
        self.abort_evt = foo.Event()
        self.valueQ = queue.Queue()
        self.mean = float(expected_mean)
        self.sleepinsec = sleepinsecs
        self.n = 0
        self.M2 = 0.0  # running sum of squared deviations from the mean
        # Always assign self.window: get_variance() reads it, and previously
        # it was only set when windowsize > 0, so the no-window case raised
        # AttributeError.
        self.window = windowsize
        if windowsize > 0:
            self.values = deque()
            self.calc = self.calc_window
        else:
            self.calc = self.calc_nowindow
        try:
            from xbmc import sleep
            self.using_xbmc = True
            self.sleepfn = sleep
        except ImportError:
            from time import sleep
            self.using_xbmc = False
            self.sleepfn = sleep
        self.start()

    def sleep(self):
        # xbmc.sleep() takes milliseconds; time.sleep() takes seconds.
        if self.using_xbmc:
            self.sleepfn(self.sleepinsec * 1000.0)
        else:
            self.sleepfn(self.sleepinsec)

    def stop(self, timeout=5):
        """Signal the worker to exit and wait up to *timeout* seconds."""
        self.abort_evt.set()
        if self.is_alive():
            try:
                self.join(timeout)
            except foobar:
                pass

    def add_value(self, value):
        """Queue a new observation for the worker to fold in."""
        with self.lock:
            self.valueQ.put_nowait(value)

    def get_mean(self):
        """Current running (or windowed) mean."""
        with self.lock:
            return self.mean

    def get_variance(self, population=False):
        """Return the population or sample variance of the current data.

        With a sliding window the denominator is the window size, so the
        result is only meaningful once the window has filled.
        """
        with self.lock:
            denominator = self.window if self.window else self.n
            if population:
                return self.M2 / denominator
            else:
                return self.M2 / (denominator - 1)

    def calc_window(self, value):
        """Welford update over a sliding window of self.window values."""
        self.values.append(value)
        if self.n < self.window:
            # Window not yet full: plain Welford accumulation.
            self.n += 1
            d = value - self.mean
            self.mean += d / self.n
            self.M2 += d * (value - self.mean)
        else:
            # Window full: fold in the new value and retire the oldest one.
            valueo = self.values.popleft()
            meano = self.mean
            self.mean += (value - valueo) / self.window
            self.M2 += (value - meano) * (value - self.mean) - (valueo - meano) * (valueo - self.mean)

    def calc_nowindow(self, value):
        """Welford update over all values seen so far."""
        self.n += 1
        d = value - self.mean
        self.mean += d / self.n
        self.M2 += d * (value - self.mean)

    def run(self):
        # Drain the queue, updating stats under the lock; idle-sleep when empty.
        while not self.abort_evt.is_set():
            while not self.valueQ.empty():
                with self.lock:
                    value = self.valueQ.get_nowait()
                    self.calc(value)
            else:
                self.sleep()


if __name__ == '__main__':
    # Demo/check against numpy.  Fixed: previously used Python 2 print
    # statements and xrange, which are syntax/name errors under Python 3
    # (the module already requires Python 3 via `import queue`).
    import numpy
    lst = [float(i) for i in range(-100, 101)]
    windowsize = 10
    rs = RollingStats(expected_mean=0.0, windowsize=windowsize)
    record = {}
    for i, v in enumerate(lst):
        rs.add_value(v)
        if i >= windowsize:
            # NOTE(review): these reads race the worker thread, so recorded
            # values may lag behind the values queued so far.
            record[i] = (rs.get_mean(), rs.get_variance(True), v)
    rs.stop()
    for i, v in enumerate(lst):
        if i >= windowsize:
            window = lst[i - windowsize:i]
            print(i, record[i][2], record[i][0], numpy.mean(window),
                  record[i][1], numpy.var(window))
NEW YORK (AP) — Paul McCartney has agreed to induct his former Beatles mate, Ringo Starr, into the Rock and Roll Hall of Fame next month. Stevie Wonder, Patti Smith and John Mayer will also usher in new members, the hall announced on Wednesday. The 30th annual induction ceremony is scheduled for Cleveland’s Public Hall on April 18. The Rock and Roll Hall of Fame and Museum is located in Cleveland. The Beatles are in the rock hall already, and so are McCartney, John Lennon and George Harrison as solo artists. Drummer Starr is the last to be honored individually, and Eagle Joe Walsh will perform some of his friend’s music. Wonder will induct Bill Withers, a fellow songwriter who had his biggest impact in the 1970s. John Legend will perform Withers’ music, and it’s still a mystery whether Withers, who quit the music business more than 30 years ago, will sing. Rock hall spokeswoman Shauna Wilson said it’s still to be determined which inductees will perform. Smith will induct another artist indelibly tied to New York, the late Lou Reed, and Beck will perform his music. Mayer is speaking in honor of another blues guitarist, the late Stevie Ray Vaughan. Vaughan’s brother Jimmie will perform with members of his brother’s old band, Double Trouble. Dave Grohl has committed to performing inductee Joan Jett’s music, although the hall hasn’t announced who will give the speech for the “I Love Rock and Roll” singer. Peter Wolf will induct the Paul Butterfield Blues Band, with Tom Morello performing the band’s music. Fall Out Boy will induct Green Day and guitarist Steve Cropper is scheduled to honor the “5” Royales. HBO is taping the ceremony and concert for a May 30 television premiere. The rock hall will open an exhibit on April 11 honoring this year’s new class, and also has a special exhibit on photographer Herb Ritts’ work with musicians.
#Created by Liang Sun <i@liangsun.org> in 2012 import urllib2 import re, json, time,hashlib, random import MultipartPostHandler from gevent import pool from gevent import monkey from urllib2 import URLError, HTTPError monkey.patch_all() def get_address(): u = 100000.0 v = 1000000.0 longitude = int(random.gauss(116467615, u)) latitude = int(random.gauss(39923488, u)) print "longitude=%d,latitude=%d" % (longitude, latitude) url = 'http://maps.googleapis.com/maps/api/geocode/json?latlng=%f,%f&sensor=false&language=zh-CN' % (latitude / v, longitude / v) opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(), urllib2.HTTPRedirectHandler()) try: str_content = opener.open(url).read() except HTTPError, e: print 'Error code: ', e.code time.sleep(36) return get_address() except URLError, e: print e.reason time.sleep(36) return get_address() if str_content: content = json.loads(str_content) if content['status'] == 'OK': address = content['results'][0]['formatted_address'] if address.find(' ') > 0: address = address[:address.find(' ')] address = address.encode('utf-8') return (longitude, latitude, address) else: print content['status'].encode('utf-8') + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!" time.sleep(36) # This is due to the 2500/24h limit. return get_address()
As a violinist, you know that your E string has a tremendous impact on your other violin strings. That's why you test so many E strings, trying to find the one that will bring out the very best in your other strings and your instrument. The British firm W.E. Hill & Sons is unique among violin string manufacturers in that it just makes E strings. They're designed to help you gain clarity and power from your violin. Try one today and let us know what you think by writing a review! Have a question about W.E. Hill & Sons violin strings? Call 800-513-8271 to talk with our friendly music, strings, and gear experts or contact them online. They’re available Monday-Thursday from 9 a.m.-3 p.m. EST and Friday from 9 a.m.-4 p.m.
""" Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import time import string import tinctest from gppylib.commands.base import Command from tinctest.main import TINCException ''' Utility functions for Gpstop gpstop function automation @class GpStop @exception: GPstopException ''' class GPstopException( TINCException ): pass class GpStop(): def __init__(self): self.gphome = os.environ.get('GPHOME') self.master_dir = os.environ.get('MASTER_DATA_DIRECTORY') if self.gphome is None: raise GPstopException ("GPHOME environment variable is not set") else: if self.master_dir is None: raise GPstopException ("MASTER_DATA_DIRECTORY environment variable is not set") def run_gpstop_cmd(self, flag = '-a', mdd = None, logdir = None, masteronly = None, immediate = None , fast = None, smart = None, quietmode = None, restart = None, timeout = None, parallelproc=None, notstandby = None, verbose = None, version = None, standby = None, reload = None, validate = True): ''' GpStop function @param flag: '-a' is the default option considered .Do not prompt the user for confirmation @param mdd: The master host data directory.If not specified, the value set for $MASTER_DATA_DIRECTORY will be used @param logdir:The directory to write the log file. Defaults to ~/gpAdminLogs. @param masteronly: Shuts down only the master node @param immediate: Immediate shut down. 
@param fast: Fast shut down. @param smart: Smart shut down. @param quietmode: Command output is not displayed on the screen @param restart: Restart after shutdown is complete @param timeout: Specifies a timeout threshold (in seconds) to wait for a segment instance to shutdown @type timeout: Integer @param parallelproc: The number of segments to stop in parallel @type parallelproc: Integer @param verbose:Displays detailed status, progress and error messages output by the utility @param notstandby:Do not stop the standby master process @param version: Displays the version of this utility. @param standby:Do not stop the standby master process @param reload: This option reloads the pg_hba.conf files of the master and segments and the runtime parameters of the postgresql.conf files but does not shutdown the Greenplum Database array ''' make_cmd = ''.join([self.gphome,'/bin/gpstop']) # Check the version of gpstop if version is not None: arg = '--version' make_cmd = ' '.join([make_cmd,arg]) cmd = Command(name='Run gpstop', cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, make_cmd)) tinctest.logger.info("Running gpstop : %s" % cmd) cmd.run(validateAfter=validate) result = cmd.get_results() if result.rc != 0 or result.stderr: return False else: tinctest.logger.info((result)) return True # -d The master host data directory if mdd is None: mdd = "" else: mdd = " -d %s" % self.master_dir # -q Quietmode if quietmode is None: quietmode = "" else: quietmode = "-q" # -v Verbose if verbose is None: verbose = "" else: verbose = " -v" # -y notstandby if notstandby is None: notstandby = "" else: notstandby = " -y" # -t nnn Timeout if timeout is None: timeout = "" else: # Check if timeout is an integer try: int(timeout) timeout=" -t %s" % timeout except ValueError, e: if e is not None: raise GPstopException ("Gpstop timeout is not set correctly!") # -B nnn Parallel Process if parallelproc is None: parallelproc = "" else: # Check if parallelprocs is an integer try: 
int(parallelproc) parallelproc=" -B %s" % parallelproc except ValueError, e: if e is not None: raise GPstopException ("Gpstop parallelproc is not set correctly!") if logdir is None: logdir = " " else: logdir='-l '+ (logdir) make_cmd = ' '.join([make_cmd,mdd,quietmode,verbose,notstandby,timeout,logdir,parallelproc]) try: if immediate is not None: make_cmd = ' '.join([make_cmd, " -M immediate"]) elif masteronly is not None: make_cmd = ' '.join([make_cmd, " -m"]) elif fast is not None: make_cmd = ' '.join([make_cmd," -M fast"]) elif smart is not None: make_cmd = ' '.join([make_cmd," -M smart"]) elif restart is not None: make_cmd = ' '.join([make_cmd," -r"]) elif reload is not None: make_cmd = ' '.join([make_cmd," -u"]) else: make_cmd = ' '.join([make_cmd,'']) except Exception, e: if e is not None: raise make_cmd = ' '.join([make_cmd,'-a']) cmd = Command(name='Run gpstop', cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, make_cmd)) tinctest.logger.info("Running gpstop : %s" % cmd) cmd.run(validateAfter=validate) result = cmd.get_results() if result.rc != 0 or result.stderr: return False else: tinctest.logger.info((result)) return True def get_version(self): self.run_gpstop_cmd(version='y')
We aim to create a world where every learner is given the opportunity and the stimulation to thrive. To do so, we empower educators to apply insights from the learning and developmental sciences into concrete practices, using tools like the Mindset Kit, through rapid validation of our tools and processes, and by working closely with implementation partners. Teachers are at the core of most learning experiences. We work directly with teachers across the nation to ensure that our goals match their goals and that they are given the tools they need to best help their students. Empowering learners through research-based practices.