text
stringlengths
29
850k
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.condenser_equipment_and_heat_exchangers import CoolingTowerVariableSpeed

log = logging.getLogger(__name__)


class TestCoolingTowerVariableSpeed(unittest.TestCase):
    """Round-trip test for the CoolingTowerVariableSpeed IDF object.

    Populates every field of a fresh object, saves it to a temporary IDF
    file, reloads the file, and asserts that each field survives the
    save/load cycle unchanged.  The ``# alpha`` / ``# node`` / ``# real`` /
    ``# object-list`` / ``# integer`` comments mark the IDD field type of
    the attribute being exercised.
    """

    def setUp(self):
        # One fresh temporary file per test; removed again in tearDown.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_coolingtowervariablespeed(self):
        # Strict validation: out-of-range assignments raise immediately
        # rather than being silently accepted.
        pyidf.validation_level = ValidationLevel.error

        obj = CoolingTowerVariableSpeed()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # node
        var_water_inlet_node_name = "node|Water Inlet Node Name"
        obj.water_inlet_node_name = var_water_inlet_node_name
        # node
        var_water_outlet_node_name = "node|Water Outlet Node Name"
        obj.water_outlet_node_name = var_water_outlet_node_name
        # alpha
        var_model_type = "CoolToolsCrossFlow"
        obj.model_type = var_model_type
        # object-list
        var_model_coefficient_name = "object-list|Model Coefficient Name"
        obj.model_coefficient_name = var_model_coefficient_name
        # real
        var_design_inlet_air_wetbulb_temperature = 20.0
        obj.design_inlet_air_wetbulb_temperature = var_design_inlet_air_wetbulb_temperature
        # real
        var_design_approach_temperature = 0.0001
        obj.design_approach_temperature = var_design_approach_temperature
        # real
        var_design_range_temperature = 0.0001
        obj.design_range_temperature = var_design_range_temperature
        # real
        var_design_water_flow_rate = 0.0001
        obj.design_water_flow_rate = var_design_water_flow_rate
        # real
        var_design_air_flow_rate = 0.0001
        obj.design_air_flow_rate = var_design_air_flow_rate
        # real
        var_design_fan_power = 0.0001
        obj.design_fan_power = var_design_fan_power
        # object-list
        var_fan_power_ratio_function_of_air_flow_rate_ratio_curve_name = "object-list|Fan Power Ratio Function of Air Flow Rate Ratio Curve Name"
        obj.fan_power_ratio_function_of_air_flow_rate_ratio_curve_name = var_fan_power_ratio_function_of_air_flow_rate_ratio_curve_name
        # real
        var_minimum_air_flow_rate_ratio = 0.35
        obj.minimum_air_flow_rate_ratio = var_minimum_air_flow_rate_ratio
        # real
        var_fraction_of_tower_capacity_in_free_convection_regime = 0.1
        obj.fraction_of_tower_capacity_in_free_convection_regime = var_fraction_of_tower_capacity_in_free_convection_regime
        # real
        var_basin_heater_capacity = 0.0
        obj.basin_heater_capacity = var_basin_heater_capacity
        # real
        var_basin_heater_setpoint_temperature = 2.0
        obj.basin_heater_setpoint_temperature = var_basin_heater_setpoint_temperature
        # object-list
        var_basin_heater_operating_schedule_name = "object-list|Basin Heater Operating Schedule Name"
        obj.basin_heater_operating_schedule_name = var_basin_heater_operating_schedule_name
        # alpha
        var_evaporation_loss_mode = "LossFactor"
        obj.evaporation_loss_mode = var_evaporation_loss_mode
        # real
        var_evaporation_loss_factor = 19.19
        obj.evaporation_loss_factor = var_evaporation_loss_factor
        # real
        var_drift_loss_percent = 20.2
        obj.drift_loss_percent = var_drift_loss_percent
        # alpha
        var_blowdown_calculation_mode = "ConcentrationRatio"
        obj.blowdown_calculation_mode = var_blowdown_calculation_mode
        # real
        var_blowdown_concentration_ratio = 2.0
        obj.blowdown_concentration_ratio = var_blowdown_concentration_ratio
        # object-list
        var_blowdown_makeup_water_usage_schedule_name = "object-list|Blowdown Makeup Water Usage Schedule Name"
        obj.blowdown_makeup_water_usage_schedule_name = var_blowdown_makeup_water_usage_schedule_name
        # object-list
        var_supply_water_storage_tank_name = "object-list|Supply Water Storage Tank Name"
        obj.supply_water_storage_tank_name = var_supply_water_storage_tank_name
        # node
        var_outdoor_air_inlet_node_name = "node|Outdoor Air Inlet Node Name"
        obj.outdoor_air_inlet_node_name = var_outdoor_air_inlet_node_name
        # integer
        var_number_of_cells = 1
        obj.number_of_cells = var_number_of_cells
        # alpha
        var_cell_control = "MinimalCell"
        obj.cell_control = var_cell_control
        # real
        var_cell_minimum_water_flow_rate_fraction = 0.50005
        obj.cell_minimum_water_flow_rate_fraction = var_cell_minimum_water_flow_rate_fraction
        # real
        var_cell_maximum_water_flow_rate_fraction = 1.0
        obj.cell_maximum_water_flow_rate_fraction = var_cell_maximum_water_flow_rate_fraction
        # real
        var_sizing_factor = 0.0001
        obj.sizing_factor = var_sizing_factor

        # Save, dump the file contents to the debug log, then reload.
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        # String-valued fields use assertEqual; real-valued fields use
        # assertAlmostEqual to tolerate float round-tripping through text.
        self.assertEqual(idf2.coolingtowervariablespeeds[0].name, var_name)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].water_inlet_node_name, var_water_inlet_node_name)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].water_outlet_node_name, var_water_outlet_node_name)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].model_type, var_model_type)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].model_coefficient_name, var_model_coefficient_name)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_inlet_air_wetbulb_temperature, var_design_inlet_air_wetbulb_temperature)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_approach_temperature, var_design_approach_temperature)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_range_temperature, var_design_range_temperature)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_water_flow_rate, var_design_water_flow_rate)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_air_flow_rate, var_design_air_flow_rate)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_fan_power, var_design_fan_power)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].fan_power_ratio_function_of_air_flow_rate_ratio_curve_name, var_fan_power_ratio_function_of_air_flow_rate_ratio_curve_name)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].minimum_air_flow_rate_ratio, var_minimum_air_flow_rate_ratio)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].fraction_of_tower_capacity_in_free_convection_regime, var_fraction_of_tower_capacity_in_free_convection_regime)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].basin_heater_capacity, var_basin_heater_capacity)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].basin_heater_setpoint_temperature, var_basin_heater_setpoint_temperature)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].basin_heater_operating_schedule_name, var_basin_heater_operating_schedule_name)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].evaporation_loss_mode, var_evaporation_loss_mode)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].evaporation_loss_factor, var_evaporation_loss_factor)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].drift_loss_percent, var_drift_loss_percent)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].blowdown_calculation_mode, var_blowdown_calculation_mode)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].blowdown_concentration_ratio, var_blowdown_concentration_ratio)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].blowdown_makeup_water_usage_schedule_name, var_blowdown_makeup_water_usage_schedule_name)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].supply_water_storage_tank_name, var_supply_water_storage_tank_name)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].outdoor_air_inlet_node_name, var_outdoor_air_inlet_node_name)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].number_of_cells, var_number_of_cells)
        self.assertEqual(idf2.coolingtowervariablespeeds[0].cell_control, var_cell_control)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].cell_minimum_water_flow_rate_fraction, var_cell_minimum_water_flow_rate_fraction)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].cell_maximum_water_flow_rate_fraction, var_cell_maximum_water_flow_rate_fraction)
        self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].sizing_factor, var_sizing_factor)
Every day we hear many dramatic stories of how brides-to-be are not satisfied with the dress that they purchased. The dress that they bought isn’t the same as the one in the picture, which brings tears just days before the wedding. This often results in a situation where brides have to be married in a dress they are not happy with. In bridal hire showrooms, where the bride-to-be selects the dress of her dreams, the dress can often be damaged in the time leading up to the wedding. To buy? To rent? Or to have a made-to-measure wedding dress? – that is the question! By having your dress made to measure, you get it made exactly the way you want! If the dressmaker is not a bridal dress specialist, though, this can be a very risky choice. Each person has different expectations and interests for their special day, so they want to feel perfect on the day. Making the gown requires many years’ experience of bridal dress sewing and the highest mastery: mastery of expensive materials, perfect communication with the client, and knowledge of many more nuances, all of which are important for the bride-to-be. The final design of the dress must be perfect – because there may be too little time to fix it! Sometimes neither the designer nor the bride knows what to do – this is because the bride doesn’t always know what she wants, so the seamstress takes her own initiative and, in the end, we all shed a tear. If you’d like a made-to-measure dress, we recommend that you start by trying on ready-made garments in a shop. The more dresses you try on, the better understanding you establish of what you like and what style will suit you – and you learn which shape suits your body. Another plus for the bride is that the dress will be exclusive: she can take bits of the style of each dress and make her own perfect dress. Are you dreaming of a complex, majestic, real princess dress? But meanwhile your finances are restricting you to renting? 
There is an excellent solution; a solution that respects your finances and style. Formal and bridal dress rental is very popular in Europe and in less affluent countries, where the bride might otherwise have to buy an extravagant giant wedding dress. There is only one positive point – a slightly more attractive price – and many negative points: a limited choice of styles, colors and sizes; the dress will already have been worn many times, and who knows how it will look when your wedding day comes – maybe it has been damaged many times and quick stitching has been applied – and this may force you into buying a dress at the last minute. What about the quality of the materials? What kind of dress materials have been used to make the dress last so long, and what type of powerful cleaning has been applied? Perhaps it has been cleaned and washed dozens of times! Forget high-quality and stunning materials such as silk, organza and fine light lace. Shopping online gives you a range of unique and diverse colors, fabrics and sizes. You are not confined to one dressmaker’s opportunities and imagination. You don’t need your family members traveling with you for fittings, or to listen to suggestions about what style, fabric and color you have to pick. What to do with your dress later? Leave it to your future daughter? Daughter-in-law? Sell it? There are many options – even passing it on to someone for them to give the dress a facelift. A fashion house which produces many wedding dresses will have well-practised sewing and design techniques for dresses, and knows every detail that will be very important for you and your event, making you feel safe, delighted and comfortable. The negative point is slightly higher costs (not always – it will depend on how the dress will look), but the advantage is that you pick a dress that is one in a million! 
Many brides-to-be think that it’s better to order a new dress for the special day, or to have something brand new made just for them for the wedding day, but they don’t know what they are going to do with the dress after the wedding. The most common option is to dry-clean the dress, line a box with acid-free tissue paper and store the dress in it. This is usually quite expensive. Whatever option you choose, we wish that you will find a dress that you love to wear – and not a dress that wears you….
import csv
import time
import math
import sys
import os


def placeDataIntoArray(fileName):
    '''Read uploaded CSV data into memory.

    Parses *fileName* as a comma-separated file of numbers and returns a
    NumPy float64 array (C order).  Arrays with a single row or column are
    flattened to 1-D, which is the shape callers expect for target-value
    (pIC50) files.  Returns None if the file cannot be read or parsed.
    '''
    import numpy as np  # provides complex math and array functions
    try:
        # newline='' is the csv-module-recommended way to open files; the
        # old 'rbU' byte+universal-newline mode is invalid on Python 3.
        with open(fileName, mode='r', newline='') as csvfile:
            datareader = csv.reader(csvfile, delimiter=',')
            dataArray = np.array([row for row in datareader],
                                 dtype=np.float64, order='C')
        if min(dataArray.shape) == 1:
            # flatten arrays of one row or column
            return dataArray.flatten(order='C')
        return dataArray
    except (OSError, ValueError) as exc:
        # Report the actual failure instead of silently swallowing it.
        print('error in placeDataIntoArray(...): {}'.format(exc))
        return None


def _writeCsvRows(path, rows):
    '''Write an iterable of row sequences to *path* as CSV, overwriting any
    existing file of the same name.'''
    with open(path, 'w', newline='') as fileOut:
        fileW = csv.writer(fileOut)
        fileW.writerows(rows)


def splitRawInput(sourceFileName, sessionid="fakesession", cvSplit=.15,
                  testSplit=.15, static_location=None):
    '''Create separate training, cross-validation and test set files from a
    raw input set uploaded by the user.

    Uses the 70/15/15 rule: 70% of the raw data is used for training,
    while 15% each goes to the cross-validation and test sets.  The last
    column of the input is assumed to be the target (y) pIC50 values and
    is separated from the observation columns.  Assumes a well-formatted
    numeric CSV input file (TODO: document the valid input file format).

    static_location: destination directory for the generated files;
    defaults to <cwd>/qsar/static/qsar/uploads (the historical hard-coded
    location), but may be overridden, e.g. for testing.

    Returns a list of the six generated file paths in the order
    [trainX, trainY, testX, testY, cvX, cvY], or [] on error.
    '''
    import numpy as np  # provides complex math and array functions
    try:
        dataArray = placeDataIntoArray(sourceFileName)
        if static_location is None:
            # Default output directory, relative to the qsar application root.
            static_location = os.path.join(
                os.getcwd(), 'qsar', 'static', 'qsar', 'uploads')
        rawData = np.array([e for e in dataArray], dtype=np.float64, order='C')

        # Slice off the cross-validation partition first, then remove it
        # from the raw pool; the last column is the pIC50 target.
        cvData = rawData[0:int(len(rawData) * cvSplit), :]
        cv_pIC50 = cvData[:, -1]
        cvData = cvData[:, :-1]
        rawData = rawData[int(len(rawData) * cvSplit):, :]

        # Test partition comes out of what remains after the cv split.
        testData = rawData[0:int(len(rawData) * testSplit), :]
        test_pIC50 = testData[:, -1]
        testData = testData[:, :-1]

        # Everything left over becomes the training set.
        trainData = rawData[int(len(rawData) * testSplit):, :]
        train_pIC50 = trainData[:, -1]
        trainData = trainData[:, :-1]

        # One timestamp for the whole batch so the six files share a prefix.
        stamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())

        def _out(suffix):
            # os.path.join instead of '\\'.join: portable path building.
            return os.path.join(
                static_location,
                '{0}_{1}_{2}.csv'.format(sessionid, stamp, suffix))

        trainX, trainY = _out('train'), _out('train_pIC50')
        testX, testY = _out('test'), _out('test_pIC50')
        cvX, cvY = _out('cv'), _out('cv_pIC50')

        _writeCsvRows(trainX, trainData)
        _writeCsvRows(trainY, ([v] for v in train_pIC50))
        _writeCsvRows(testX, testData)
        _writeCsvRows(testY, ([v] for v in test_pIC50))
        _writeCsvRows(cvX, cvData)
        _writeCsvRows(cvY, ([v] for v in cv_pIC50))

        return [trainX, trainY, testX, testY, cvX, cvY]
    except Exception as exc:
        # Keep the original best-effort contract (empty list on failure),
        # but say what actually went wrong.
        print('error in splitRawInput( ... ): {}'.format(exc))
        return []
Mopinion is dedicated to providing its customers with the best possible feedback analytics software on the market. In order to maintain this status, our team strives to make constant and valuable improvements to our platform. We do this using the feedback provided directly by our customers. That being said, we’re happy to announce the release of seven exciting new updates, including new machine learning technology, new feedback form metrics, and much more.
# Minigrade: a small Flask autograder that clones student git repositories,
# builds and runs their tests inside a docker wrapper, streams progress to
# the browser as server-sent events, and keeps a leaderboard in SQLite.
# NOTE(review): this is Python 2 code (print statements, `commands` module);
# <KEY> and <PATH> are deployment placeholders that must be filled in.
from flask import Flask, render_template, request, jsonify, Response, abort, session, stream_with_context, redirect, g
from ast import literal_eval
import subprocess
import re
import requests
import json
import shutil
import time
import os
import sqlite3
import logging
import sys
import commands
import threading

minigrade = Flask(__name__)
PORT_NUMBER = 8000
# Put your own secret key here. You can't have mine!
minigrade.secret_key = <KEY>
# Accepts git@, git:// and https:// repository URLs and captures the host
# path ('url'), the owner ('user') and the repository name ('repo').
urlmatch = re.compile('(?:git@|git://|https://)(?P<url>[\w@-]+\.[a-zA-Z]+[:/](?P<user>[a-zA-Z][a-zA-Z0-9-]+)/(?P<repo>.+))')
SERVER_IP = 'localhost'#'128.143.136.170'
logging.basicConfig(filename='grader.log',level=logging.DEBUG)
# Serializes access to the httperf benchmark so only one student runs it
# at a time.
benchmark_mutex = threading.Lock()


def process_repo(repo):
    """Parse a repository URL into (https_url, repo_name, user).

    Returns None if *repo* does not match the urlmatch pattern; a trailing
    '.git' is stripped from the repository name.
    """
    logging.debug('Processing repo: ' + repo)
    result = urlmatch.match(repo)
    if not result:
        return None
    giturl = "https://" + result.group('url')
    repository = result.group('repo')
    if repository[-4:] == ".git":
        repository = repository[:-4]
    logging.debug('Returning: ' + str(repository))
    return (giturl, repository, result.group('user'))


def sort_files_by_age(files):
    """Return the non-directory entries of *files* sorted by ctime (oldest
    first).  Paths are interpreted relative to the current directory."""
    filedata = [(filename, os.lstat(filename).st_ctime) for filename in files]
    filedata = sorted(filedata, key = lambda x: x[1])
    filedata = [filetuple[0] for filetuple in filedata]
    # NOTE(review): py2 filter() returns a list; on py3 this would be lazy.
    filedata = filter(lambda x: not os.path.isdir(x), filedata)
    return filedata


def cap_logs():
    """Keep at most 10 result files in the current directory, deleting the
    oldest ones beyond that limit."""
    result_files = os.listdir('.')
    if len(result_files) > 10:
        filedata = sort_files_by_age(result_files)[:len(result_files) - 10]
        for f in filedata:
            os.remove(f)


def parse_httperf_output(output_str):
    """Extract (duration_s, avg_response_ms, io_MB, error_count) from raw
    httperf output; any metric not found stays at -1.  io is converted from
    the reported KB/s rate to total MB via rate * duration / 1024."""
    dur = -1
    avg_resp = -1
    io = -1
    err = -1
    for line in output_str.split('\n'):
        # need test-duration(s), reply time(ms), Net I/O, errors
        output_line = line.rstrip()
        testduration = re.search(r'test-duration (\d+\.\d+) s', output_line)
        replytime = re.search(r'Reply time \[ms\]: response (\d+\.\d+) .*', output_line)
        netio = re.search(r'Net I/O: (\d+\.\d+) KB/s', output_line)
        errorcount = re.search(r'Errors: total (\d+)', output_line)
        if testduration:
            #print "Test duration: %f s\n" % float(testduration.group(1))
            dur = float(testduration.group(1))
        elif replytime:
            #print "Reply time: %f ms\n" % float(replytime.group(1))
            avg_resp = float(replytime.group(1))
        elif netio:
            #print "Net I/O: %f MB\n" % float(netio.group(1)) * dur / 1024
            io = float(netio.group(1)) * dur / 1024
        elif errorcount:
            #print "Error count: %d\n" % int(errorcount.group(1))
            err = int(errorcount.group(1))
    '''
    print "Test duration: %f s" % dur
    print "Reply time: %f ms" % avg_response
    print "Net I/O: %f MB" % io
    print "Error count: %d" % err
    print "END HTTPERF\n"
    '''
    return dur, avg_resp, io, err


def grade_stream(assignment, repo):
    """Generator producing the server-sent-event stream for one grading run.

    Emits 'data: ...' SSE lines: test names (tn), past results (past), raw
    output (raw), per-test results (tr Pass/Fail), and errors (inv).  For
    assignment "PS3" it benchmarks a remote server with httperf instead of
    cloning; otherwise it clones *repo*, runs the build, then each test via
    dockerscript.sh.  Uses StopIteration to end the stream early.
    """
    yield "retry: 300000\n"
    if 'email' not in session:
        yield "data: inv: Please log in before running the autograder.\n\n"
        raise StopIteration
    #session['email'] = "wx4ed@virginia.edu"
    build = None
    tests = []
    repo_name = "NotADirectory"
    cwd = os.getcwd()
    # Each line of the .test file is "<kind> <python-literal-dict>"; the
    # 'build' line configures the build step, everything else is a test.
    try:
        with open("tests/{}.test".format(assignment)) as testfile:
            for idnum, testcase in enumerate(testfile):
                test = literal_eval(' '.join(testcase.split(' ')[1:]))
                if testcase.split(' ')[0] == "build":
                    build = test
                else:
                    tests.append(test)
                    yield "data: tn: {} {}\n\n".format(test['name'], idnum)
    except:
        print "No test file for '{}'".format(assignment)
        yield "data: inv: Error: No valid test file for {}\n\n".format(assignment)
        raise StopIteration
    try:
        yield "data inv: Grading {} from {}...\n\n".format(assignment, repo)
        logging.debug("Grading " + assignment + " from: " + repo);
        # Work inside results/<assignment>/<email>/ for this run.
        os.chdir("results/{}".format(assignment))
        if not os.path.isdir(session['email']):
            os.mkdir(session['email'])
        os.chdir(session['email'])
        cap_logs()
        result_files = sort_files_by_age(os.listdir('.'))
        result_files.reverse()
        # review the past results
        for f in result_files:
            yield "data: nextpast\n\n"
            with open(f) as result:
                for line in result:
                    yield "data: past: {}\n\n".format(line)
        # start cloning the repository
        # just skip it in ps3
        if assignment == "PS3":
            # ps3 remote benchmark: for PS3 'repo' is actually the server
            # address to benchmark, not a git URL.
            httperf_req_list_file_path = os.path.join(cwd, "tests/zhtta-test-NUL.txt")
            cmd = "httperf --server %s --port 4414 --rate 10 --num-conns 60 --wlog=y,%s" % (repo, httperf_req_list_file_path)
            # actually IP address
            #cmd = "ping -c 2 %s" % repo
            yield "data: raw: Queuing for benchmark, please wait...\n\n"
            # Only one benchmark may run at a time.
            benchmark_mutex.acquire()
            logging.debug("Benchmark starts, please wait...");
            yield "data: raw: Benchmark starts, please wait...\n\n"
            import commands
            yield "data: raw: {}\n\n".format(cmd)
            ret_text = commands.getoutput(cmd)
            benchmark_mutex.release()
            for line in ret_text.split('\n'):
                yield "data: raw: {}\n\n".format(line)
            (dur, avg_resp, io, err) = parse_httperf_output(ret_text)
            # Persist this run so it shows up under "past results" later.
            with open(str(time.time())+".result", 'w') as results:
                results.write("Duration: %d s\n\n" % (dur))
                results.write("Average Response Time: %d ms\n\n" % avg_resp)
                results.write("IO: %dMB\n\n" % (io))
                results.write("Errors: {}\n".format(err))
            # Pass criteria: benchmark completed (dur != 1), enough I/O
            # throughput, and zero errors.
            if dur != 1 and io > 280 and err == 0:
                yield "data: tr: Pass %d %ds\n\n" % (0, dur)
                yield "data: tr: Pass %d %dms\n\n" % (1, avg_resp)
                yield "data: tr: Pass %d %dMB\n\n" % (2, io)
                yield "data: tr: Pass %d %d errors\n\n" % (3, err)
                update_top_runs(session['email'], str(dur), str(avg_resp))
            else:
                yield "data: tr: Fail %d %ds\n\n" % (0, dur)
                yield "data: tr: Fail %d %dms\n\n" % (1, avg_resp)
                yield "data: tr: Fail %d %dMB\n\n" % (2, io)
                yield "data: tr: Fail %d %d errors\n\n" % (3, err)
            #os.chdir(cwd)
            #yield "data: done\n\n"
        else:
            with open(str(time.time())+".result", 'w') as results:
                result = process_repo(repo)
                if not result:
                    results.write("{} is not a valid git repository.\n".format(repo))
                    yield "data: inv: {} is not a valid git repository.\n\n".format(repo)
                    raise StopIteration
                logging.debug("Processed repo...");
                repo_url, repo_name, repo_user = result
                # Remove any stale checkout from an earlier run.
                if os.path.isdir(repo_name):
                    shutil.rmtree(repo_name)
                try:
                    logging.debug("Cloning...")
                    yield "data inv: Cloning github repository...\n\n"
                    git = subprocess.check_output("git clone {}".format(repo_url).split(" "), stderr = subprocess.STDOUT)
                    logging.debug("Finished cloning...")
                    yield "data: raw: {}\n\n".format(git)
                except Exception as e:
                    logging.debug("{} is not a valid repository, because we got {}\n".format(repo,e))
                    results.write("{} is not a valid repository, because we got {}\n".format(repo,e))
                    yield "data: inv: Error: {} is not a valid repository, because we got {}\n\n".format(repo,e)
                    raise StopIteration
                logging.debug("Using repo {}.\n".format(repo))
                results.write("Using repository {}.\n".format(repo))
                os.chdir(repo_name)
                # copying files to testing dir...
                #yield "setting up files..."
                #shutil.copy("/home/grader/minigrade/tests/testfiles/abc.txt", "abc.txt")
                if build:
                    logging.debug("Building...")
                    success = re.compile(build['results'])
                    # NOTE(review): this rebinds the name 'commands', shadowing
                    # the imported py2 `commands` module in this scope.
                    commands = build['cmd'].split(";")
                    for command in commands:
                        yield "data: raw: {}\n\n".format(command)
                        result = None
                        try:
                            result = subprocess.check_output(command, shell = True, stderr = subprocess.STDOUT)
                        except:
                            print "Error building"
                        if result:
                            for line in result.split('\n'):
                                yield "data: raw: {}\n\n".format(line)
                        else:
                            yield "data: raw: Error running {}\n\n".format(command)
                    # Only the output of the LAST build command is matched
                    # against the expected-results pattern.
                    if result and re.search(success, result):
                        results.write("Build success\n")
                        yield "data: tr: Pass 0\n\n"
                    else:
                        results.write("Build failed\n")
                        yield "data: tr: Fail 0\n\n"
                        yield "data: inv: Build failed!\n\n"
                        raise StopIteration
                passed = 0
                failed = 0
                counter = 0
                for idnum, test in enumerate(tests):
                    counter += 1
                    yield "data: raw: {}\n\n".format(test["cmd"])
                    success = re.compile(test['results'])
                    # Write the test's commands as a ./gash script for the
                    # docker wrapper to execute.
                    f = open("test_file{}".format(counter), 'w')
                    temp=""
                    for token in test['cmd'].split(';'):
                        temp = temp + './gash -c "{}"\n'.format(token)
                    print "{}: temp={}".format(counter, temp.rstrip())
                    f.write(temp.rstrip())
                    f.close()
                    # NOTE(review): this clobbers the outer 'cwd' captured at
                    # function entry, which the finally block relies on to
                    # chdir back — looks like a latent bug; confirm.
                    cwd = os.getcwd()
                    print "cwd={}".format(cwd)
                    # Copy each declared dependency file into the checkout.
                    for dep in test['dep']:
                        print "dep={}".format(dep)
                        print "typeof(dep)={}".format(type(dep))
                        shutil.copy("/home/grader/minigrade/tests/testfiles/{}".format(dep), dep)
                    command = "/home/grader/minigrade/dockerscript.sh {} {} test_file{} output_file{}".format(cwd, cwd, counter, counter)
                    print "{}: command={}".format(counter, command)
                    returncode = subprocess.call(command, shell = True, stderr = subprocess.STDOUT)
                    os.chdir(cwd)
                    result =""
                    try:
                        r = open('{}/output_file{}'.format(cwd,counter), 'r')
                        result = ''.join(r.readlines()).rstrip()
                        r.close()
                    except:
                        print "{}: couldn't open output_file{}".format(counter, counter)
                        result="null"
                    print "{}: test {}".format(session['email'], counter)
                    print "returncode={}".format(returncode)
                    # only print the first 10 lines to prevent spamming
                    m = 0
                    for line in result.split('\n'):
                        if m < 10:
                            print "result from output_file{}={}".format(counter, line)
                            yield "data: raw: {}\n\n".format(line)
                        else:
                            break
                        m += 1
                    print "{}: done printing result".format(counter)
                    if m >= 10:
                        yield "data: raw: ...\n\n"
                    # A test passes when the wrapper exits 0 AND the output
                    # matches the expected-results regex.
                    if (returncode == 0) and re.match(success, result):
                        results.write("Passed {}\n".format(test['name']))
                        passed += 1
                        yield "data: tr: Pass {}\n\n".format(idnum + 1)
                    else:
                        results.write("Failed {}\n".format(test['name']))
                        failed += 1
                        yield "data: tr: Fail {}\n\n".format(idnum + 1)
                results.write("Total pass: {}\n".format(passed))
                results.write("Total fail: {}\n".format(failed))
    finally:
        # Always clean up the checkout and restore the working directory.
        if os.path.isdir(repo_name):
            shutil.rmtree(repo_name)
        os.chdir(cwd)
        yield "data: done\n\n"


@minigrade.route('/')
def index():
    """Serve the static grading page."""
    with open("grade.html") as sub_page:
        return '\n'.join(sub_page.readlines())


@minigrade.route('/grade/')
def grade():
    """Kick off a grading run and stream its progress as SSE."""
    assignment = request.args.get("assign", "NoneSuch")
    repo = request.args.get("repo", "NoneSuch")
    logging.debug("Grading " + assignment + ": " + repo)
    response = Response(stream_with_context(grade_stream(assignment, repo)), mimetype="text/event-stream")
    logging.debug("Finished grading " + repo + ": " + str(response))
    return response


@minigrade.route('/auth/login', methods=['POST', 'GET'])
def login():
    """Mozilla Persona login: GET returns the current session email (or
    "null"); POST verifies the supplied assertion with the Persona verifier
    and stores the email in the session on success."""
    if request.method == "GET":
        return session['email'] if 'email' in session else "null"
    # The request has to have an assertion for us to verify
    if 'assertion' not in request.form:
        abort(400)
    # Send the assertion to Mozilla's verifier service.
    data = {'assertion': request.form['assertion'], 'audience': 'http://' + SERVER_IP + ':'+ str(PORT_NUMBER)}
    resp = requests.post('https://verifier.login.persona.org/verify', data=data, verify=True)
    # Did the verifier respond?
    if resp.ok:
        # Parse the response
        verification_data = json.loads(resp.content)
        # Check if the assertion was valid
        if verification_data['status'] == 'okay':
            # Log the user in by setting a secure session cookie
            session.update({'email': verification_data['email']})
            logging.debug('Login as: ' + verification_data['email'])
            return "Logged in as %s" % verification_data['email']
    logging.debug('Login failure: ' + str(resp))
    # Oops, something failed. Abort.
    abort(500)


@minigrade.route('/auth/logout', methods=['POST'])
def logout():
    """Clear the session email and return to the front page."""
    session.pop('email', None)
    return redirect('/')


# Server-side database methods ##########
database_path = <PATH>


@minigrade.teardown_appcontext
def close_db(error):
    """Closes the database again at the end of the request."""
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
    if error:
        print("There was an error closing the database: {}".format(error))


def connect_db():
    """Connects to the specific database."""
    rv = sqlite3.connect(database_path)
    rv.row_factory = sqlite3.Row
    return rv


def get_db():
    """Opens a new database connection if there is none yet for the current application context.
    """
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db()
    return g.sqlite_db


def init_db():
    """Creates the database tables."""
    with minigrade.app_context():
        db = get_db()
        with minigrade.open_resource('schema.sql') as f:
            db.cursor().executescript(f.read())
        db.commit()


def query_db(query, args=(), one=False):
    """Returns a query to the database as a list"""
    cur = get_db().execute(query, args)
    rv = cur.fetchall()
    cur.close()
    get_db().commit()
    return (rv[0] if rv else None) if one else rv


############
# Leaderboard functions #################
leaderboard_path = <PATH>
import random


@minigrade.route('/leaderboard.html')
def leaderboard():
    """Serve the generated leaderboard page."""
    with open("leaderboard.html") as sub_page:
        return '\n'.join(sub_page.readlines())


@minigrade.route('/leaders.data')
def leaders():
    """Serve the raw leaderboard data file."""
    with open("leaders.data") as sub_page:
        return '\n'.join(sub_page.readlines())


def update_top_runs(user, duration, response):
    ''' Run this to update the top runs with an entry of user-duration-response time entry'''
    q = query_db("SELECT * FROM topruns WHERE username=?", [user], one=True)
    if q is None:
        query_db("INSERT INTO topruns VALUES (?, ?, ?)", [user, str(duration), str(response)])
    else:
        query_db("UPDATE topruns SET duration=?, response=? WHERE username=?", [str(duration), str(response), user])
    # THIS LINE determines how many users are shown on the leaderboard.
    update_leaderboard(5)


def get_top_runs(num):
    ''' Returns the top num runs in a list of 3xnum elements: the first is best duration/response time, the second is best duration, third is response time'''
    runs = query_db("SELECT * FROM topruns")
    data = [[],[],[]]
    runs.sort(key=heuristic)
    data[0] = runs[:num]
    runs.sort(key=lambda x: float(x[1]))
    data[1] = runs[:num]
    runs.sort(key=lambda x: float(x[2]))
    data[2] = runs[:num]
    return data


def heuristic(run):
    '''returns a function of a weighing bewteen duration and response time'''
    tot_duration = float(run[1])
    avg_response = float(run[2])
    return tot_duration * avg_response


def update_leaderboard(num):
    '''Updates the leaderboard with 'num' entries for webpages to see'''
    head = "<h2>Leaderboard</h2>"
    # %%%% / %%s survive the first %-substitution below as %% and %s, so the
    # returned template is itself filled in with the rows later via `tmp % row`.
    tbl_template=lambda x: '''
<h3>%s</h3>
<table id="leaderboard-dr" style='width:100%%%%;border-spacing:10px'>
<tr><th style="text-align:left">ID</th>
<th style="text-align:left">Duration Time</th>
<th style="text-align:left">Response Time</th>
</tr>
%%s
</table>
'''%x
    titles = ["Best duration/response time", "Best duration", "Best Response Time"]
    data = get_top_runs(num)
    fin = ""
    for i, title in enumerate(titles):
        tmp = tbl_template(title)
        row = ""
        for tup in data[i]:
            # should be (username, duration, response time)
            row += "<tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*tup)
        fin += tmp % row
    open(leaderboard_path, 'w').write(fin)


#Only run in chroot jail.
if __name__ == '__main__':
    print "running..."
    minigrade.run(host='0.0.0.0', debug=False, threaded=True, port=PORT_NUMBER)
    #minigrade.run(debug=True, threaded=True, port=9080)
Last week we explored the topic of grit, and how practice is a pivotal aspect of success. While practice in itself is important, the quality of practice is exceptionally important. Repetition is a key player in what makes practice so effective, and is even helpful for work-related tasks too, like waking up at the same time every morning! In a recent TED Ed lesson, creators Annie Bosler and Don Green explain the basics of how practice affects the nervous system and how to get the most out of your practice sessions. Whether your focus is academic, athletic, or musical, practice is required to master any physical skill. As any elite performer can tell you, there is truth behind the idea that practice makes perfect (or at least gets you closer to it). However, the idea of practice leading to muscle memory may be misleading. Rather, Bosler and Green suggest that the “edge” elite athletes and performers have is due to more efficient neural pathways, thanks to a substance called myelin. To understand how practice and repetition affect the brain, it is important to understand the basics of how movements are generated. Movements in the body begin with grey matter in the brain, where information is processed and signals are directed to nerve cells. White matter in the brain consists mostly of fatty tissue and nerve fibers. To create movement, information must travel from the grey matter, through the spinal cord and through axons (chains of nerve fibers) to reach the muscles. Axons are wrapped in a sheath of myelin, a fatty substance that acts like insulation on electrical cables by “preventing energy loss from electrical signals that the brain uses, moving them more efficiently along neural pathways.” These myelin layers are what change with practice. “Focus on the task at hand.” Put away or turn off anything that is potentially distracting, especially phones, laptops, or television. “Start out slow or in slow-motion. 
Coordination is built with repetitions, whether correct or incorrect.” Practice the proper motions from the beginning to avoid developing bad habits, then increase speed gradually. Have “frequent repetitions with allotted breaks.” This is a habit of many elite performers and athletes, who break up their practice into many different sessions throughout the day. While our muscles may not actually have memory, these effective practice methods can help our bodies and brains push limits and reach new levels of mastery. The increased layers of myelin from focused, slow, repetitive movements increase one’s mastery of skills, proving that practice really does make perfect – or at least very efficient.
from .auth import UserBL, TokenBL
from .feed import CategoryBL, CityBL, VacancyBL
from .pages import PageBL, PageBlockBL, PageChunkBL
from .mail import MailTemplateBL
from .utils import registry
from .uploads import UploadedImageBL

# Public business-logic classes re-exported by this package.
__all__ = [
    "UserBL",
    "TokenBL",
    "CategoryBL",
    "CityBL",
    "VacancyBL",
    "PageBL",
    "PageBlockBL",
    "PageChunkBL",
    "MailTemplateBL",
    "UploadedImageBL",
]


def init_resource_registry():
    """Register every business-logic factory under its ``bl.<name>`` key.

    Each registry entry is a callable that wraps a model instance in its
    business-logic class; since every wrapper was just
    ``lambda obj: Cls(obj)``, the classes themselves are registered
    directly.  Callers still do ``registry['bl.user'](user)`` and get a
    ``UserBL`` instance, exactly as before.
    """
    registry['bl.category'] = CategoryBL
    registry['bl.vacancy'] = VacancyBL
    registry['bl.city'] = CityBL
    registry['bl.user'] = UserBL
    registry['bl.pagechunk'] = PageChunkBL
    registry['bl.pageblock'] = PageBlockBL
    registry['bl.page'] = PageBL
    registry['bl.token'] = TokenBL
    registry['bl.mailtemplate'] = MailTemplateBL
    registry['bl.uploadedimage'] = UploadedImageBL
The City of Palo Alto is allowing the placement, construction and modification of 120+ so-called "Small Cell" Cell towers in the public rights of way in Palo Alto’s residential zones, while the City of Palo Alto ignores substantial evidence and their non-preempted duties to regulate the operations of these so-called "Small Cell" Cell towers. The following study of 913 Northern California pregnant women (Kaiser Permanente patients) — released online on 12/13/17, shows that the Palo Alto’s current Wireless Expansion plan in residential zones would result in hazards for pregnant women who live, work in or visit the streets of Palo Alto — where magnetic field strength was measured at 5 mG on 4/21/17.
Table 4: Exposure to High Magnetic Fields (MFs) During Pregnancy and the Risk of Miscarriage – Assessing Dose-Response, MF Measured on Typical Days Only. This prospective cohort study was approved by the Kaiser Permanente Northern California (KPNC) Institutional Review Board and conducted among KPNC’s pregnant members in the San Francisco Bay Area, all of whom provided informed consent. All participating pregnant carried an EMDEX Lite meter from Enertech Consultants Inc. for 24 hours during pregnancy. The EMDEX Lite meter is specifically designed to measure MF, which is measured in milligauss (mG). MF measurement conducted on a typical day: a day reflecting participants’ typical pattern of work and leisure activities during pregnancy. Excluded 31 subjects who failed to carry the meter as instructed. We also excluded 107 subjects who had incomplete (<90% of their 24-hour measurements) MF measurement data. Those exclusions were made without knowledge of subjects’ pregnancy outcomes. Those in the higher three quartiles (2.5 to 10.0 mG) were classified in the high MF exposure group. [This] NIEHS-funded study provides additional evidence that exposure to high MF levels [2.5 to 10.0 mG] in pregnancy is associated with increased risk of miscarriage. This finding is also supported by four other studies published during the past 15 years that examined the relationship between high MF exposure and the risk of miscarriage. . . . other studies examined the impact of EMF emitted from cell phones and wireless networks, and observed that more frequent cell phone use and close proximity to wireless base stations were both associated with an increased risk of miscarriage. In this study, we found an almost three-fold increased risk of miscarriage if a pregnant woman was exposed to higher MF levels compared to women with lower MF exposure. 
The screen of the HF-59B meter shows a peak RF/MW radiation reading of 38,000 µW/m², while standing across the street, about 75 feet away from the "Small Cell". Applying the correction factor for high-speed pulsed digital signals (explained here), the peak levels of 4G/LTE RF/MW radiation are actually over 380,000 µW/m². This is an extreme health hazard. faux-mailbox power supply exposes people to strong magnetic fields. to spray RF/MW radiation into the homes/businesses on Bryant Street. because the "mailbox" lacks warning signs, anyone could lean up against this cabinet in the public right of way.
from PIL import Image import multiprocessing import math sequence_function = lambda z_n, c : z_n ** 2 + c def is_in_cardoid_or_bulb(z): """Algorithm for the test: https://en.wikipedia.org/wiki/Mandelbrot_set#Optimizations """ p = math.sqrt((z.real - 1. / 4) ** 2 + z.imag ** 2) return z.real < p - 2 * (p ** 2) + 1. / 4 and \ ((z.real + 1) ** 2) + (z.imag ** 2) < 1. / 16 # def iterate_over_region(width, height, min_x, max_x, min_y, max_y): def iterate_over_region(args): """Compute the sequences on a given region. args is a 6-tuple composed as follows (width, height, min_x, max_x, min_y, max_y). It returns a 2 dimensionnal array of size width * height containing the number of occurences of a given pixel in the complex sequences. """ width, height, min_iter, max_iter, min_x, max_x, min_y, max_y = args complex_plane = [[0] * height for _ in range(width)] # For each pixel of the screen: for x in xrange(min_x, max_x): for y in xrange(min_y, max_y): # Compute the corresponding complex number. c = complex(((x * 3.) / width) - 2, ((y * 2.0) / height) - 1) # We check if p is in the cardoid or the bulb (which means # that it automatically belongs to the mandelbrot set. if is_in_cardoid_or_bulb(c): continue z = c # Creation of the set of complex number that we will use # to remember de complex number sequence. complex_sequence = set([]) # Compute at most max_iter terms of the complex number # sequence for i in xrange(max_iter): complex_sequence.add(z) z = sequence_function(z, c) # If |z| > 2, we are sure that the sequence diverges. if (z.real * z.real + z.imag * z.imag) > 4: if len(complex_sequence) <= min_iter: break complex_sequence.add(z) # For each diverging sequence, we increment the # counter corresponding to the pixel of the screen # through which it passed. for term in complex_sequence: pixel_x = math.floor(((term.real + 2) * width) / 3.) pixel_y = math.floor(((term.imag + 1) * height) / 2.) 
if 0 <= pixel_x < width and 0 <= pixel_y < height: complex_plane[int(pixel_x)][int(pixel_y)] += 1 break print "Computation for x in [", min_x, ",", max_x, "] DONE" return complex_plane def slice_screen(width, height, min_iter, max_iter, cpu_number, slice_per_cpu): """We cut the screen in cpu_number slices of width (width / cpu_number). If the number of cpu does not divide the width, the last slices will contain the remaining pixels """ screen_sections = [] slice_size = width / (cpu_number * slice_per_cpu) for i in range((cpu_number * slice_per_cpu) - 1): screen_sections.append((width, height, min_iter, max_iter, i * slice_size, (i + 1) * slice_size, 0, height)) screen_sections.append((width, height, min_iter, max_iter, i * slice_size, width, 0, height)) return screen_sections def fusion_results(width, height, results): """After the computation, we have to add the results of every different slice to get the final array. """ final_result = [[0] * height for _ in range(width)] for x in xrange(width): for y in xrange(height): final_result[x][y] = sum((slice[x][y] for slice in results)) return final_result def iterate_over_screen(width, height, min_iter, max_iter, slice_per_cpu): """This function uses the other functions to : create the process pool, compute the size of the different slices of the screen, use Pool.map to compute the orbits of the different complexe sequences and then fusion all the results together. 
""" cpu_number = multiprocessing.cpu_count() print "Launching computation on", cpu_number, "cores" sliced_screen = slice_screen(width, height, min_iter, max_iter, cpu_number, slice_per_cpu) print "The screen is decomposed in", len(sliced_screen), "sections" process_pool = multiprocessing.Pool(cpu_number) res = process_pool.map(iterate_over_region, sliced_screen) process_pool.close() process_pool.join() final_result = fusion_results(width, height, res) return final_result def render_picture(width, height, result): """This function renders the final picture and save it to 'test.bmp'. To render the picture, the function computes the minimum and maximum values of the cells, the scale the range of values to the interval [0, 255]. The final picture is rendered using this value as a red component. """ minimum = result[0][0] maximum = result[0][0] print "Starting rendering" print "The image size is", width, "x", height for x in range(width): for y in range(height): if result[x][y] < minimum: minimum = result[x][y] if result[x][y] > maximum: maximum = result[x][y] img = Image.new('RGB', (width, height)) img.putdata([(((result[x][y] - minimum) * 255) / (maximum-minimum), 0, 0) \ for y in range(height) for x in range(width)]) img.save('test_bulb.bmp') print "Rendering done" def render_picture_bis(width, height, result): """This function renders the final picture and save it to 'test.bmp'. To render the picture, the function computes the minimum and maximum values of the cells, the scale the range of values to the interval [0, 255]. The final picture is rendered using this value as a red component. """ minimum = result[0][0] maximum = result[0][0] print "Starting rendering" print "The image size is", width, "x", height for x in range(width): for y in range(height): if result[x][y] < minimum: minimum = result[x][y] if result[x][y] > maximum: maximum = result[x][y] middle = (minimum + maximum) / 2. 
datas = [] for y in range(height): for x in range(width): if result[x][y] < middle: red_component = ((result[x][y] - minimum) * 255) / (middle-minimum) datas.append((int(red_component), 0, 0)) else: green_component = ((result[x][y] - middle) * 127) / (maximum-middle) datas.append((0, int(green_component), 0)) img = Image.new('RGB', (width, height)) img.putdata(datas) img.save('test_bulb.bmp') print "Rendering done" if __name__ == '__main__': # Height should be (2/3) * width. width = 300 height = 200 # The minimal number of iterations is used to remove the noise in # the picture. min_iter = 300 max_iter = 3000 # In order to speed up the computation, we use more slices than # the number of cpu. This allows the program to begin new # calculation if a slice takes a long time. The memory used by the # program is linear in this variable, be careful. slice_per_cpu = 5 print "start" res = iterate_over_screen(width, height, min_iter, max_iter, slice_per_cpu) print "All computation done" render_picture_bis(width, height, res)
Today i want to introduce you to my friend Ala. We went to RISD together and were both in the apparel design department. She has great style and taste and now works as an interior designer. Check out these spaces and new home cleaning product line she developed! What is your current job and previous & Where do you live? I am currently a Designer at Meryl Santopietro Interiors and for Simply Spotless New York, Inc. Previously, I worked for Ralph Lauren in their Women’s Black Label Fabric Department. I live and work in NYC. My design process when working on projects for our interior design company involves sourcing fabrics, furniture, tile and wood samples. After drawing inspiration for the project and working hand in hand with the client to achieve their vision I assemble design boards to help clients visualize the layout of each room and specifications of furniture, artwork and accessories. When designing our product line, I created the patterns for labels drawing inspiration from textile designs. I worked alongside graphic designers to refine the graphics and font of the bottles. The best part of my job is working with my mom and sister. If you could design for any client who would it be? My dream client would be the developer of a boutique hotel. I would love the opportunity to create a luxurious oasis and tailor each suite to the guest that I envision staying there. My favorite project has been the creation of our product line, Simply Spotless New York, Inc. It has been a long process but it is so rewarding to see how the product developed from my initial sketches of labels. A few of my favorite spots in New York City are: Rubirosa for pizza, Bond St. for sushi, Aroma for coffee when I’m shopping in Soho and for clothing I love Opening Ceremony. You can visit Ala’s interior design firm here: Merylsantopietro.com and the website for simply spotless here: Simplyspotlessny.com Like Simply Spotless of facebook and follow on pinterest, instagram & twitter!
# lint-amnesty, pylint: disable=missing-module-docstring
import json
from datetime import datetime

from django.test import TestCase
from pytz import UTC

from common.djangoapps.track.utils import DateTimeJSONEncoder


class TestDateTimeJSONEncoder(TestCase):  # lint-amnesty, pylint: disable=missing-class-docstring

    def test_datetime_encoding(self):
        """Naive/aware datetimes and dates round-trip as ISO-8601 strings."""
        naive_dt = datetime(2012, 5, 1, 7, 27, 10, 20000)
        aware_dt = datetime(2012, 5, 1, 7, 27, 10, 20000, tzinfo=UTC)
        iso_datetime = '2012-05-01T07:27:10.020000+00:00'
        iso_date = '2012-05-01'

        payload = {
            'number': 100,
            'string': 'hello',
            'object': {'a': 1},
            'a_datetime': naive_dt,
            'a_tz_datetime': aware_dt,
            'a_date': naive_dt.date(),
        }
        # Non-temporal values pass through untouched; temporal values are
        # serialized by DateTimeJSONEncoder into their ISO representations.
        expected = {
            'number': 100,
            'string': 'hello',
            'object': {'a': 1},
            'a_datetime': iso_datetime,
            'a_tz_datetime': iso_datetime,
            'a_date': iso_date,
        }

        round_tripped = json.loads(json.dumps(payload, cls=DateTimeJSONEncoder))
        assert round_tripped == expected
Brand: Echo Park. SKU: BJGT7916. Availability: 1 in stock Size: N/A Shipping Weight: 550 g Categories: Kits, Paper. Echo Park Paper-Bundle Of Joy/A New Addition- Baby Girl Collection Kit. The perfect addition to scrapbooks, cards, fun projects and more! This package contains twelve 12×12 inch double-sided sheets with a different design on each sheet and two 12×12 inch sticker sheets. Acid and lignin free. Made in USA.
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from neon.backends import gen_backend
from neon.initializers.initializer import Array
from neon.layers.recurrent import LSTM
from neon.layers.container import Seq2Seq
from neon.transforms import Tanh, Logistic
import numpy as np
from neon import NervanaObject
from neon.util.beamsearch import BeamSearch


def reformat_samples(seq2seq_obj, num_beams, batch_size):
    """Regroup beam-search candidates per batch example.

    ``seq2seq_obj.candidates`` holds one array per beam, sliced here as
    ``[:, ex]`` per example; the result is a list with, for each example,
    a (num_beams x time_steps) array containing one candidate sequence
    per row.
    """
    samples = [[seq2seq_obj.candidates[bb][:, ex] for bb in range(num_beams)]
               for ex in range(batch_size)]
    examples = []
    for ex in range(batch_size):
        examples.append(np.vstack([samples[ex][ii] for ii in range(num_beams)]))
    return examples


def test_beamsearch(backend_default):
    """
    Simulated beam search on a minibatch of 2, for 4 time steps. The
    LSTM states are real but the "softmax outputs" z are hardcoded and
    not taken from the network. There are 6 tokens the network outputs,
    and they have probabilities like exp(1), exp(5), exp(7). The test
    asserts that the score_lists assigned by _beamsearch_step(z_list)
    are equal to the probabilities computed manually adding
    probabilities to z_list.
    """
    be = backend_default
    batch_size = 2
    be.bsz = batch_size
    time_steps = 4
    nout = 6
    num_beams = 3

    # create unused layers
    activation = Tanh()
    gate_activation = Logistic()
    init_ary = np.eye(nout)
    init = Array(init_ary)
    encoder = LSTM(nout, init,
                   activation=activation, gate_activation=gate_activation,
                   name="Enc")
    decoder = LSTM(nout, init,
                   activation=activation, gate_activation=gate_activation,
                   name="Dec")

    class DummyFProp():
        """
        Constructs an artificial beam search example with known correct
        outputs. This is called inside a nested loop over steps, num_life.
        In the first time step there is one life beam, after that, 3 life
        beams per step. There are 4 time steps total. Each beamsearch_step
        builds one list over num_life beams.

        At t=0, the winners for ex0 are 1, 4, 5 (indexed by their position)
        and winners for ex1 are 2,4,5. From there we continue the beam for ex0:
            12, 13, 14    6+2=8   6+3=9   6+2=8
            40, 43, 45    with scores 5+4=9  5+3=8  5+7=12
                          three new winners 45, 52, 55
            50, 52, 55    5+4=9   5+6=11  5+5=10
        for ex2
            1 4 5 with scores 5 4 7 we get the three winners 1, 4, 5 and
            continue (just taking the 3 in order, no sorting)
            10 12 13 14 (not unique!)  5+2=7   5+2=7   5+3=8
            41 42 43    with scores    4+6=10  4+5=9   4+7=11  winners 43 51 52
            51 52 53                   7+4=11  7+6=13  7+3=10  scores 11 11 13
        continue from the three winners 43 51 52
            431 433 434    11+10=21 11+3=14 11+9=20
            511 512 513    with scores 11+6=17 11+5=16 11+7=18
                           winners 431 434 520
            520 521 522    13+8=21 13+4=17 13+6=19  scores 21 20 21
        continue from three winners 431 511 513 (going along beams, the
        matches in a beam)
            4310 4312 4313 4314   21+2=23 21+2=23 21+3=24 21+10=31 (not unique!)
            4341 4342 4343        with scores 20+10=30 20+5=25 20+7=27
                                  winners 4314 4341 5204
            5200 5202 5204        21+8=29 21+6=27 21+10=31  scores 31 30 31
        overall winners are 4314 4341 5204
        """
        def __init__(self):
            # Index into z_list; fprop() advances it by one per call.
            self.i = -1

            # Hardwired "softmax" scores: exp() of small integers so the
            # log-probabilities the beam search adds up are exactly the
            # integers shown in the tables above.
            # t=0
            # X x x <-- winners: 1, 4, 5  (for example 0)
            z = be.array(np.exp(np.array([[1, 6, 2, 1, 5, 5],
                                          [1, 5, 2, 2, 4, 7]]))).T

            # t=1
            # x x x <-- give we picked 4: new winners 2,3,4
            z1 = be.array(np.exp(np.array([[1, 1, 2, 3, 2, 1],
                                           [2, 1, 2, 3, 2, 1]]))).T
            # x x x <-- give we picked 5: new winners 0,3,[5]  score 12
            z2 = be.array(np.exp(np.array([[4, 1, 2, 3, 1, 7],
                                           [2, 6, 5, 7, 2, 4]]))).T
            # x X X <-- give we picked 1: new winners 0,[2],[5]  scores 12, 11
            z3 = be.array(np.exp(np.array([[4, 1, 6, 3, 1, 5],
                                           [1, 4, 6, 3, 2, 1]]))).T

            # t=2
            # example 0: given constructed (1, 5), score 11: 3, 4; scores 21, 20
            z4 = be.array(np.exp(np.array([[1, 1, 2, 10, 9, 1],
                                           [2, 10, 2, 3, 9, 1]]))).T
            # example 0: given constructed (5, 5), score 12: none selected
            # from this beam
            z5 = be.array(np.exp(np.array([[4, 1, 2, 3, 1, 7],
                                           [2, 6, 5, 7, 2, 4]]))).T
            # example 0: given constructed (1, 2), score 12: 1; score 20
            z6 = be.array(np.exp(np.array([[4, 8, 6, 3, 1, 5],
                                           [8, 4, 6, 3, 1, 1]]))).T

            # t=3
            # example 0: given constructed (1, 5, 4), score 20: 1, score 30
            z7 = be.array(np.exp(np.array([[1, 10, 2, 1, 1, 1],
                                           [2, 1, 2, 3, 10, 1]]))).T
            # example 0: given constructed (1, 2, 1), score 20: 5, score 30
            z8 = be.array(np.exp(np.array([[4, 1, 2, 3, 1, 10],
                                           [2, 10, 5, 7, 2, 4]]))).T
            # example 0: given constructed (1, 5, 3), score 21: 4, score 31
            z9 = be.array(np.exp(np.array([[4, 8, 6, 3, 10, 5],
                                           [8, 4, 6, 3, 10, 1]]))).T

            self.z_list = [z, z1, z2, z3, z4, z5, z6, z7, z8, z9]

        def fprop(self, z, inference=True, init_state=None):
            # Ignore the real input; return the next canned output.
            self.i += 1
            return self.z_list[self.i]

    def final_state():
        return be.zeros_like(decoder.h[-1])

    class InObj(NervanaObject):
        def __init__(self):
            self.shape = (nout, time_steps)
            self.decoder_shape = (nout, time_steps)

    # Replace the decoder's real fprop with the scripted outputs above.
    decoder.fprop = DummyFProp().fprop
    layers = Seq2Seq([encoder, decoder], decoder_connections=[0])
    layers.decoder._recurrent[0].final_state = final_state

    in_obj = InObj()
    layers.configure(in_obj)  # made zeros because zeros have shape
    layers.allocate()
    layers.allocate_deltas(None)
    beamsearch = BeamSearch(layers)
    inputs = be.iobuf(in_obj.shape)
    beamsearch.beamsearch(inputs, num_beams=num_beams)

    # Expected winning candidate sequences per example (see the
    # DummyFProp docstring for the manual derivation).
    ex0 = np.array([[1, 5, 4, 1],
                    [1, 2, 1, 5],
                    [1, 5, 3, 4]])
    ex1 = np.array([[5, 1, 4, 4],
                    [5, 1, 1, 1],
                    [5, 2, 0, 4]])

    # extract all candidates
    examples = reformat_samples(beamsearch, num_beams, batch_size)
    assert np.allclose(examples[0], ex0)
    assert np.allclose(examples[1], ex1)


if __name__ == '__main__':
    be = gen_backend(backend='gpu', batch_size=2)
    test_beamsearch(be)
Whether you’re looking for a nice stress reliever or you’re committed to improving your short game, your own personal putting green is always a good investment. putting is hands-down the most important part of the game. Nothing’ more embarrassing than missing an easy putt!! If you shoot an 85 on 18 holes, on average you’ll have 34 putts per round, and if you shoot a 71 you’ll average 29 putts per round. Long story short, putting is just as important for any casual golfer as well as pros. Here’s a guide for making your very own DIY putting green. If you’re a millionaire with plenty of money to spend, I’d recommend buying a putting green or even getting one built. It’s very hard to replicate a real putting green, especially by yourself. But then again, if you were a millionaire I doubt you’d be reading this guide. So what exactly do we need? Let’s make a list of what you need to make your very own putting green! A semi-flat surface/floor to practice on. Solid carpet works best to replicate a putting green! One ball or more depending if you’re likely to lose them under the sofa. That’s our guide to making your own DIY putting green, and hopefully, it works out well for ya! If you find yourself on an uneven floor, you can practice uphill/downhill break for those tough par 4’s. Any day out on the course is better than a day at work, but bringing the course to you isn’t too bad . Happy golfing!!
#!/usr/bin/env python # -*- coding: utf-8 -*- """ cookiecutter.config ------------------- Global configuration handling """ from __future__ import unicode_literals import copy import logging import os import io try: import ruamel.yaml as yaml except ImportError: import yaml from .exceptions import ConfigDoesNotExistException from .exceptions import InvalidConfiguration logger = logging.getLogger(__name__) DEFAULT_CONFIG = { 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'), 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'), 'default_context': {} } def get_config(config_path): """ Retrieve the config from the specified path, returning it as a config dict. """ if not os.path.exists(config_path): raise ConfigDoesNotExistException logger.debug('config_path is {0}'.format(config_path)) with io.open(config_path, encoding='utf-8') as file_handle: try: yaml_dict = yaml.safe_load(file_handle) except yaml.scanner.ScannerError as e: raise InvalidConfiguration( '{0} is not a valid YAML file: line {1}: {2}'.format( config_path, e.problem_mark.line, e.problem)) config_dict = copy.copy(DEFAULT_CONFIG) config_dict.update(yaml_dict) return config_dict def get_user_config(): """ Retrieve config from the user's ~/.cookiecutterrc, if it exists. Otherwise, return None. """ # TODO: test on windows... USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc') if os.path.exists(USER_CONFIG_PATH): return get_config(USER_CONFIG_PATH) return copy.copy(DEFAULT_CONFIG)
Below is a list of all of the veterans whose last name begins with the letter C. Caferio, John C. Calderone, Adone T. Caldwell, William I. Cales, R. W. Callegari, Peter B. Calvin, Arthur L. Calvin, G. W. Cameron, George A. Canada, Curtis F. Canfield, A. M. Canine, John R. Cannon, Jack O. Cannon, R. M. Caolo, Michael Capin, R. W. Cardenas, A. H. Care, E. A. Carlin, Thomas C. Carlson, Clifford H. Carlson, Richard L. Carlson, Vernon Lyman Carlyle, H. H. Carney, C. H. Carney, Stephen J. Carpenter, W. Carr, L. Carrell, Richard P. Carroll, H. J. Carroll, Richard C. Carson, Marvin L. Carstens, Edlef Carter, E. Carter, William A. Cartright, William E. Cartwright, Julius M. Carver, Robert J. Case, Leonard H. Casey, John J. Casperson, Jack H. Cassell, H. O. Cassidy, Andrew J. Cassidy, Philip E. Cassyre, C. A. Castleberry, Gera N. Catledge, R. N. Caudle, A. B. Cauthron, E. C. Cauthron, Kenneth C. Cavanaugh, Bert B. Cavanaugh, Francis Cavin, G. C. Caza, D. R. Cerny, Joe J. Cewe, Robert E. Chadwell, Virgil L. Chadwick, B. W. Chaez, I. J. Chamberlain, R. G. Chambers, M. O. Chambers, Robert M. Chambers, Stanley K. Chambers, Theodore Chamblee, Manning W. Chancey, R. E. Chaney, C. G. Chaney, Gerald O. Chaney, Johnnie , Jr. Chapman, John H. Chappel, E. D. Chase, Ralph E. Chauncey, Edward S. Cheatham, William R. Cherpergi, J. P. Cherpeske, D. F. Cherry, Benjamin L. Childress, Marvin T. Childs, J. W. Childs, Wade D. Chippi, Peter Chittester, N. E. Chladik, E. J. Choquet, Sylvio J. Christensen, Oliver Roy Christensen, Ralph R. Christian, William Garnett Christiansen, John J. , Jr. Christiansen, Peter E. Christiansen, Walter G. Christoffel, H. J. Christy, Robert Lee Chup, Andrew Cicale, A. S. Cicolani, George Peter Cieliesz, Mitchell Cissna, Wayne Robert Clardy, C. T. Clardy, Roy T. Clark, B. V. Clark, Billy Anderson Clark, Cecil M. Clark, Lloyd T. Clark, Ohmar Clark, Verne F. , Jr. Clark, Walter C. Clark, William Clayton, Owen D. Clayton, Paul M. Clearman, Richard W. Clemans, Guy P. 
Clemmitt, J. Click, John K. Clifford, Jack M. Clifton, Morris L. Clifton, Raymond L. Cline, Cletis A. Clingenpeel, Donald O. Clingman, H. F. Cloutier, Eugene F. Clutter, Richard C. Cluverius, Wat T. Clymer, Everett V. Clymer, W. V. Coats, P. E. Cobb, D. P. Cobb, Tyrus P. Cobb, Walter B. Cobble, R. C. Coburn, James W. Coburn, Robert H. Cockrell, Claude S. Coen, Bernard C. Coffey, John F. Cole, Arles E. Cole, Cyrus W. Cole, Duane F. Cole, G. H. Coleman, Virgle R. Collins, Charles H. Collins, J. D. Collins, J. P. Collins, William Colpitt, C. H. Combs, Clyde R. Combs, Donald B. Conosenti, A. Conrad, Kenneth Conradt, J. F. Conrath, A. E. Conrelio, F. A. Constein, Edward D. Cony, Robert R. Coogan, J. H. Cook, Bruce R. Cook, Corlio J. Cook, E. M. Cook, R. W. Cook, Vernon E. Cook, W. T. Cook, Willie E. Cooley, Alvin L. Coombs, E. F. Cooper, F. R. Cooper, J. O. Cooper, Joseph R. Cooper, Sidney D. Coor, Rene E. Corder, Curtis S. Corlett, E. R. Cornell, Alex C. Cornett, G. C. Correll, E. Correll, Phillip K. Coston, B. E. Cottier, Charles E. Coulson, Scott E. Countryman, William H. County, A. J. Courter, Harold L. Couture, L. V. Cowden, John W. Cowell, Kenneth W. Cowser, J. A. Cox, Aaron C. Cox, Earl A. Cox, Eugene Cox, R. M. Crandall, L. M. Crane, Douglas F. Crask, Herbert G. Craver, Jack R. Crawford, James G. Crawford, Richard C. Crawford, Robert , Jr. Cray, Willard A. Crayne, Charles R. Creed, Roscoe W. Cremeans, Earl D. Crew, G. L. Crews, C. Crill, Calvin L. Crisp, George , Jr. Crites, F. T. Cromwell, Howard D. Crook, Harold R. Cross, R. R. Crossley, R. P. Crouch, Loren B. Crowder, G. W. Crowder, J. P. Crowley, J. J. Crudo, W. J. Crump, Robert W. Cunningham, Walter D. Cupp, H. G. Currey, Joseph M. Curry, R. J. Curtis, B. D. Curtis, Ennis O. Custance, H. F. Cygirt, Joseph P. Cyr, John L. Czerwenka, Adolph P.
#!/usr/bin/env python
# coding:utf8
# Joins fastg assembly contigs against a reference by BLAST; this chunk
# holds the script bootstrap, CLI parsing and BLAST-database setup.
import time
import os
import sys
import platform
import subprocess
try:
    # python2
    import commands
except:
    pass
from argparse import ArgumentParser
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
# Make the sibling GetOrganelleLib package importable when running from
# a source checkout.
sys.path.insert(0, os.path.join(PATH_OF_THIS_SCRIPT, ".."))
import GetOrganelleLib
from GetOrganelleLib.seq_parser import *
from GetOrganelleLib.pipe_control_func import executable, make_blast_db, execute_blast
from GetOrganelleLib.versions import get_versions
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
import platform
# Map platform.system() to the bundled-dependency directory name;
# only Linux and macOS are supported.
SYSTEM_NAME = ""
if platform.system() == "Linux":
    SYSTEM_NAME = "linux"
elif platform.system() == "Darwin":
    SYSTEM_NAME = "macOS"
else:
    sys.stdout.write("Error: currently GetOrganelle is not supported for " + platform.system() + "! ")
    exit()
GO_LIB_PATH = os.path.split(GetOrganelleLib.__file__)[0]
GO_DEP_PATH = os.path.realpath(os.path.join(GO_LIB_PATH, "..", "GetOrganelleDep", SYSTEM_NAME))
# V1_4
# Path separator used when splitting BLAST db paths for display.
this_dir_split = '/'
if 'Win' in platform.architecture()[1]:
    this_dir_split = '\\'
# Parsed CLI options; filled in by require_commands().
options = ''
short_candidates = {}


def require_commands():
    """Parse command-line arguments into the module-level ``options``.

    Exits with a usage hint when parsing fails or when either of the
    two required arguments (-g fastg input, -f reference fasta) is
    missing.
    """
    global options
    usage = 'python '+str(os.path.basename(__file__))+' -g input.fastg -f refernce.fasta'
    parser = ArgumentParser(usage=usage)
    parser.add_argument('-g', dest='in_fastg_file', type=str,
                        help='followed by your input fastg file')
    parser.add_argument('-f', dest='reference_fa_base', type=str,
                        help='followed by Fasta index format')
    parser.add_argument('--keep-temp', dest='keep_temp', default=False, action='store_true',
                        help='Choose to disable deleting temp files produced by blast and this script')
    parser.add_argument('--bt', dest='blast_hits_threshold', default=0.60,
                        help='Default: 0.60', type=float)
    parser.add_argument('--max-gap', dest='max_gap_to_add', default=1500,
                        help='Default: 1500', type=int)
    parser.add_argument('--con-all', dest='connect_inner_contig', default=False, action='store_true',
                        help='Choose to activate connecting all possible contigs. Default: False')
    parser.add_argument('--depth', dest='depth_to_connect', default=1.0,
                        help='Default: 1.0', type=float)
    parser.add_argument("--which-blast", dest="which_blast", default="",
                        help="Assign the path to BLAST binary files if not added to the path. "
                             "Default: try GetOrganelleDep/" + SYSTEM_NAME + "/ncbi-blast first, then $PATH")
    # parser.add_argument('--merge-overlaps', default=False, action='store_true', help='Choose to activate automatically merging overlapping contigs')
    # parser.add_argument('--min-os', dest='min_overlap_similarity', default=0.9, help='The similarity threshold to merge overlapping contigs. Default: 0.9', type=float)
    # parser.add_argument('--min-ol', dest='min_overlap_length', default=15, help='The length threshold to merge overlapping contigs. Default: 15', type=int)
    parser.add_argument("-v", "--version", action="version",
                        version="GetOrganelle v{version}".format(version=get_versions()))
    try:
        options = parser.parse_args()
    except Exception as e:
        sys.stdout.write('\n######################################'+str(e))
        sys.stdout.write('\n"-h" for more usage')
        exit()
    else:
        if not (options.in_fastg_file and options.reference_fa_base):
            sys.stdout.write("\n######################################\nInsufficient arguments!")
            sys.stdout.write("\n\"-h\" for more usage")
            exit()


def check_db(which_blast=""):
    """Build the BLAST database from the reference fasta.

    Keeps only the first sequence when the reference holds several
    (writing it to a '.1st.fasta' side file), and exits on an empty
    reference.
    """
    global options
    in_index = options.reference_fa_base + '.index'
    if options.reference_fa_base:
        time0 = time.time()
        ref_fasta = read_fasta(options.reference_fa_base)
        if len(ref_fasta[0]) > 1:
            options.reference_fa_base += '.1st.fasta'
            write_fasta(out_file=options.reference_fa_base,
                        matrix=[[ref_fasta[0][0]], [ref_fasta[1][0]], ref_fasta[2]],
                        overwrite=True)
            sys.stdout.write('\nWarning: multi-seqs in reference file, only use the 1st sequence.')
        elif len(ref_fasta[0]) == 0:
            sys.stdout.write('\nError: illegal reference file!')
            exit()
        make_blast_db(input_file=options.reference_fa_base, output_base=in_index, which_blast=which_blast)
        sys.stdout.write('\nMaking BLAST db cost '+str(time.time()-time0))
    else:
        sys.stdout.write('\nError: No reference input!')
        exit()
    # (tail of a function defined above this excerpt; it returns the BLAST
    # database/index base path)
    return in_index


def blast_and_call_new_matrix(fasta_file, index_files, out_file, len_db, which_blast=""):
    """BLAST the de-complemented fastg sequences against the reference and
    return a new [names, sequences] matrix with connector edges added.

    fasta_file  -- fasta produced from the fastg; '.Temp' suffix appended here
    index_files -- BLAST database (index) base path
    out_file    -- destination for the tabular (outfmt 6) BLAST result
    len_db      -- length of the reference sequence; the modular arithmetic
                   below treats the reference as circular
    which_blast -- directory holding the blastn binary ('' = use PATH)
    """
    global options
    time0 = time.time()
    sys.stdout.write('\nMaking BLAST ...')
    fasta_file += '.Temp'
    execute_blast(query=fasta_file, blast_db=index_files, output=out_file, outfmt=6, threads=4, e_value="1e-20",
                  which_blast=which_blast)
    time1 = time.time()
    sys.stdout.write('\nBLAST to '+index_files.split(this_dir_split)[-1]+' cost '+str(time1-time0))
    # ----------------------------------------
    # find start and end points of query
    # initialize candidates: fastq topologies and sequences
    query_matrix = read_fasta(options.in_fastg_file)
    len_fastg = len(query_matrix[0])
    # hits_candidates maps a short edge name to a per-edge record; the keys
    # True/False hold the sets of edges connected at that end/direction.
    hits_candidates = {}
    short_names = []
    for i in range(len_fastg):
        full_name = query_matrix[0][i]
        # fastg headers look like "EDGE_<n>_length_<l>_cov_<c>[:<links>];"
        short_name = '_'.join(full_name.split()[0].split('_')[1:]).split('_length')[0]
        coverage = float(full_name.split('cov_')[1].split(';')[0].split('\'')[0].split(':')[0])
        hits_candidates[short_name] = {False: set(), True: set(), 'coverage': coverage}
        short_names.append(short_name)
    for i in range(len_fastg):
        full_name = query_matrix[0][i]
        short_name = short_names[i]
        connected_edges = set()
        if ':' in full_name:
            # everything after ':' is a comma-separated list of linked edges;
            # a trailing quote marks the reverse-complement orientation
            for edge in full_name.rstrip(';').split(':')[1].split(','):
                edge_short_name = '_'.join(edge.split('_')[1:]).split('_length')[0]
                if edge_short_name in hits_candidates:
                    if edge.endswith('\''):
                        connected_edges.add((edge_short_name, False))
                    else:
                        connected_edges.add((edge_short_name, True))
        if full_name.split(';')[0].split(':')[0].endswith('\''):
            # reverse-complement record of this edge
            sequence = query_matrix[1][i]
            len_seq = len(sequence)
            new_items = {'identity': [0 for j in range(len_seq)],
                         ('index', False): i,
                         ('seq', False): sequence,
                         ('seq', True): complementary_seq(sequence),
                         'len_seq': len_seq,
                         False: connected_edges}
            hits_candidates[short_name].update(new_items)
        else:
            # forward record: also initialises the terminal hit blocks that
            # the BLAST-parsing loop below narrows down
            sequence = query_matrix[1][i]
            len_seq = len(sequence)
            new_items = {'identity': [0 for j in range(len_seq)],
                         'start_block': {'q': (len_seq, len_seq), 'r': []},
                         'end_block': {'q': (0, 0), 'r': []},
                         ('index', True): i,
                         ('seq', True): sequence,
                         ('seq', False): complementary_seq(sequence),
                         'len_seq': len_seq,
                         True: connected_edges}
            hits_candidates[short_name].update(new_items)
    # -----------------------------------
    # detect k-mer: find the overlap length shared by every linked edge pair;
    # any inconsistency resets k_mer to 0
    k_mer = 0
    try:
        for short_name in hits_candidates:
            for direction in [True, False]:
                for next_edge_info in hits_candidates[short_name][direction]:
                    if k_mer:
                        if hits_candidates[short_name][('seq', direction)][-k_mer:] != hits_candidates[next_edge_info[0]][('seq', next_edge_info[1])][:k_mer]:
                            raise ValueError
                    else:
                        # SPAdes k-mers are odd and at most 127
                        for k_mer in range(127, 19, -2):
                            if hits_candidates[short_name][('seq', direction)][-k_mer:] == hits_candidates[next_edge_info[0]][('seq', next_edge_info[1])][:k_mer]:
                                break
                        else:
                            raise ValueError
    except ValueError:
        k_mer = 0
        pass
    sys.stdout.write('\nDetected k-mer:'+str(k_mer))

    # calculate edge connections according to hits_candidates and max_gap
    #
    # wait to improve:
    # miss the directions for jointed edges!
    def get_jointed_edges_within_distance(all_infos, this_edge, this_direction, length_left, jointed_edges, k_mer, recurse_depth=0):
        """Collect (edge, direction) pairs reachable from this_edge within
        length_left bases, recursing at most 20 levels deep."""
        for this_next_edge in all_infos[this_edge][this_direction]:
            this_length_left = length_left - all_infos[this_next_edge[0]]['len_seq'] + k_mer
            if this_length_left >= 0 and this_next_edge not in jointed_edges:
                # try:
                # arbitrarily set recurse_depth to 20
                if recurse_depth < 20:
                    jointed_edges = get_jointed_edges_within_distance(all_infos, this_next_edge[0],
                                                                      this_direction == this_next_edge[1],
                                                                      this_length_left, jointed_edges, k_mer,
                                                                      recurse_depth+1)
                # except RuntimeError:
                #     sys.stdout.write('\nWarning: RuntimeError!')
                #     pass
            jointed_edges.add(this_next_edge)
        return jointed_edges

    edge_connections = {}
    for edge in hits_candidates:
        for direction in [False, True]:
            edge_connections[(edge, direction)] = get_jointed_edges_within_distance(hits_candidates, edge, direction,
                                                                                    options.max_gap_to_add+k_mer,
                                                                                    set(), k_mer)
    # compare candidates with blast results: record the best per-base identity
    # and the outermost (start/end) aligned blocks for every query edge
    blast_out_lines = open(out_file)
    for line in blast_out_lines:
        line_split = line.strip().split('\t')
        query = '_'.join(line_split[0].split('_')[1:]).split('_length')[0]
        q_start, q_end = int(line_split[6]), int(line_split[7])
        r_start, r_end = int(line_split[8]), int(line_split[9])
        identity = float(line_split[2])
        for i in range(q_start-1, q_end):
            hits_candidates[query]['identity'][i] = max(identity, hits_candidates[query]['identity'][i])
        if q_start < hits_candidates[query]['start_block']['q'][0]:
            hits_candidates[query]['start_block']['q'] = (q_start, q_end)
            hits_candidates[query]['start_block']['r'] = [(r_start, r_end)]
        elif q_start == hits_candidates[query]['start_block']['q'][0]:
            if q_end > hits_candidates[query]['start_block']['q'][1]:
                hits_candidates[query]['start_block']['q'] = (q_start, q_end)
                hits_candidates[query]['start_block']['r'] = [(r_start, r_end)]
            elif q_end == hits_candidates[query]['start_block']['q'][1]:
                hits_candidates[query]['start_block']['r'].append((r_start, r_end))
        if q_end > hits_candidates[query]['end_block']['q'][1]:
            hits_candidates[query]['end_block']['q'] = (q_start, q_end)
            hits_candidates[query]['end_block']['r'] = [(r_start, r_end)]
        elif q_end == hits_candidates[query]['end_block']['q'][1]:
            if q_start < hits_candidates[query]['end_block']['q'][0]:
                hits_candidates[query]['end_block']['q'] = (q_start, q_end)
                hits_candidates[query]['end_block']['r'] = [(r_start, r_end)]
            elif q_start == hits_candidates[query]['end_block']['q'][0]:
                hits_candidates[query]['end_block']['r'].append((r_start, r_end))
    blast_out_lines.close()
    time2 = time.time()
    sys.stdout.write('\nParsing BLAST result cost '+str(time2-time1))
    # ------------------------------------
    # map terminal blocks of candidates to reference bases
    # workout points to connect
    # {base: [(query name, query identity, is_start_of_query, direction_in_reference)]}
    ref_bases_dict = {}
    for hit in hits_candidates.keys():
        average_identity = sum(hits_candidates[hit]['identity'])/float(len(hits_candidates[hit]['identity']))
        hits_candidates[hit]['identity'] = average_identity
        if average_identity >= options.blast_hits_threshold:
            for block in ['start_block', 'end_block']:
                is_start_of_query = bool(block == 'start_block')
                # only consider ends that are dangling (no fastg link) unless
                # --connect-inner-contig was requested
                if options.connect_inner_contig or not bool(hits_candidates[hit][not is_start_of_query]):
                    if hits_candidates[hit]['coverage'] >= options.depth_to_connect:
                        query_loci = hits_candidates[hit][block]['q']
                        if is_start_of_query:
                            length_to_terminal = query_loci[0] - 1
                        else:
                            length_to_terminal = hits_candidates[hit]['len_seq'] - query_loci[1]
                        for reference_block in hits_candidates[hit][block]['r']:
                            direction_in_ref = bool(bool(reference_block[0] <= reference_block[1]) == is_start_of_query)
                            ref_block_to_mark = int(not is_start_of_query)
                            if reference_block[ref_block_to_mark] in ref_bases_dict:
                                ref_bases_dict[reference_block[ref_block_to_mark]].append((hit, length_to_terminal,
                                                                                           is_start_of_query,
                                                                                           direction_in_ref))
                            else:
                                ref_bases_dict[reference_block[ref_block_to_mark]] = [(hit, length_to_terminal,
                                                                                      is_start_of_query,
                                                                                      direction_in_ref)]
    # ------------------------------------
    # search for new connections
    # harvest every integer already used in edge names so the new connector
    # edges get fresh, non-colliding numbers
    used_edge_numbers = []
    for crazy_string in list(hits_candidates):
        for numbers in ''.join(filter(lambda ch: ch in '0123456789-_', crazy_string)).split('_'):
            for num in numbers.split('-'):
                used_edge_numbers.append(int(num))
    used_edge_numbers.sort()
    # mutable counters shared with make_connections (closure-writable dict)
    variances_to_pass = {'edge': used_edge_numbers[-1]+1, 'index': len_fastg}

    def make_connections(edge1, base1, edge2, base2, k_mer):
        """Create a synthetic connector edge ('?' overlap or 'N' gap filler)
        between two candidate edge ends anchored at reference bases
        base1/base2, updating query_matrix, hits_candidates and
        edge_connections in place."""
        # if end to end and disable self-connection
        if edge1[3] != edge2[3] and edge1[0] != edge2[0]:
            # if not connected
            if (edge2[0], edge2[2]) not in edge_connections[(edge1[0], not edge1[2])]:
                # if Overlaps
                if edge1[3] or base1 == base2:
                    overlap_or_gap_length = (base2-base1) % len_db+1 + edge1[1] + edge2[1]
                    edge_name = str(variances_to_pass['edge'])+'overlap'+str(overlap_or_gap_length)
                    new_full_name = 'EDGE_'+edge_name+'_length_'+str(overlap_or_gap_length+2*k_mer)+'_cov_80'
                    forward_edge_sequence = hits_candidates[edge1[0]][('seq', not edge1[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge2[0]][('seq', edge2[2])][:k_mer]
                    reverse_edge_sequence = hits_candidates[edge2[0]][('seq', not edge2[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge1[0]][('seq', edge1[2])][:k_mer]
                else:
                    overlap_or_gap_length = (base2-base1) % len_db-1 - edge1[1] - edge2[1]
                    # if still overlaps
                    if overlap_or_gap_length < 0:
                        overlap_or_gap_length = -overlap_or_gap_length
                        edge_name = str(variances_to_pass['edge'])+'overlap'+str(overlap_or_gap_length)
                        new_full_name = 'EDGE_'+edge_name+'_length_'+str(overlap_or_gap_length+2*k_mer)+'_cov_20'
                        forward_edge_sequence = hits_candidates[edge1[0]][('seq', not edge1[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge2[0]][('seq', edge2[2])][:k_mer]
                        reverse_edge_sequence = hits_candidates[edge2[0]][('seq', not edge2[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge1[0]][('seq', edge1[2])][:k_mer]
                    # if Gaps
                    else:
                        edge_name = str(variances_to_pass['edge'])+'gap'+str(overlap_or_gap_length)
                        new_full_name = 'EDGE_'+edge_name+'_length_'+str(overlap_or_gap_length+2*k_mer)+'_cov_5'
                        forward_edge_sequence = hits_candidates[edge1[0]][('seq', not edge1[2])][-k_mer:] + 'N'*overlap_or_gap_length + hits_candidates[edge2[0]][('seq', edge2[2])][:k_mer]
                        reverse_edge_sequence = hits_candidates[edge2[0]][('seq', not edge2[2])][-k_mer:] + 'N'*overlap_or_gap_length + hits_candidates[edge1[0]][('seq', edge1[2])][:k_mer]
                variances_to_pass['edge'] += 1
                # these_directions = {'to_edge1':False, 'edge1':not edge1[2],'to_edge2':True, 'edge2':edge2[2]}
                # add new edge to matrix
                query_matrix[0].append(new_full_name+':'+query_matrix[0][hits_candidates[edge2[0]][('index', edge2[2])]].split(';')[0].split(':')[0]+';')
                edge2_full_name = query_matrix[0][hits_candidates[edge2[0]][('index', not edge2[2])]]
                if ':' in edge2_full_name:
                    query_matrix[0][hits_candidates[edge2[0]][('index', not edge2[2])]] = edge2_full_name.rstrip(';')+','+new_full_name+'\';'
                else:
                    query_matrix[0][hits_candidates[edge2[0]][('index', not edge2[2])]] = edge2_full_name.rstrip(';')+':'+new_full_name+'\';'
                query_matrix[0].append(new_full_name+'\':'+query_matrix[0][hits_candidates[edge1[0]][('index', edge1[2])]].split(';')[0].split(':')[0]+';')
                edge1_full_name = query_matrix[0][hits_candidates[edge1[0]][('index', not edge1[2])]]
                if ':' in edge1_full_name:
                    query_matrix[0][hits_candidates[edge1[0]][('index', not edge1[2])]] = edge1_full_name.rstrip(';')+','+new_full_name+';'
                else:
                    query_matrix[0][hits_candidates[edge1[0]][('index', not edge1[2])]] = edge1_full_name.rstrip(';')+':'+new_full_name+';'
                query_matrix[1].append(forward_edge_sequence)
                query_matrix[1].append(reverse_edge_sequence)
                # add new edge to hits_candidates
                # NOTE(review): ('seq', False) stores forward_edge_sequence
                # even though reverse_edge_sequence was computed above —
                # looks suspicious; confirm intended.
                hits_candidates[edge_name] = {('index', True): variances_to_pass['index'],
                                              ('index', False): variances_to_pass['index']+1,
                                              ('seq', True): forward_edge_sequence,
                                              ('seq', False): forward_edge_sequence,
                                              'len_seq': overlap_or_gap_length+2*k_mer,
                                              True: [(edge2[0], edge2[2])],
                                              False: [(edge1[0], edge1[2])]}
                variances_to_pass['index'] += 2
                hits_candidates[edge1[0]][not edge1[2]].add((edge_name, True))
                hits_candidates[edge2[0]][not edge2[2]].add((edge_name, False))
                # add new edge to edge_connections (update)
                edge_connections[(edge1[0], not edge1[2])] = get_jointed_edges_within_distance(hits_candidates, edge1[0], not edge1[2], options.max_gap_to_add+k_mer, set(), k_mer)
                edge_connections[(edge2[0], not edge2[2])] = get_jointed_edges_within_distance(hits_candidates, edge2[0], not edge2[2], options.max_gap_to_add+k_mer, set(), k_mer)
                edge_connections[(edge_name, True)] = get_jointed_edges_within_distance(hits_candidates, edge_name, True, options.max_gap_to_add+k_mer, set(), k_mer)
                edge_connections[(edge_name, False)] = get_jointed_edges_within_distance(hits_candidates, edge_name, False, options.max_gap_to_add+k_mer, set(), k_mer)

    ref_bases_list = sorted(list(ref_bases_dict))
    len_ref_base = len(ref_bases_list)
    for i in range(len_ref_base):
        candidates = ref_bases_dict[ref_bases_list[i]]
        # the same base
        len_candidates = len(candidates)
        if len_candidates >= 2:
            # NOTE(review): l starts from 1 and may equal k, so pairs are not
            # strictly distinct here; make_connections itself rejects
            # self-connections — confirm intended.
            for k in range(len_candidates):
                for l in range(1, len_candidates):
                    make_connections(candidates[k], ref_bases_list[i], candidates[l], ref_bases_list[i], k_mer)
        # next bases: walk forward (circularly) while within max_gap_to_add
        for candidate_infos in candidates:
            i_plus = i + 1
            base = ref_bases_list[i_plus % len_ref_base]
            while i_plus-i < len_ref_base and (base - ref_bases_list[i]) % len_db <= options.max_gap_to_add:
                for hit_infos in ref_bases_dict[base]:
                    make_connections(candidate_infos, ref_bases_list[i], hit_infos, base, k_mer)
                i_plus += 1
                base = ref_bases_list[i_plus % len_ref_base]
    sys.stdout.write('\nRedirecting contig path cost '+str(time.time()-time2))
    return query_matrix


def del_complementary(fastg_file):
    """Write <fastg_file>.Temp containing only the forward records (drops
    every reverse-complement record, whose header name ends with a quote)."""
    global options
    time0 = time.time()
    temp_matrix = read_fasta(fasta_dir=fastg_file)
    i = 0
    while i < len(temp_matrix[0]):
        if temp_matrix[0][i].rstrip(';').split(':')[0].endswith('\''):
            del temp_matrix[0][i]
            del temp_matrix[1][i]
        else:
            i += 1
    write_fasta(out_file=fastg_file + '.Temp', matrix=temp_matrix, overwrite=True)
    sys.stdout.write('\nDel complementary cost'+str(time.time()-time0))


def remove_temp_files(fastg_file):
    """Best-effort cleanup of intermediate files unless --keep-temp is set."""
    global options
    if not options.keep_temp:
        if options.in_fastg_file:
            os.remove(fastg_file+'.Temp')
            try:
                os.remove(fastg_file+'.blast_in')
            except OSError:
                pass
        try:
            # BLAST nucleotide database files
            os.remove(options.reference_fa_base+'.index.nhr')
            os.remove(options.reference_fa_base+'.index.nin')
            os.remove(options.reference_fa_base+'.index.nsq')
        except OSError:
            pass


def main():
    """Entry point: locate blastn/makeblastdb, strip complements from the
    input fastg, BLAST against the reference, and write the joined fastg."""
    time0 = time.time()
    sys.stdout.write(
        "\nThis script would join the spades fastg contigs according to the reference."
        "\nIt would add extra gap nodes (N) and/or overlap nodes (?) in between the connectible nodes and generate "
        " a new fastg file."
        "\n"
        "\nThis is a BETA version:"
        "\nAlthough it will not produce wrong connections, it usually replicates the same right connection."
        "\nDon't be surprised if you find any other bugs.\n")
    require_commands()
    global options
    if not options.which_blast:
        # fall back to the bundled ncbi-blast binary when none was given
        try_this_bin = os.path.join(GO_DEP_PATH, "ncbi-blast", "blastn")
        if os.path.isfile(try_this_bin) and executable(try_this_bin):
            output, err = subprocess.Popen(
                try_this_bin + " -version", stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT, shell=True).communicate()
            if "not found" in output.decode("utf8"):
                sys.stdout.write(output.decode("utf8") + "\n")
            else:
                options.which_blast = os.path.split(try_this_bin)[0]
    if not executable(os.path.join(options.which_blast, "blastn")):
        sys.stdout.write(os.path.join(options.which_blast, "blastn") + " not accessible!")
        exit()
    if not executable(os.path.join(options.which_blast, "makeblastdb")):
        sys.stdout.write(os.path.join(options.which_blast, "makeblastdb") + " not accessible!")
        exit()
    # fastg to fasta
    fasta_file = options.in_fastg_file
    del_complementary(fasta_file)
    # make blast database if not made
    include_index = check_db(which_blast=options.which_blast)
    len_db = len(read_fasta(options.reference_fa_base)[1][0])
    # make blast
    new_fasta_matrix = blast_and_call_new_matrix(fasta_file=fasta_file, index_files=include_index,
                                                 out_file=fasta_file + '.blast_in', len_db=len_db,
                                                 which_blast=options.which_blast)
    # write out fastg
    write_fasta(out_file=fasta_file + '.Ncontigs_added.' + fasta_file.split('.')[-1],
                matrix=new_fasta_matrix, overwrite=False)
    remove_temp_files(fasta_file)
    sys.stdout.write('\n\nTotal cost: '+str(time.time()-time0)+'\n\n')


if __name__ == '__main__':
    main()


"""Copyright 2016 Jianjun Jin

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
The Cornell Historic Monograph Collection consists of digital surrogates for materials that were part of a joint digital-preservation study between Cornell University and the Xerox Corporation. Begun in 1990, the project developed a process in which brittle and decaying books were digitally scanned, using prototype equipment co-developed by Cornell and the Xerox Corporation (the CLASS scanner), and stored as 600dpi, bitonal TIFF images, compressed with ITU Group 4 compression, on digital platters on an EPOCH "jukebox" digital server. Facsimiles of these books were generated and the books were returned to the shelves. The images were available online using specially developed clients for Unix, Mac, and PC platforms. These clients were developed at Library Technology at Cornell University by William Turner III, David Fielding and Chris Stuart. Of the 456 General Monographs that make up the Historic Monograph Collection, 441 that are not covered by copyright protection are being made available for online viewing.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan
import wsme
from wsme import types as wtypes

from tuskar.api.controllers.v1.types.base import Base
from tuskar.api.controllers.v1.types.link import Link
from tuskar.api.controllers.v1.types.relation import Relation
from tuskar.api.controllers.v1.types.chassis import Chassis
from tuskar.api.controllers.v1.types.node import Node
from tuskar.api.controllers.v1.types.capacity import Capacity


class Rack(Base):
    """A representation of Rack in HTTP body."""

    # wsme attribute declarations: each class attribute names the wire type
    # used when (de)serialising the HTTP body.
    id = int
    name = wtypes.text
    slots = int
    subnet = wtypes.text
    location = wtypes.text
    state = wtypes.text
    chassis = Chassis
    capacities = [Capacity]
    nodes = [Node]
    links = [Link]
    resource_class = Relation

    @classmethod
    def convert_with_links(self, rack, links):
        """Build a fully-populated Rack API object from a DB rack object.

        NOTE: declared as a classmethod, so the first parameter (named
        'self' here) actually receives the class.

        :param rack: DB object providing as_dict(), chassis_id,
                     resource_class_id, capacities and nodes
        :param links: list of Link objects to attach to the result
        """
        kwargs = rack.as_dict()  # returns a new dict, overwriting keys is safe

        # replace the raw chassis_id with a Chassis sub-resource (empty
        # Chassis when the rack has none)
        if rack.chassis_id:
            kwargs['chassis'] = Chassis(id=rack.chassis_id,
                                        links=[Link.build_ironic_link('chassis', rack.chassis_id)])
        else:
            kwargs['chassis'] = Chassis()

        # optional relation to the owning resource class
        if rack.resource_class_id:
            l = [Link.build('self', pecan.request.host_url, 'resource_classes',
                            rack.resource_class_id)]
            kwargs['resource_class'] = Relation(id=rack.resource_class_id, links=l)

        kwargs['capacities'] = [Capacity(name=c.name, value=c.value, unit=c.unit)
                                for c in rack.capacities]
        kwargs['nodes'] = [Node(id=n.node_id,
                                links=[Link.build_ironic_link('node', n.node_id)])
                           for n in rack.nodes]
        return Rack(links=links, **kwargs)

    @classmethod
    def convert(self, rack, base_url, minimal=False):
        """Convert a DB rack to its API representation.

        When minimal is True only the id and a self link are returned.
        """
        links = [Link.build('self', pecan.request.host_url, 'rack', rack.id)]
        if minimal:
            return Rack(links=links, id=str(rack.id))
I was led to believe by school admin that getting certified as a MIBC was impossible without this program. Not true. You just need to take the test. ALSO we were misled to think that this program trumps experience. We were literally told that this course is the same as "two years work experience". It was incredibly disheartening to be told that it's cheaper and preferred by companies to train someone within the company to get MIBC certified than to hire someone new, especially if you have no experience in that field. I was told directly by a hiring manager after always seemingly acing every interview (MIBC even had a required course on getting hired), that for-profit schools are looked down upon and not taken seriously. I feel foolish and ashamed that I had my parents co-sign a $12,000 loan for a 10-month program when I could have attended a similar program at a legit university. Also myself along with every other MIBC student received the same listless email from the job placement specialist, regarding a scant handful of jobs that seemed few and far between. Thus, another lie - when I was talking with the PCI recruiter, he stated these jobs were always in demand. It turns out the PCI recruiters get paid commission - quelle surprise. The head of the coding program was completely awesome, though. That place didn't deserve to have her working for them. We apologize for your negative experience in 2010. Our recruiters do not and have not received a commission. We did have our MIBC program at the time due to strong demand from employers but found that with students not wanting to take the certification exam our outcomes were not what we wanted. Please let us know if career services can be of any assistance. This school is terrible. I was enrolled and ready to start classes when I decided to withdraw. Everyone you talk to is just so pushy and tries to push you into things.
When I went to withdraw from their school, they would not let me and kept offering me other programs to enroll in; finally I was able to withdraw, and my admissions advisor was very very rude and unprofessional. This school only wants your money. Now I am having to get ahold of them to have them fill out a withdrawal clearance letter so I can attend another school, and now you can't find a single person to talk to — all you get is someone's voicemail. This school is a joke, do not waste your money here. We apologize for your unpleasant experience. We do not condone our representatives being pushy or not respecting our students' wishes. Our financial aid department and business office would be more than happy to give you a withdrawal clearance letter. Please call us at 800-676-7912 so that we may help you resolve this. PCI is a terrible terrible school. I have degrees; I just attended PCI briefly because I wanted to pick up an extra trade. The financial aid office is a JOKE. THEY SWITCH STAFF LIKE IT'S NOTHING. THEY are very rude and only care about your money!!!!!! I was a student at Pinnacle Career Institute online for HVAC and was recommended to it by my company. The teachers were engaged in the class, and if you had any questions you could call or email them with problems you were having with the class. The boot camp was about 10 days. I really enjoyed boot camp; it was small, so you had more time to study in the labs for the hands-on training. I WOULD RECOMMEND PINNACLE TO ANYONE. We apologize that you had an unpleasant experience with your financial aid. If there is any way that we can still be of assistance, our business office would love to help. Please let us know how we can help. Spent $12,000, and still could not find employment in the field I was "trained" for.
I was actually told PCI and other non-accredited, for-profit schools are looked down upon at multiple job interviews by bemused recruiters (whereas part of the high-pressure enrollment script leads you to believe PCI is respected in the field.) Ultimately my next employment was at Walmart, despite being on the honor roll the entire duration of my courses. I don't think any of the medical billing jobs I applied for even called my references. I feel this is a scam. What you are not told is that no one will hire you. You have no experience. The online degree does not help in any way. I got a job in a doctor's office and emergency clinic through my externship site. I went from not working to having two jobs; it's been great. I am now in school working on my RN. Get started right away. Do not attend this school. I learned the hard way. I did not plan to end up getting pregnant, but I did. Well I attended their week-long boot camp, and had a few complications of the pregnancy (sickness and others). Besides being treated like crap when I was at boot camp for being pregnant, I also received injuries while there that were uncalled for. When it came time to learn hands-on, the instructor did not allow me or my partner to practice as much as she did the other two students. I see they show a lot of favoritism with students. Well I started my externship once after I returned home; then more complications put me into the hospital and the doctor put me on bed rest! I had completed almost half of my hours. I told all the faculty about the bed rest, and one lady told me I could return after delivery and start off where I left off. Then I speak to my coordinator and she tells me I will have to complete all the hours all over again AND return to Missouri and redo the boot camp again. I will then also have to pay more charges. This school is definitely all about money.
I checked my pell grants and I still had extra on my pell grant for the 2014-2015 school year that could have been used, but they want me to pay out of my pocket, THEN I will have to pay again out of pocket to re-enter school. PLEASE I'm begging everyone, do not attend this school either online or campus. I apologize for the complications during your externship making it difficult for you to finish your program. It is important for our student to have the hands on skills fresh before going to any externship site, which may be why they asked you to repeat the boot camp. We have changed the program so that the externship is now better integrated with the program. Please let us know if we can be of any assistance. 72% of 25 users found this page helpful.
# -*- coding: utf-8 -*-

from rules.rule import *
import re


class Rule(KLCRule):
    """Power-flag symbols follow some special rules/KLC-exceptions.

    A power-flag symbol must have exactly one pin, which must be an
    invisible POWER-INPUT whose name matches the component name; the
    footprint field and the footprint-filter list must both be empty.
    check() records which violations were found (via the fix* flags) and
    fix() applies the corresponding corrections.
    """

    def __init__(self, component):
        super(Rule, self).__init__(component, 'Power-flag symbols follow some special rules/KLC-exceptions')
        # fix flags: set by check(), consumed by fix()
        self.makePinINVISIBLE = False
        self.makePinPowerInput = False
        self.fixTooManyPins = False
        self.fixPinSignalName = False
        self.fixNoFootprint = False

    def check(self):
        """
        Proceeds the checking of the rule.

        Returns True if any violation was found, False otherwise.
        """
        fail = False
        if self.component.isPossiblyPowerSymbol():
            if (len(self.component.pins) != 1):
                self.error("Power-flag symbols have exactly one pin")
                fail = True
                self.fixTooManyPins = True
            else:
                # electrical type 'W' == power input
                if (self.component.pins[0]['electrical_type'] != 'W'):
                    self.error("The pin in power-flag symbols has to be of a POWER-INPUT")
                    fail = True
                    self.makePinPowerInput = True
                # an 'N' prefix on pin_type marks the pin invisible
                if (not self.component.pins[0]['pin_type'].startswith('N')):
                    self.error("The pin in power-flag symbols has to be INVISIBLE")
                    fail = True
                    self.makePinINVISIBLE = True
                # pin name must equal the component name (allowing for the
                # leading '~' of de-Morgan style component names)
                if ((self.component.pins[0]['name'] != self.component.name) and
                        ('~'+self.component.pins[0]['name'] != self.component.name)):
                    self.error("The pin name ("+self.component.pins[0]['name']+") in power-flag symbols has to be the same as the component name ("+self.component.name+")")
                    fail = True
                    self.fixPinSignalName = True
            # footprint field must be empty
            if self.component.fields[2]['name'] != '' and self.component.fields[2]['name'] != '""':
                self.error("Graphical symbols have no footprint association (footprint was set to '"+self.component.fields[2]['name']+"')")
                fail = True
                self.fixNoFootprint = True
            # FPFilters must be empty
            if len(self.component.fplist) > 0:
                self.error("Graphical symbols have no footprint filters")
                fail = True
                self.fixNoFootprint = True
        return fail

    def fix(self):
        """
        Proceeds the fixing of the rule, if possible.
        """
        if self.fixTooManyPins:
            self.info("FIX for too many pins in power-symbol not supported")
        if self.makePinPowerInput:
            self.info("FIX: switching pin-type to power-input")
            self.component.pins[0]['electrical_type'] = 'W'
        if self.makePinINVISIBLE:
            self.info("FIX: making pin invisible")
            self.component.pins[0]['pin_type'] = 'N'+self.component.pins[0]['pin_type']
        if self.fixPinSignalName:
            newname = self.component.name
            if self.component.name.startswith('~'):
                newname = self.component.name[1:]
            self.info("FIX: change pin name to '"+newname+"'")
            # BUG FIX: this branch previously re-applied the invisibility
            # prefix to pin_type instead of renaming the pin.
            self.component.pins[0]['name'] = newname
        if self.fixNoFootprint:
            self.info("FIX empty footprint association and FPFilters")
            self.component.fplist.clear()
            # BUG FIX: clear the field's 'name' entry; assigning a bare
            # string to fields[2] would break check(), which reads
            # fields[2]['name'].
            self.component.fields[2]['name'] = ''
Meeting planners have more than enough to think about when it comes to searching for the perfect venue – and eco-consciousness is increasingly making its way top of mind for many. It is currently estimated that the average hotel guest generates 2.2 pounds of waste each night of their stay. And, with the meetings and event industry recently being deemed as the second most wasteful sector in the United States by the EPA, we at JW Marriott Austin knew we had to go above and beyond to deliver more efficient meetings and events with the lowest possible carbon footprint. By offering an array of environmentally friendly offerings and programs, the JW Marriott Austin makes it easy for companies and planners to go green for their next meeting or event. The hotel, located in the heart of Texas' vibrant state capital and just steps from city's convention center with 370,000 square feet of meeting and exhibit space, makes the environment a priority at all times. Just by selecting the JW Marriott Austin as the venue for a meeting or event, planners are already making a green choice. It is a built in and effortless choice. With a goal to further reduce energy and water consumption by 20 percent for 2020, the JW Marriott Austin is well on its way to continuing efforts to provide meeting planners and guests the most environmentally friendly venue option for all their needs. Upon its opening in 2015, JW Marriott Austin received a LEED (Leadership in Energy and Environmental Design) Silver certification. LEED is an internationally recognized green building certification system providing third-party verification that a building has been designed and constructed using performance-improving strategies. For example, LEED certified properties focus on resource efficiency by using less water and energy, and reduce greenhouse gas emissions. 
LEED certification is also the most widely used green building rating system in the world with 1.85 million square feet of construction space certifying every single day. JW Marriott Austin, which features 120,000 square feet of meeting and event space on property, also received a four-star Austin Energy Green Building rating. The City of Austin created the program as the nation's first green building program in 1990, and since then it has grown to be the nation's most successful sustainable building program. Austin Energy Building ratings encourage Central Texans to design and construct more sustainable homes and buildings with the mission to lead the transformation of the building industry to a sustainable future. Certifications like these are essential when it comes to going the extra green mile without leaving a huge footprint. But what exactly do these strategies entail when it comes to JW Marriot Austin? I'm glad you asked. JW Marriott Austin has implemented a variety of strategies to uphold its LEED Silver and Austin Green Energy Building statuses, from everything as simple as updating light fixtures with energy saving light bulbs to utilizing local Texas manufactured building materials, and beyond.
import sys, time import paramiko from linux_server import SizelessLinuxServer class ParamikoServer(SizelessLinuxServer): def receive(self): stdout = "" while self.session.recv_ready(): stdout += self.session.recv(sys.maxint) stderr = "" while self.session.recv_stderr_ready(): stderr += self.session.recv_sterr(sys.maxint) return stdout, stderr def receive_all(self): stdout = "" stderr = "" while stdout[-2:] != '$ ': time.sleep(0.1) stdout2, stderr2 = self.receive() stdout += stdout2 stderr += stderr2 return stdout, stderr def receive_each(self): stdout = "" while stdout[-2:] != '$ ': time.sleep(0.1) stdout, stderr = self.receive() yield stdout, stderr def disconnect(self): self.client.close() self.connected = False def run_command(self, command, root=None, path=None): "Returns (output, error) as strings." stdout = "" stderr = "" for stdout2, stderr2 in self.run_command_each(command, root, path): stdout += stdout2 stderr += stderr2 stdout = "\n".join(stdout.split('\r\n')[1:-1]) # drop command and prompt return stdout, stderr def run_command_each(self, command, root=None, path=None): if root is not None: self.cwd(self.fullpath(root, path)) print command self.session.sendall(command + '\n') for stdout, stderr in self.receive_each(): yield stdout, stderr
13 Dec A Giant Marble Instrument. What???
from collections import Counter
import gzip
from operator import itemgetter
from os import listdir, path
import re
import cPickle as pickle
import json
from math import log, sqrt
from scipy.sparse import csr_matrix, lil_matrix, coo_matrix
import numpy as np
from sklearn.preprocessing import normalize
import unicodedata

__author__ = 'gronostaj'


def list_dirs(dirpath):
    """Return names (not full paths) of the immediate subdirectories of dirpath."""
    return [f for f in listdir(dirpath) if path.isdir(path.join(dirpath, f))]


def list_files(dirpath):
    """Return names (not full paths) of the regular files directly inside dirpath."""
    return [f for f in listdir(dirpath) if path.isfile(path.join(dirpath, f))]


class Serializer:
    """Helpers for dumping/loading objects with pickle or json, optionally gzipped."""

    @staticmethod
    def serialize(obj, serializer, filename, gz=False, **kwargs):
        """Write obj to filename using serializer (any module exposing dumps()).

        When gz is true the payload is gzip-compressed (level 5) and '.gz'
        is appended to the file name.  kwargs pass through to dumps().
        """
        if gz:
            with gzip.open('%s.gz' % filename, 'wb', 5) as f:
                f.write(serializer.dumps(obj, **kwargs))
        else:
            with open(filename, 'wb') as f:
                f.write(serializer.dumps(obj, **kwargs))

    @staticmethod
    def deserialize(serializer, filename):
        """Load an object from filename; gzip handling is inferred from '.gz'."""
        gz = filename.endswith('.gz')
        if gz:
            with gzip.open(filename, 'rb') as f:
                obj = serializer.load(f)
        else:
            with open(filename, 'rb') as f:
                obj = serializer.load(f)
        return obj

    @staticmethod
    def pickle(obj, filename, gz=True):
        """Pickle obj to filename (gzipped by default, so file becomes filename + '.gz')."""
        Serializer.serialize(obj, pickle, filename, gz)

    @staticmethod
    def unpickle(filename):
        """Load a pickled object; plain or gzipped is decided by extension."""
        return Serializer.deserialize(pickle, filename)

    @staticmethod
    def to_json(obj, filename, gz=True):
        """Serialize obj as pretty-printed, key-sorted JSON (gzipped by default)."""
        Serializer.serialize(obj, json, filename, gz, sort_keys=True, indent=4, separators=(',', ': '))

    @staticmethod
    def from_json(filename):
        """Load a JSON document; plain or gzipped is decided by extension."""
        return Serializer.deserialize(json, filename)


class Droogle:
    """Small TF-IDF search engine over "chunks" of text files.

    index() builds the index artifacts and saves them into a directory;
    __init__() loads them back; query() ranks all chunks against a
    query string by cosine similarity.
    """

    # Accepted on-disk spellings for each index artifact.
    SUFFIXES = ('%s.pickle', '%s.pickle.gz', '%s.json', '%s.json.gz')
    _WORDMAP = 'wordmap'  # word -> matrix row index
    _MATRIX = 'matrix'    # sparse word x chunk TF-IDF matrix
    _CHUNKS = 'chunks'    # chunk id -> original chunk text

    def __init__(self, indexdir):
        """Load a previously built index from indexdir.

        Raises FileMissingError naming the first artifact that cannot be
        found under any accepted suffix.
        """
        dbs = {}
        for req in (Droogle._WORDMAP, Droogle._MATRIX, Droogle._CHUNKS):
            # First existing candidate wins, in SUFFIXES order.
            satisfying = [
                path.join(indexdir, suffix % req)
                for suffix in Droogle.SUFFIXES
                if path.isfile(path.join(indexdir, suffix % req))
            ]
            if not satisfying:
                raise FileMissingError(req)
            else:
                dbs[req] = satisfying[0]
        # Choose the loader from the file extension.
        self.dbs = {
            k: Serializer.unpickle(f) if f.endswith('.pickle') or f.endswith('.pickle.gz')
            else Serializer.from_json(f)
            for k, f in dbs.iteritems()
        }

    @staticmethod
    def _sanitize(str):
        """Lowercase the text and replace runs of non-ASCII bytes with spaces.

        NOTE(review): the parameter name shadows the builtin ``str``;
        kept as-is here.
        """
        return re.sub(r'[^\x00-\x7F]+', ' ', str.lower())

    @staticmethod
    def _bagofwords(str):
        """Return a Counter of \\w+ token occurrences in the string."""
        return Counter(re.findall(r'\w+', str))

    @staticmethod
    def _indexstring(filename, str, separator):
        """Split one file's text on separator and build per-chunk word bags.

        Returns (bags, chunks, wordset); bag/chunk keys are '<filename>_<i>'.
        """
        bags = {}
        chunks = {}
        wordset = set()
        for i, chunk in enumerate(re.split(separator, str)):
            bag = Droogle._bagofwords(Droogle._sanitize(chunk))
            bags['%s_%d' % (filename, i)] = dict(bag)
            chunks['%s_%d' % (filename, i)] = chunk
            wordset = wordset | set(bag.keys())
        return bags, chunks, wordset

    @staticmethod
    def index(dirpath, inputfiles, separator):
        """Build the TF-IDF index for inputfiles and save it under dirpath.

        Returns (number of chunks, vocabulary size).
        """
        bags = {}
        chunks = {}
        wordset = set()
        for inputfile in inputfiles:
            print("- Parsing file %s" % inputfile)
            with open(path.join(dirpath, inputfile), 'r') as f:
                thisbag, thischunks, thisset = Droogle._indexstring(inputfile, f.read(), separator)
            bags.update(thisbag)
            chunks.update(thischunks)
            wordset = wordset | thisset
        print("- Building matrix")
        wordmap = {w: i for i, w in enumerate(wordset)}
        chunkmap = {c: i for i, c in enumerate(bags.keys())}
        # Rows are words, columns are chunks; filled with raw term counts.
        matrix = lil_matrix((len(wordset), len(bags)))
        chunks = {chunkmap[n]: c for n, c in chunks.items()}
        for chunkname, chunkid in chunkmap.iteritems():
            bag = dict(bags[chunkname])
            for word, quantity in bag.iteritems():
                wordid = wordmap[word]
                matrix[wordid, chunkid] = quantity
        matrix = csr_matrix(matrix)
        print("- Optimizing matrix")
        # Document frequency of each word = nonzero entries in its row.
        nonzero = np.diff(matrix.indptr)
        # NOTE(review): classic IDF is log(#documents / df); here the
        # *vocabulary size* is the numerator, and under Python 2 the
        # division is integer floor division -- confirm both are intended.
        idf = lil_matrix(np.array(map(lambda c: log(len(wordset) / c), nonzero)))
        # Scale each word row by its IDF, then L2-normalize chunk vectors.
        matrix = matrix.transpose().multiply(idf)
        normalize(matrix, copy=False)
        matrix = matrix.transpose()
        print("- Saving files")
        Serializer.to_json(wordmap, path.join(dirpath, "%s.json" % Droogle._WORDMAP))
        Serializer.pickle(matrix, path.join(dirpath, "%s.pickle" % Droogle._MATRIX))
        Serializer.pickle(chunks, path.join(dirpath, "%s.pickle" % Droogle._CHUNKS))
        return len(bags), len(wordset)

    def query(self, string):
        """Rank every indexed chunk against the query string.

        Returns all chunk texts sorted by descending cosine similarity
        to the query.
        """
        bag = Droogle._bagofwords(Droogle._sanitize(string))
        # L2 norm of the query's term counts.
        # NOTE(review): reduce() without an initial value uses the first
        # count *un-squared* as the seed (correct only when it equals 1);
        # likely intended sum(x ** 2 for x in bag.values()) -- verify.
        norm = sqrt(reduce(lambda v, x: v + x ** 2, bag.values()))
        bag = {k: v / norm for k, v in dict(bag).iteritems()}
        # Keep only words present in the index, keyed by their row id.
        bagmap = {
            self.dbs[Droogle._WORDMAP][word]: count
            for word, count in bag.iteritems()
            if word in self.dbs[Droogle._WORDMAP]
        }
        bagmap = zip(*bagmap.items())  # -> ([word ids], [weights])
        # Sparse 1 x n_words query vector dotted with the word x chunk matrix.
        lookup = coo_matrix(
            (bagmap[1], ([0] * len(bagmap[0]), bagmap[0])),
            dtype='double',
            shape=(1, self.dbs[Droogle._MATRIX].shape[0])
        ).dot(self.dbs[Droogle._MATRIX])
        results = [(self.dbs[Droogle._CHUNKS][i], lookup[0, i])
                   for i in xrange(self.dbs[Droogle._MATRIX].shape[1])]
        return map(itemgetter(0), sorted(results, key=itemgetter(1), reverse=True))


class FileMissingError(Exception):
    """Raised by Droogle.__init__ when a required index artifact is absent."""

    def __init__(self, filename):
        # Base name of the missing artifact (e.g. 'wordmap').
        self.filename = filename
NEW YORK — These aren’t the same ‘ol Celtics. No Rajon Rondo. No Ray Allen. No Perk, Posey or P.J. Brown. They took another step backward this season, falling to seventh in the Eastern Conference. They were pretty awful on the road, their defense didn’t have quite the same bite, and their offense was pretty anemic. You never knew what you were going to get from them, maybe a win over a great team on one night and a loss to a terrible team the next. And when they were down 0-3 to the New York Knicks in this first round series, it appeared to be time to finally count them out. Well … uh … never mind. Maybe these are the same ‘ol Celtics. Fueled by a defense that continues to hold it’s own against one of the most potent offensive attacks in the league, the Celtics staved off elimination for the second time on Wednesday. This time they did it in enemy territory, holding on for a 92-86 victory at Madison Square Garden that sends the series back to Boston for Game 6 on Friday. So now, things get really interesting. No team in NBA history has ever come back from an 0-3 series deficit, but it’s starting to look like great defense can beat great offense. The Knicks have shot just 37 percent and scored just 94 points per 100 possessions over the last two games. Coming up empty in Boston without J.R. Smith is one thing. But with Smith back and the opportunity to win a playoff series on their home floor for the first time since 1999, the Knicks laid another egg on Wednesday. In his return from a one-game suspension, Smith missed his first 10 shots and finished 3-for-14. Carmelo Anthony wasn’t much better, shooting 8-for-24, meaning that the Knicks basically got the same production out of the pair as they did in Game 4 (when Smith didn’t play). The one thing the Knicks still have going offensively is Raymond Felton on the pick and roll. He continued to get to the rim in Game 5, rendering Avery Bradley useless and scoring 21 points on 10-for-19 shooting. 
But too often, the Knicks became stagnant offensively, resorting to more isolations and contested jumpers. They’ve lived by the three all season, but have shot a brutal 12-for-52 (23 percent) from beyond the arc in the last two games. Anthony has missed his last 15 3-point attempts. Of course, the Celtics wouldn’t have won Games 4 and 5 if they weren’t scoring themselves. And Wednesday was easily their best offensive performance of the series. Part of it was better execution. But mostly, they just shot better. That was the one source of optimism when they were down 0-3. They’re a bad offensive team, but they’re not a bad shooting team, and they were missing a lot of decent shots in those first three games. The Knicks have played aggressively on the ball all series, leaving shooters open. And now the Celtics are finally making them pay. Their 3-point percentage has increased in every game of the series, peaking with an 11-for-22 performance in Game 5. Really, these are both jump-shooting teams, and games will sometimes be determined by whether or not the shots go in. But it was clear on Wednesday which team was forcing more misses. That’s the team that had its season on the line, the team that never goes down without a fight. The Knicks wore all black to this game, thinking they were attending a funeral. Instead, they got a free trip back to Boston, thanks to a prideful team that just won’t die. Same ‘ol Celtics, apparently. Never count ’em out. I love Doc Rivers reaction at the end of the game. For the first time in my life I find myself rooting for the Boston Celtics… The disrespect from JR and Kenyon really pissed me off, can’t imagine how the C’s felt. Yeah, but that was a dirty arm pull by Garnett on Carmelo. He should’ve been tossed. 
The first few games KG and Pail shot the ball horribly..There was no way they were going to keep missing all those shots..All I could think is once they start playing(hitting shots)like they have been all year…then we’ll have us a series..GO Green Gang!!!! Its hard to predict.. really hard.. that is why I believe in the saying “The Ball is Round” sometimes your in top or bottom. But this issue needs some real player to stay in the PLAYOFFS. If JR Smith will comeback in Boston the Knicks will win period and with exclamation point. This series is so on fire! Can’t wait for game 6! Lol @ Kenyon Martin. You can make jokes but you cant take em. What a poor sport cry baby. Professional athlete, powerforward/center acting hard to a little dude with a microphone. Heres a word of advice to Martin and Smith: A mouth that is closed, gathers no foot. The foundation that the Celtics are built from is unrivalled in the NBA! The other storied franchise the ‘Lakers’ simply don’t have it. I am continuously amazed at how much heart and guts these Celtics have. They would bleed for that jersey! They have no right to even be in this position with all their injuries and age etc. Somehow Celtic Pride finds a way. It’s playoff time which means it’s Celtic Time!! Just get Game 6 and then anything can happen. BLACK shirts BLACK pants…AIN’T WORKING!!! Remember when we were too old? When our season was over without Rondo? When we were down for the count at 0-3? Good, neither do we. Another trip to the East Finals is what should happen why? Because of guys like K.G. The Knicks are soft and not battle tested like Boston. CELTICS M O T H E R F U C K E R!!!!!!!!!! Stop pretending the Celtics are playing great with bad players, the Celtics are playing bad with bad players, They cant score, they cant rebound offensive, they comit a lot of stupid turnovers, they dropped a 20 point lead, they have been horrible. the Knicks simply manage to have been worse in the last two games (and in game 1). 
I am a Knick Fan and a diehard fan of Melo but this series has been embarrassing,.. yeah, no credit to how the celtics played the last two games… the knicks went so bad that even milwaukee can do the same thing… it takes two to tango bro. Celtics are a great team and doing well without Rondo, but after this season they are not going to be as great like the last couple of seasons. I give them respect for playing with nothing to lose but it’s the Knicks time now. Even if celtics lose the knicks can’t beat the pacers and won’t be the heat especially lebron in playoff mode. With clowns like J.R smith and Carmelo leading the knicks u know nothing is guaranteed except that the one sure thing, that these guys aint championship material. Just keeping the faith bro. Whatever happens, NYK would’ve probably learned after the series why they aren’t considered a winning team just yet. I’m just proud of the Celtics that despite being overmatched, they still are able to make the Knicks pee their pants (LOL). One thing is for sure in this series, Boston will keep swinging til the final buzzer. game 6 on the home turf for the lagging opponents. Both celtics/houston will likely win game 6 and force 7! they both have momentum and both top teams have shown vulnerability due to lack of variety! These 4 teams and with some effort from clippers and Golden State will make first round playoff one of the best in a long time. Contrary to everybody’s belief( including me) that will be totally lopsided 1st round! Every game is a grind for the Celtics. They hv to execute 85%+ in order to win, both off & esp def. If they didn’t hv the system & just relied on super star individual plays, they would not hv enjoyed success in playoffs. Though Celtics did hit quite a no of difficult & clutch shots, from PP, Green (2 3’s), KG, Terry. The main issue is are they able to keep the defense up to championship level, & hv more guys producing if they’re in the rotation, while Melo & JR miss their wild attempts.
these 2 guys do or die by the wild shot attempts. Is it enough to stop everyone else? I’m glad the Celtics didn’t fall into the trap of trying to do too much for the “funeral look” by the Knicks. I hope Crawford also don’t fall into the antics which Felton did after the game finished, as he can easily make mistakes on court because of this. For a basketball fan like me, this series is so amusing and interesting. The Knicks, with all their drama, desperately need to get out of the first round and all those fun facts that keep popping up make it so amusing. Melo has only won 2 playoff series and lost 9, he made it out of the 1st round only once in his career. The Knicks haven’t won a playoff series in 13 years. They were up 3-0 and no team has ever come back from a 3-0 deficit to win the series in the NBA. i have faith in them… GO CELTICS!!!! Don’t Let The CELTICS Win! New York Knicks looks like idiots now, after that whole funeral gig. Jesus Christ, they have to be the dumbest team in the history of the NBA to even try that. This is the Celtics we are talking about, the single greatest NBA franschise. They are not going to lay down. Even if the Knicks win this series, I have lost all respect for the New York Knicks. And guess what, I’ve always hated the Celtics but goddamn I have respected them. Looks like Beantown might still be holding on here. And the TRUTH hurts! Oh & let’s remember that history gets made everyday! For one-It’s Celtics pride baby, i never loose faith! Two-all jr did was piss Terry off real, real, reeeal bad as well as all the Celts & then all the black suites only just compounded That Sweet Celtics Fire-good luck w/ that ny! ONE GAME @ A TIME!!! Would have been three if traitor Allen hadn’t left. That’s why you should never go easy on veteran champions. Revenge from 2011 is what’s on the Knicks’ head. What’s on the Head of the Celtics is CHAMPIONSHIP. this is what happens to teams who have experience and will to win.. 
the C’s may be old but their will and experience pushes them to a whole different level.. knicks lack something like that.. they brag because they beat them 3 straight and wearing black and stuff.. HAHA! who’s laughing now KNICKS? Well, nice job contradicting yourself after the Blogtable: Being Relevant In Boston article a few hours ago, quality journalism here. Anyway, about the Knicks, I remember Barkley saying you can’t possibly keep making threes like they did in the beginning of the season, and of course he was right. Now in the playoffs, against a top defense, with added pressure, they’re much less effective on that aspect. I like Woodson because he tries to simplify the processes, but this is the time a system or designed plays make a difference, because they want to be aggressive and win, but in some situations they don’t know how, and end up committing offensive fouls. The Celtics are doing really well. It’s hard to come back from 0-3, but we’ll see. Felton has given Bradley problems, which is not common. Garnett should win Defensive Player Of The Year every season. Any one understood WTF Garnett was saying? How could you ever count out such a proud franchise? With all their injuries they still find ways to get the job done, that’s why I respect the Celtics so much. They’ve lost Rondo, Sullinger and Barbosa, three very important players. I mean, take Carmelo, Novak and another role player away from NY and they probably wouldn’t have made the playoffs. I think the Celtics could pull it off, maybe the Rockets could too, I wouldn’t be surprised if it happened. And if it does happen, I think it’s safe to say the Celtics could h ave won a championship if it wasn’t for the injuries, considering NY is one of the best teams in the NBA right now. It’s just a shame they have no respect for a team that deserves it. As Mick famously said to Rocky “I ain’t heard no bell yet”. Go C’s!! Time for a new commercial….cause that’s the definition of “Big” :-). 
Love,your heart, guys…always have, and always will. I don’t understand why coach Woodson does not play more minutes with Pablo Prigioni. His numbers (+/-) are always good for the team and his style complicates things for a Boston team without a point guard. Call me crazy but I think we could see the first comeback from 0-3 in the history of this league. This is not the most likely scenario but I’ve feeling Boston or Houston have a real chance to win the next two games. After three games, it was unthinkable but after five why not. I agree. Houston and Boston have a good momentum to win 4 straight. But I think Houston has a higher percentage to win it just because Westbrook isn’t playing. Well basketball was way more interesting back in the day when teams were rivals and not pals….remember those Barkley – Shaq fights? Rodman vs the world????? those were the days…now the Celtics are the only ones with rivalries resembling the old days! Yep, that’s what you gotta love about these Celtics, they’re remembrance of the 80’s and 90’s basketball, pure grit, not making friends, playing good old fashion basketball… I really admire them, even so, NY has a solid young talented team, but if they can pull a W in Boston for game 6, who knows… you could see the look in the NY players, they were defeated, they seem like they’re living a nightmare… and now, they have to go to Boston, all emotions in the building… it’s gonna be a GREAT game to watch… so far, the best series!!! Indiana is a huge mismatch for the Celtics that’s why most were rooting for an Indie/Boston 1st rd. If they ever get out of New York you can bet your life that they’re a shoe in for the ECF. I too am a Celtic fan. While you correct, in part, that they are running on heart, that’s not the only thing they’re doing. It is sheer execution. That’s it! They are playing fundamental basketball: moving the ball side-to-side, getting the highest percentage shot. 
Look at the scoring: it’s balanced; with multiple players in double figures. Also, the teams are getting used to each other. It took us nearly too long to get there, but we did. It’s Grind Out City from here on. Nevertheless, NY has the advantage. They have to win one, we have to win two. It’s an uphill battle. But I believe we have the intestinal-fortitude to do it. LET’S GO CELTICS!!!! it is admirable indeed but you forget… these are aging guys. If ever Boston win the series which I hope they do, they will be pretty vulnerable for the next series because of the effort they need to exert to oust the Knicks. Im not hating BTW just being real. Celtics had plenty of 7 game series in their 2008 championship run, 2010 finals run, and 2012 ECF run. Didn’t seem to slow them one bit. I love Doc but I hate Boston’s rude fan.
# -*- coding: utf-8 -*-
"""
Logging support with colors, just available in *unix.

Borrow from `tornado.log`:
https://github.com/tornadoweb/tornado/blob/master/tornado/log.py
"""

from __future__ import print_function, division

import sys
import functools
import logging
import logging.handlers

try:
    import curses
except ImportError:
    # No curses (e.g. Windows): color output is disabled below.
    curses = None

# try:
#     import cPickle as pickle
# except ImportError:
import pickle

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO


# Shared package logger; configured by enable_pretty_logging().
logger = logging.getLogger("TASKQ")

# Convenience pickle helpers pinned to the fastest protocol.
dumps = functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
loads = pickle.loads


class NoSuchIdExists(Exception):
    """Raised when a requested task/queue id is unknown (see callers elsewhere)."""
    pass


class ClassUnpickler(object):
    """Context manager that unpickles while forcing every class to resolve to `cls`.

    Overrides ``pickle.Unpickler.find_class`` so that any class reference in
    the stream resolves to the class supplied at construction time.  The
    ``as`` target of the ``with`` statement is an ``Unpickler`` reading from
    an in-memory buffer::

        with ClassUnpickler(s, SomeClass) as unpickler:
            obj = unpickler.load()
    """

    def __init__(self, buffer, cls):
        # buffer: pickled bytes; cls: the class every stream reference maps to.
        self._f = StringIO(buffer)
        self._cls = cls

    def __enter__(self):
        def _resolve_class(module, name):
            # Ignore the recorded module/name entirely.
            return self._cls
        unpickler = pickle.Unpickler(self._f)
        unpickler.find_class = _resolve_class
        return unpickler

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Release the in-memory buffer; exceptions propagate (no return True).
        if hasattr(self, '_f'):
            self._f.close()


def singleton(cls):
    """Class decorator replacing the class with its single instance.

    NOTE(review): the ``instance.__call__ = lambda: instance`` assignment
    does not affect call syntax for new-style classes (special methods are
    looked up on the type) -- confirm the intended usage with callers.
    """
    instance = cls()
    instance.__call__ = lambda: instance
    return instance


def enable_pretty_logging(log_level="info", logger=logger):
    """Attach a colorized stderr handler to ``logger`` at ``log_level``."""
    logger.setLevel(getattr(logging, log_level.upper()))
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(LogFormatter())
    logger.addHandler(stream_handler)


class LogFormatter(logging.Formatter):
    """Log formatter with terminal color support (adapted from tornado.log)."""

    DEFAULT_FORMAT = "%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s"
    DEFAULT_DATE_FORMAT = "%y-%m-%d %H:%M:%S"
    # Level -> curses color number.
    DEFAULT_COLORS = {
        logging.DEBUG: 4,  # Blue
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
    }

    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
        """Build the formatter; colors apply only when stderr is a color tty."""
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt
        self._colors = {}
        if color and _stderr_supports_color():
            # Escape sequence that switches foreground color, per terminfo.
            fg_color = (curses.tigetstr("setaf") or curses.tigetstr("setf") or "")
            for levelno, code in colors.items():
                self._colors[levelno] = unicode(curses.tparm(fg_color, code), "ascii")
            # Sequence that resets all attributes.
            self._normal = unicode(curses.tigetstr("sgr0"), "ascii")
        else:
            self._normal = ""

    def format(self, record):
        """Render the record, colorizing the level/time prefix when possible."""
        try:
            message = record.getMessage()
            assert isinstance(message, basestring)
            record.message = _safe_unicode(message)
        except Exception as e:
            # Never let a bad log call raise; surface the failure in-line.
            record.message = "Bad message (%s): %r" % (e, record.__dict__)
        record.asctime = self.formatTime(record, self.datefmt)
        if record.levelno in self._colors:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ""
        formatted = self._fmt % record.__dict__
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # Append the traceback, one source line per output line.
            lines = [formatted.rstrip()]
            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        # Indent continuation lines so multi-line records stay visually grouped.
        return formatted.replace('\n', '\n    ')


def _stderr_supports_color():
    """Return True when stderr is a tty whose terminfo reports color support."""
    color = False
    if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
        try:
            curses.setupterm()
            if curses.tigetnum("colors") > 0:
                color = True
        except Exception:
            # Any curses failure simply means "no color".
            pass
    return color


def _safe_unicode(s):
    """Coerce bytes to unicode (utf-8), passing unicode/None through.

    Falls back to repr() when the bytes do not decode.
    NOTE(review): the error message says "Excepted"; likely a typo for
    "Expected" (runtime string, left unchanged here).
    """
    if isinstance(s, (unicode, type(None))):
        return s
    if not isinstance(s, bytes):
        raise TypeError("Excepted bytes, unicode, None; got %r" % type(s))
    try:
        return s.decode("utf-8")
    except UnicodeDecodeError:
        return repr(s)


if __name__ == "__main__":
    # Ad-hoc smoke test: show color detection state, then log a traceback.
    enable_pretty_logging()
    print(_stderr_supports_color())
    print(hasattr(sys.stderr, 'isatty'))
    print(curses)
    try:
        1 / 0
    except ZeroDivisionError:
        # logger.error("error", exc_info=sys.exc_info())
        logger.error("error", exc_info=True)
Treat your table to a little decadence with the Walter G Caviar napkins. This design has been hand block printed in India. This style will not be repeated this year; we have 2 units remaining.
from __future__ import unicode_literals

from django.db import models

from authentication.models import User, Usergroup
from compute.models import Computer
from image.models import Image, Imagerevision
from managementnode.models import Managementnode
from core.models import State


class Request(models.Model):
    """Maps the legacy ``request`` table (reservation request records)."""

    # Current and previous state of the request; distinct related_names
    # are required because both FKs point at State.
    stateid = models.ForeignKey(State, db_column='stateid', related_name="rel_si")
    userid = models.ForeignKey(User, db_column='userid')
    laststateid = models.ForeignKey(State, db_column='laststateid', related_name="rel_laststateid")
    logid = models.IntegerField()
    # Integer columns below mirror the legacy schema; presumably they are
    # boolean flags stored as ints -- TODO confirm against the database schema.
    forimaging = models.IntegerField()
    test = models.IntegerField()
    preload = models.IntegerField()
    start = models.DateTimeField()
    end = models.DateTimeField()
    daterequested = models.DateTimeField()
    datemodified = models.DateTimeField(blank=True, null=True)
    checkuser = models.IntegerField()

    class Meta:
        db_table = 'request'


class Serverrequest(models.Model):
    """Maps the legacy ``serverrequest`` table (one-to-one extension of Request)."""

    name = models.CharField(max_length=255)
    serverprofileid = models.SmallIntegerField()
    requestid = models.OneToOneField(Request, db_column='requestid')
    fixedip = models.CharField(db_column='fixedIP', max_length=15, blank=True, null=True)  # Field name made lowercase.
    fixedmac = models.CharField(db_column='fixedMAC', max_length=17, blank=True, null=True)  # Field name made lowercase.
    # Separate admin/login group FKs into Usergroup need distinct related_names.
    admingroupid = models.ForeignKey(Usergroup, db_column='admingroupid', blank=True, null=True, related_name="rel_agi")
    logingroupid = models.ForeignKey(Usergroup, db_column='logingroupid', blank=True, null=True, related_name="rel_login")
    monitored = models.IntegerField()

    class Meta:
        db_table = 'serverrequest'

# Create your models here.
class Reservation(models.Model):
    """Maps the legacy ``reservation`` table: a Request placed on a Computer/Image."""

    requestid = models.ForeignKey(Request, db_column='requestid')
    computerid = models.ForeignKey(Computer, db_column='computerid')
    imageid = models.ForeignKey(Image, db_column='imageid')
    imagerevisionid = models.ForeignKey(Imagerevision, db_column='imagerevisionid')
    managementnodeid = models.ForeignKey(Managementnode, db_column='managementnodeid')
    remoteip = models.CharField(db_column='remoteIP', max_length=15, blank=True, null=True)  # Field name made lowercase.
    lastcheck = models.DateTimeField(blank=True, null=True)
    pw = models.CharField(max_length=40, blank=True, null=True)
    connectip = models.CharField(db_column='connectIP', max_length=15, blank=True, null=True)  # Field name made lowercase.
    connectport = models.SmallIntegerField(blank=True, null=True)

    class Meta:
        db_table = 'reservation'


class Reservationaccounts(models.Model):
    """Maps ``reservationaccounts``: per-user credentials for a reservation."""

    reservationid = models.ForeignKey(Reservation, db_column='reservationid')
    userid = models.ForeignKey(User, db_column='userid')
    password = models.CharField(max_length=50, blank=True, null=True)

    class Meta:
        db_table = 'reservationaccounts'
        # One account row per (reservation, user) pair.
        unique_together = (('reservationid', 'userid'),)


class Blockrequest(models.Model):
    """Maps ``blockRequest``: a recurring bulk allocation of machines."""

    name = models.CharField(max_length=80)
    imageid = models.ForeignKey(Image, db_column='imageid')
    nummachines = models.IntegerField(db_column='numMachines')  # Field name made lowercase.
    groupid = models.ForeignKey(Usergroup, db_column='groupid', blank=True, null=True)
    repeating = models.CharField(max_length=7)
    ownerid = models.ForeignKey(User, db_column='ownerid')
    managementnodeid = models.ForeignKey(Managementnode, db_column='managementnodeid', blank=True, null=True)
    expiretime = models.DateTimeField(db_column='expireTime')  # Field name made lowercase.
    processing = models.IntegerField()
    status = models.CharField(max_length=9)
    comments = models.TextField(blank=True, null=True)

    class Meta:
        db_table = 'blockRequest'


class Blocktimes(models.Model):
    """Maps ``blockTimes``: one concrete time window of a block request."""

    blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid')  # Field name made lowercase.
    start = models.DateTimeField()
    end = models.DateTimeField()
    processed = models.IntegerField()
    skip = models.IntegerField()

    class Meta:
        db_table = 'blockTimes'


class Blockcomputers(models.Model):
    """Maps ``blockComputers``: computers assigned to a block time window."""

    blocktimeid = models.ForeignKey(Blocktimes, db_column='blockTimeid')  # Field name made lowercase.
    computerid = models.ForeignKey(Computer, db_column='computerid')
    imageid = models.ForeignKey(Image, db_column='imageid')
    reloadrequestid = models.IntegerField()

    class Meta:
        db_table = 'blockComputers'
        # A computer appears at most once per block time.
        unique_together = (('blocktimeid', 'computerid'),)


class Blockwebdate(models.Model):
    """Maps ``blockWebDate``: date-range recurrence info entered via the web UI."""

    blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid')  # Field name made lowercase.
    start = models.DateField()
    end = models.DateField()
    days = models.IntegerField(blank=True, null=True)
    weeknum = models.IntegerField(blank=True, null=True)

    class Meta:
        db_table = 'blockWebDate'


class Blockwebtime(models.Model):
    """Maps ``blockWebTime``: time-of-day recurrence info entered via the web UI."""

    blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid')  # Field name made lowercase.
    starthour = models.IntegerField()
    startminute = models.IntegerField()
    startmeridian = models.CharField(max_length=2)
    endhour = models.IntegerField()
    endminute = models.IntegerField()
    endmeridian = models.CharField(max_length=2)
    order = models.IntegerField()

    class Meta:
        db_table = 'blockWebTime'
Seriously Joking: Is Cholesterol really the culprit it is made out to be? T, can you please translate all this wonderful information to English for me? Thanks! And btw, who's this Stephania you're so interested in? Does Nishrin know?
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

from tests.api import test_api


class TestAPIReleases(test_api.TestAPI):
    """Exercise the /api/1.0/releases endpoints against stubbed storage."""

    # Shared fixture used by the listing/search tests.
    _THREE_RELEASES = {'releases': [
        {'release_name': 'prehistory', 'end_date': 1365033600},
        {'release_name': 'havana', 'end_date': 1381968000},
        {'release_name': 'icehouse', 'end_date': 1397692800}]}

    def test_releases_empty(self):
        """The endpoint responds 200 even when runtime storage is empty."""
        with test_api.make_runtime_storage({}):
            resp = self.app.get('/api/1.0/releases')
            self.assertEqual(200, resp.status_code)

    def test_releases(self):
        """All stored releases are listed, plus the synthetic 'all' entry."""
        with test_api.make_runtime_storage(self._THREE_RELEASES):
            resp = self.app.get('/api/1.0/releases')
            listed = json.loads(resp.data)['releases']
            self.assertEqual(3, len(listed))
            self.assertIn({'id': 'all', 'text': 'All'}, listed)
            self.assertIn({'id': 'icehouse', 'text': 'Icehouse'}, listed)

    def test_releases_search(self):
        """The ?query= parameter filters releases by name substring."""
        with test_api.make_runtime_storage(self._THREE_RELEASES):
            resp = self.app.get('/api/1.0/releases?query=hav')
            listed = json.loads(resp.data)['releases']
            self.assertEqual(1, len(listed))
            self.assertIn({'id': 'havana', 'text': 'Havana'}, listed)

    def test_release_details(self):
        """A single release can be fetched by its id."""
        storage = {'releases': [
            {'release_name': 'prehistory', 'end_date': 1365033600},
            {'release_name': 'icehouse', 'end_date': 1397692800}]}
        with test_api.make_runtime_storage(storage):
            resp = self.app.get('/api/1.0/releases/icehouse')
            detail = json.loads(resp.data)['release']
            self.assertEqual({'id': 'icehouse', 'text': 'Icehouse'}, detail)
Reagan’s record on immigration is a poor one. He signed legislation in 1986 that granted amnesty to millions of illegal aliens but also included provisions to prevent future illegal immigration. Predictably, the grant of amnesty “succeeded” while the preventative measures turned out to be a joke. When Reagan signed the Immigration Reform and Control Act back in 1986, he had no reason to suppose that only the “reform” and not the “control” provisions would ever be put into effect. The legislation was tightly drawn. And the new enforcement provisions that it mandated were aggressive–remarkably so, I thought when I looked them over. Perhaps most notably, the legislation required employers throughout the country to verify their workers’ legal status, thus supplementing the single line at the border with a defense in depth. It’s always preferable to agree, rather than disagree, with Peter Robinson. So I’m happy to see that we agree as to what position Ronald Reagan would take today regarding immigration reform. We both think Reagan would insist that the federal government enforce existing immigration laws before enacting new statutes that grant amnesty. Accordingly, we agree that Reagan would have opposed President Bush’s 2006 proposed comprehensive immigration reform proposal. Peter and I also agree that the 1986 immigration reform law that Reagan signed into law was a failure. The “liberal” part of it succeeded; millions of illegal immigrants received amnesty. However, the “conservative” part failed; the provisions designed to prevent future illegal immigration were not enforced. Where Peter and I disagree is on the question of whether, or to what extent, this failure was predictable. I think it was. However, in fairness to Peter, and to Reagan, I admit that things typically seem predictable after they occur. Yet, it’s the lessons I learned from Reagan and some of the intellectuals associated with him that make the failure of the 1986 law seem so unsurprising. 
One such lesson is that the government is very good at conferring status, privileges, and benefits but not very good at administering programs. Earlier this week, I read that one of the agencies involved in the Gulf Coast disaster planning approved a plan that includes details for protecting the Gulf’s walrus population and instructions for contacting a scientist who has been dead for five years. This kind of story was the staple of many a Reagan speech on “the rubber chicken circuit” during the 1960s and 1970s. Why, then, was he confident that the government would effectively enforce the immigration laws? Such confidence seems all the more misplaced when one considers that, unlike disaster planning, immigration enforcement has an ideological dimension. It’s difficult enough for the government to administer complex programs when acting in good faith. When liberal bureaucrats have ideological reservations about what they are asked to do, as I believe was the case with the 1986 legislation, the prospects for success are bleak. Perhaps the Reagan administration in its prime could have ridden herd over the immigration bureaucracy. But the administration was on its way out when the time came to enforce the 1986 act. And it was predictable that a successor administration would indulge in benign neglect or worse. Peter notes that the 1986 legislation was “tightly drawn” and adequately funded. But from Reagan and the Reaganites I learned to be skeptical of claims that tightly drawn laws and lots of funding will affect the world in the ways they are intended to (the law of unintended consequences, and all that). Why wasn’t there more such skepticism when it came to this law? My guess is that the admirable humanitarian side of Reagan got the better of the Reagan conservative in this instance. Peter cites the portion of the 1986 law that required employers to verify their workers’ legal status.
This portion of the law ran counter to both the desire of corporate America to hire whom it pleased without risking penalties and the sensibilities of liberal America, which sympathized profoundly with illegal aliens who wanted to work. As such, it was a long shot, at best. In 1986, as a practitioner of both employment law and (to a slight extent) immigration law, I heard plenty of skepticism from employers about whether employer sanctions would become an entrenched part of the true legal landscape. That skepticism was well-founded. I agree with Peter about the enduring relevance of Reagan. But when it comes to immigration reform, I believe that underlying Reaganite theories about the limits of government are a better guide than his 1986 position on the specific issue at hand.
# # Copyright 2015,2016,2017 Joseph C. Pietras # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import os import sys import re from setuptools import setup def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() def theVersion(): vfile = "svnplus/tagprotect.py" # relative path , file with version string verStr = "" f = open(os.path.join(os.path.dirname(__file__), vfile)) if f: regex = re.compile(r'^VERSION\s*=\s*') for line in f: if regex.match(line) is not None: #print('line: {0}'.format(line), file=sys.stderr) verStr = re.sub(r'\s*$', r'', line) #print('verStr: {0}'.format(verStr), file=sys.stderr) verStr = re.sub(r'^VERSION\s*=\s*', r'', verStr) #print('verStr: {0}'.format(verStr), file=sys.stderr) verStr = re.sub(r'^"(.+)"$', r'\1', verStr) #print('verStr: {0}'.format(verStr), file=sys.stderr) verStr = re.sub(r"^'(.+)'$", r'\1', verStr) #print('verStr: {0}'.format(verStr), file=sys.stderr) break else: print('failed to open will NOT read', file=sys.stderr) if verStr != "": print('version is: {0} from file "{1}"'.format(verStr, vfile), file=sys.stderr) return verStr exit(1) setup(name='svnplus', author_email='joseph.pietras@gmail.com', author='Joseph C. 
Pietras', classifiers=['Intended Audience :: Information Technology', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English'], data_files=[ ("/root/svnplus", ['LICENSE', 'pre-commit', 'pre-commit.conf']) ], description='''This is a subversion hook. It provides a protection mechanism for subversion repositories so that previously committed "tags" are immutable.''', include_package_data=True, keywords='subversion hook tagprotect immutable', license='Apache Software License 2.0', long_description=read('README'), packages=['svnplus'], url='https://github.com/ossCare/svnPlus', version=theVersion(), zip_safe=False)
Distinction Nuit from Eprise by Lise Charmel is new for winter 2018. Beautiful saphir blue embroidered over black with liberal use of beautiful lace. A stunning addition to any winter lingerie collection. One colour, "Nuit Saphir" — a stunning combination of blue on black.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Create the ``Satellite`` model."""

    dependencies = [
        ('core', '0002_auto_20150428_1430'),
    ]

    operations = [
        migrations.CreateModel(
            name='Satellite',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('version', models.IntegerField(default=1)),
                ('name', models.CharField(max_length=140)),
                ('nickname', models.CharField(max_length=140)),
                # null=True allows satellites without an uploaded TLE file.
                ('tle', models.FileField(null=True, upload_to='')),
                # max_length removed: it is not a valid IntegerField option
                # (Django ignores it on integer columns and emits a system
                # check warning), so the generated schema is unchanged.
                ('status', models.IntegerField(default=0, choices=[(0, 'operation status unknown'), (1, 'operational'), (2, 'non operational'), (3, 'partially operational'), (4, 'on standby'), (5, 'spare'), (6, 'extended mission')])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
Watkins Poultry Merchants of New York is proud to raise and supply a large variety of birds to local live markets. Our unique poultry variety helps meet the diverse culinary needs of New York City and its neighbors. Watkins Poultry birds are NEVER given any growth stimulant or hormone- they are all-natural! Our poultry variety meets the needs of diverse culinary traditions. Send your zip code to info@watkinspoultry.com and we will recommend a live poultry market in your area that carries our variety. Our exclusive breed of red chicken is savory and juicy, with good portions of flavorful meat from the breast, wings, and thighs. The Sasso Red Chicken is raised slowly to 3lbs, 5lbs and 8lbs, so perfectly-sized chickens are always available. Discover the difference on The Sasso Red Chicken page. The Little Pilgrim Turkey is an American heritage breed. What makes them unique is that they grow slowly to 12lbs, providing delicious and healthy lean turkey meat all year. The Classic White Chicken is a popular favorite because of its hefty portions of white breast-meat. These chickens are grown naturally to 3lbs, 5lbs and 8lbs, so perfectly-sized chickens are always available. Nothing brings people together like turkey dinner. We grow our turkeys from 10lbs to 25lbs all year long, so a perfectly delicious turkey is always available. View the Standard Turkey gallery. They may look exotic, but Silkies are a commonly cooked chicken. Underneath the Silkie’s distinct fluffy white feathers are skin, meat and bones that have a unique bluish-black hue. Silkies have long been believed in Chinese medicine to have healing properties, but their intense chicken flavor is reason enough to make a delicious soup or dish any time! View the Silkie gallery. Gourmet is easy when this 2lb chicken is brought home fresh from the market. With a taste that is somewhere between chicken and turkey, Guinea Hens enhance the best braises and sauces for a meal that will impress the best of cooks. 
View the Guinea Hen gallery. Coturnix Quail have tender, flavorful meat and also produce many eggs that are used for a variety of gourmet dishes. Watkins Poultry Merchants supplies both the bird and its eggs. View the Coturnix Quail gallery. Because of their lean dark meat, the Muscovy duck is sometimes considered an excellent alternative to veal. With sizes ranging from 5lbs for hens and 10lbs for drakes, the perfect Muscovy duck is always available. View the Muscovy Duck gallery. This duck is excellent for two reasons. Its small 4lb size makes it a convenient easy-to-cook bird, and the hens naturally lay many eggs that are twice the size of chicken eggs. Watkins Poultry Merchants supplies both the Khaki Campbell Duck and its eggs. View the Khaki Campbell “Water” Duck gallery. Red fowl is a common name for egg-laying hens. Their extra-firm meat is perfect for soup-making and slow-cooking because it never loses its taste or texture. View the Red Fowl gallery.
from __future__ import unicode_literals import time import hmac import hashlib import itertools from ..utils import ( ExtractorError, int_or_none, parse_age_limit, parse_iso8601, ) from .common import InfoExtractor class VikiBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/' _API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com' _API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s' _APP = '65535a' _APP_VERSION = '2.2.5.1428709186' _APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)' def _prepare_call(self, path, timestamp=None): path += '?' if '?' not in path else '&' if not timestamp: timestamp = int(time.time()) query = self._API_QUERY_TEMPLATE % (path, self._APP, timestamp) sig = hmac.new( self._APP_SECRET.encode('ascii'), query.encode('ascii'), hashlib.sha1 ).hexdigest() return self._API_URL_TEMPLATE % (query, sig) def _call_api(self, path, video_id, note, timestamp=None): resp = self._download_json( self._prepare_call(path, timestamp), video_id, note) error = resp.get('error') if error: if error == 'invalid timestamp': resp = self._download_json( self._prepare_call(path, int(resp['current_timestamp'])), video_id, '%s (retry)' % note) error = resp.get('error') if error: self._raise_error(resp['error']) return resp def _raise_error(self, error): raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error), expected=True) class VikiIE(VikiBaseIE): IE_NAME = 'viki' _VALID_URL = r'%s(?:videos|player)/(?P<id>[0-9]+v)' % VikiBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14', 'info_dict': { 'id': '1023585v', 'ext': 'mp4', 'title': 'Heirs Episode 14', 'uploader': 'SBS', 'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e', 'upload_date': '20131121', 'age_limit': 13, }, 'skip': 'Blocked in the US', }, { # clip 'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference', 'md5': '86c0b5dbd4d83a6611a79987cc7a1989', 
'info_dict': { 'id': '1067139v', 'ext': 'mp4', 'title': "'The Avengers: Age of Ultron' Press Conference", 'description': 'md5:d70b2f9428f5488321bfe1db10d612ea', 'duration': 352, 'timestamp': 1430380829, 'upload_date': '20150430', 'uploader': 'Arirang TV', 'like_count': int, 'age_limit': 0, } }, { 'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi', 'info_dict': { 'id': '1048879v', 'ext': 'mp4', 'title': 'Ankhon Dekhi', 'duration': 6512, 'timestamp': 1408532356, 'upload_date': '20140820', 'uploader': 'Spuul', 'like_count': int, 'age_limit': 13, }, 'params': { # m3u8 download 'skip_download': True, } }, { # episode 'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1', 'md5': '190f3ef426005ba3a080a63325955bc3', 'info_dict': { 'id': '44699v', 'ext': 'mp4', 'title': 'Boys Over Flowers - Episode 1', 'description': 'md5:52617e4f729c7d03bfd4bcbbb6e946f2', 'duration': 4155, 'timestamp': 1270496524, 'upload_date': '20100405', 'uploader': 'group8', 'like_count': int, 'age_limit': 13, } }, { # youtube external 'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1', 'md5': '216d1afdc0c64d1febc1e9f2bd4b864b', 'info_dict': { 'id': '50562v', 'ext': 'mp4', 'title': 'Poor Nastya [COMPLETE] - Episode 1', 'description': '', 'duration': 607, 'timestamp': 1274949505, 'upload_date': '20101213', 'uploader': 'ad14065n', 'uploader_id': 'ad14065n', 'like_count': int, 'age_limit': 13, } }, { 'url': 'http://www.viki.com/player/44699v', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._call_api( 'videos/%s.json' % video_id, video_id, 'Downloading video JSON') title = None titles = video.get('titles') if titles: title = titles.get('en') or titles[titles.keys()[0]] if not title: title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id container_titles = video.get('container', {}).get('titles') if container_titles: container_title = 
container_titles.get('en') or container_titles[container_titles.keys()[0]] title = '%s - %s' % (container_title, title) descriptions = video.get('descriptions') description = descriptions.get('en') or descriptions[titles.keys()[0]] if descriptions else None duration = int_or_none(video.get('duration')) timestamp = parse_iso8601(video.get('created_at')) uploader = video.get('author') like_count = int_or_none(video.get('likes', {}).get('count')) age_limit = parse_age_limit(video.get('rating')) thumbnails = [] for thumbnail_id, thumbnail in video.get('images', {}).items(): thumbnails.append({ 'id': thumbnail_id, 'url': thumbnail.get('url'), }) subtitles = {} for subtitle_lang, _ in video.get('subtitle_completions', {}).items(): subtitles[subtitle_lang] = [{ 'ext': subtitles_format, 'url': self._prepare_call( 'videos/%s/subtitles/%s.%s' % (video_id, subtitle_lang, subtitles_format)), } for subtitles_format in ('srt', 'vtt')] result = { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'uploader': uploader, 'like_count': like_count, 'age_limit': age_limit, 'thumbnails': thumbnails, 'subtitles': subtitles, } streams = self._call_api( 'videos/%s/streams.json' % video_id, video_id, 'Downloading video streams JSON') if 'external' in streams: result.update({ '_type': 'url_transparent', 'url': streams['external']['url'], }) return result formats = [] for format_id, stream_dict in streams.items(): height = self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None) for protocol, format_dict in stream_dict.items(): if format_id == 'm3u8': formats = self._extract_m3u8_formats( format_dict['url'], video_id, 'mp4', m3u8_id='m3u8-%s' % protocol) else: formats.append({ 'url': format_dict['url'], 'format_id': '%s-%s' % (format_id, protocol), 'height': height, }) self._sort_formats(formats) result['formats'] = formats return result class VikiChannelIE(VikiBaseIE): IE_NAME = 'viki:channel' _VALID_URL = 
r'%s(?:tv|news|movies|artists)/(?P<id>[0-9]+c)' % VikiBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'http://www.viki.com/tv/50c-boys-over-flowers', 'info_dict': { 'id': '50c', 'title': 'Boys Over Flowers', 'description': 'md5:ecd3cff47967fe193cff37c0bec52790', }, 'playlist_count': 70, }, { 'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete', 'info_dict': { 'id': '1354c', 'title': 'Poor Nastya [COMPLETE]', 'description': 'md5:05bf5471385aa8b21c18ad450e350525', }, 'playlist_count': 127, }, { 'url': 'http://www.viki.com/news/24569c-showbiz-korea', 'only_matching': True, }, { 'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005', 'only_matching': True, }, { 'url': 'http://www.viki.com/artists/2141c-shinee', 'only_matching': True, }] _PER_PAGE = 25 def _real_extract(self, url): channel_id = self._match_id(url) channel = self._call_api( 'containers/%s.json' % channel_id, channel_id, 'Downloading channel JSON') titles = channel['titles'] title = titles.get('en') or titles[titles.keys()[0]] descriptions = channel['descriptions'] description = descriptions.get('en') or descriptions[descriptions.keys()[0]] entries = [] for video_type in ('episodes', 'clips', 'movies'): for page_num in itertools.count(1): page = self._call_api( 'containers/%s/%s.json?per_page=%d&sort=number&direction=asc&with_paging=true&page=%d' % (channel_id, video_type, self._PER_PAGE, page_num), channel_id, 'Downloading %s JSON page #%d' % (video_type, page_num)) for video in page['response']: video_id = video['id'] entries.append(self.url_result( 'http://www.viki.com/videos/%s' % video_id, 'Viki')) if not page['pagination']['next']: break return self.playlist_result(entries, channel_id, title, description)
It’s easy to peel biscuits from a sheet of baking paper – each has a curve around its flat bottom where fat has greased the base. Similar physics shaped these ‘supraparticles’, each 100 times smaller than an Amaretti biscuit, yet made from a blob of nanoparticles mixed with a secret ingredient – the Greek spirit Ouzo. As water and alcohol in the mixture evaporate, anise oil – which gives Ouzo its aniseed flavour – lubricates the base of the particle. Each perfect macaroon-like shape can be lifted away without the risk of sticking or 'pinning' which might otherwise spoil its structure. Tweaking the balance of oil in the mixture creates different hollows within the porous particles, which could soon be piped full of drug compounds for delivery inside the body.
class Queue(object):
    """FIFO queue built from two LIFO stacks (amortized O(1) per operation).

    ``stack1`` receives pushes; ``stack2`` holds elements in pop order and
    is refilled from ``stack1`` only when it runs empty, so each element is
    moved at most once.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.stack1 = []
        self.stack2 = []

    def push(self, x):
        """
        :type x: int
        :rtype: nothing
        """
        self.stack1.append(x)

    def _shift(self):
        # Refill the output stack from the input stack, but only when it is
        # empty — doing so earlier would break FIFO order.
        if not self.stack2:
            while self.stack1:
                self.stack2.append(self.stack1.pop())

    def pop(self):
        """
        Remove the element at the front of the queue.
        :rtype: nothing
        """
        self._shift()
        self.stack2.pop()

    def peek(self):
        """
        Return (without removing) the element at the front of the queue.
        :rtype: int
        """
        self._shift()
        return self.stack2[-1]

    def empty(self):
        """
        :rtype: bool
        """
        return not self.stack1 and not self.stack2


if __name__ == '__main__':
    # print() form is valid on both Python 2 and 3; the original used
    # Python-2-only "print q.peek()" statements, a syntax error on Python 3.
    q = Queue()
    q.push(1)
    print(q.peek())
How can you boost sales of your products with our Premium Private Affiliate Program? Would you like to promote your products using the most effective form of online marketing, without having to invest a lot of money upfront or employ additional staff, but you have no idea where to start? Read six of the most common problems of entrepreneurs who are looking into promoting their products using affiliate marketing and find out how we can help you solve them. Take advantage of the sophisticated offer of Affee360, created for entrepreneurs who want to take their sales to the next level and reach with your offer to thousands of new clients. Problem One: Lack of awareness and most basic knowledge about Affiliate Marketing. For many entrepreneurs, affiliate marketing…. Doesn’t really exist! A lot of them have no idea not only about affiliate marketing itself but also about the potential of promoting a product with the help of affiliates – not to mention that it could help them sell their goods and services outside of their home market. Moreover, many business owners believe that running an affiliate program requires a massive investment and employing many new employees. Solution: If you don’t know how to go about affiliate marketing, contact us – we will get you through the whole process, step-by-step, helping you find publishers who will promote your products and attract a lot of new clients to your business. We will also evaluate your products and prepare a personalised marketing strategy, tailored specifically to your business. We offer you a full affiliate program management, from start to finish. Most of the platforms that provide affiliate services leave entrepreneurs without any guidance or help. The most common form of “promotion” within the platform are newsletters containing the newest offers, which are sent to all affiliates without any extra targeting. Unfortunately, that’s not enough to encourage the best affiliates to promote your product or service. 
This leads to a situation in which your product gets lost in the pile of other offers, and lack of any marketing activity and statistics discourages affiliates from promoting it. Solution: At Affee360 we make sure that every product reaches the right publishers. Our employees take of introducing business owners and their products to publishers – thanks to that, your offer will always be actively promoted, and you will get new clients. Problem 3: The product is “unfit” for Affiliate Marketing. Many business people would like to take advantage of possibilities that come with affiliate marketing. Unfortunately, on a typical platform, their products are ignored, because they sell rarely – usually, this is caused by the cost (it’s harder to sell upscale products online, even despite a higher commission) or the character of the niche. Publishers prefer products that are affordable and generate much more frequent sales. Solution: Thanks to years of experience of our team, we are capable of evaluating each product and introducing it to appropriate publishers. This allows us to find people who will be interested in promoting even those products which by inexperienced marketers could be seen as highly unattractive. Some of the publishers use unethical promotion methods – they resort to spam or create sites with little to no quality content which are then artificially boosted in search engine results, usually using blackhat SEO techniques. Others, create websites full of fake reviews or develop “review” videos that could potentially harm your brand. Solution: Strict selection of publishers who want to promote your product. At Affee360, we value professionalism – we work with big online websites as well as companies and people who are reputable in the industry. We know how much work you put into creating your brand, that’s why we allow only ethical and legal forms of promotion. Your product is good. It’s excellent. 
Unfortunately, only you and a bunch of your clients who have used it so far, know it. Publishers who you contact are not interested in taking the risk and, as a result, you are unable to find anyone willing to promote it. This stops you from expanding your business and achieving the economy of scale, what would be possible if there were quality publishers behind your product. You waste both your money and time. Solution: At Affee360 we evaluate which products can take advantage of affiliate marketing. Using our network of publishers, we are capable of quickly finding the right people to promote your product. And you will save a lot of time, which you’ll be able to spend developing and enhancing your product and business. Some of the above problems, for example, low quality of service in an open affiliate platform, could be overcome with an in-house affiliate system. Unfortunately, unless you are a big enterprise with an established reputation, finding publishers (especially those that really know how to convert their audience) who would be willing to promote your product is nearly impossible. That’s why most in-house affiliate programs are closed down even before they really started. The second prevalent problem is the lack of experienced staff to manage such program. Contrary to what some people think, it’s very complicated and experience demanding process, both when it comes to sales and marketing. Solution: Leave your affiliate strategy to our team and sign up for the Premium Private Affiliate Program. By registering and getting approved, you don’t have to create any affiliate infrastructure or employ dedicated employees to manage your in-house affiliate system. You don’t have to lose resources to get in touch with potential publishers either. With Affee360, you get the full package -everything, that you have to do is make sure you can deal with increased sales of your products. Why Affee360 and our Premium Private Affiliate Program? 
Complete, premium-level sales and management services. We will match the best publishers to your products and make sure that you get both the right communication and marketing materials that will guarantee high sales of your product. Help with expansion into new markets and access to thousands of new clients. Thanks to our industry contacts and experience, we can help you get across publishers (and thanks to them – across new customers) around the world. – Brand protection – We diligently analyse the forms of promotion used by publishers who work with us. This way, we can be sure that your brand will not be harmed. Instead, it will become recognisable and benefit from appearing in reputable online marketing channels. Win-win principles in negotiations – we believe that the foundation of every business is keeping long-lasting and transparent relations with stakeholders. That’s why, all negotiations between entrepreneurs and publishers are conducted with the win-win principles in mind so, that the conditions of cooperation are beneficial for both parties. Competitor analysis – Your competitors never sleep. That’s why we take a closer look at the strategies and actions of other companies. This way we can be one step ahead. We will take your business to the world of the most profitable form of online promotion. We are 100% confident that you want your business to grow and your profits to increase. Apply now to the Premium Private Affiliate Program. We will take you through the registration process step-by-step and a manager assigned to you will soon analyse your offer and help you get your first publishers, eager to promote your product. Are you ready for new clients?
"""This is page for showing the high scores""" import pygame import sys import os import serge.visual import serge.actor import serge.common import serge.sound import serge.engine import serge.blocks.visualblocks import serge.blocks.scores import game import common from theme import get as G ### The high score table ### class ScoresPage(serge.actor.Actor, common.MoveableBackground): """Show the scores page""" def __init__(self, game, world): """Initialise the page""" super(ScoresPage, self).__init__('scores') # self.addLogo(world) # self.actions = serge.blocks.layout.HorizontalBar('actions', height=100) self.actions.moveTo(320, 400) self.actions.setLayerName('ui') world.addActor(self.actions) # #b = serge.actor.Actor('button', 'return') #b.visual = serge.blocks.visualblocks.SpriteText('Return', BUTTON_TEXT, 'button_back', font_size=BIG_TEXT) #b.linkEvent('left-click', self.handleReturn) #self.actions.addActor(b) # b = serge.actor.Actor('button', 'play') b.visual = serge.blocks.visualblocks.SpriteText('Play', G('text-button-colour'), 'button_back', font_size=G('large-text-size')) b.linkEvent('left-click', self.handlePlay) self.actions.addActor(b) # b = serge.actor.Actor('button', 'reset') b.visual = serge.blocks.visualblocks.SpriteText('Reset', G('text-button-colour'), 'button_back', font_size=G('large-text-size')) b.linkEvent('left-click', self.handleReset) self.actions.addActor(b) # b = serge.actor.Actor('button', 'quit') b.visual = serge.blocks.visualblocks.SpriteText('Quit', G('text-button-colour'), 'button_back', font_size=G('large-text-size')) b.linkEvent('left-click', self.handleQuit) self.actions.addActor(b) # self.background = serge.actor.Actor('scores-page') self.background.setSpriteName('scores-page') self.background.moveTo(320, 240) self.background.setLayerName('course') world.addActor(self.background) # # The scores self.shots = serge.blocks.layout.VerticalBar('ui-grid', width=G('score-grid-width'), height=G('score-grid-height')) 
self.shots.setOrigin(G('score-grid-offset-x'), G('score-grid-offset-y')) self.shots.setLayerName('ui') t = serge.actor.Actor('text', 'header') t.visual = serge.visual.Text('', G('text-button-colour'), font_size=G('normal-text-size'), justify='left') self.shots.addActor(t) # self.shots_row = [] for row in range(5): t = serge.actor.Actor('text', row) t.visual = serge.visual.Text('', G('text-button-colour'), font_size=G('large-text-size'), justify='left') self.shots.addActor(t) self.shots_row.append(t) world.addActor(self.shots) # self.game = game self.world = world # self.setUpTable() self.game_start = None # self.addEffects() def setUpTable(self): """Set up the high score table""" var = 'HOME' if not sys.platform.startswith('win') else 'HOMEPATH' self.score_filename = os.path.join(os.getenv(var), '.bogolf.scores') if os.path.isfile(self.score_filename): self.log.info('Loading scores from %s' % self.score_filename) self.table = serge.serialize.Serializable.fromFile(self.score_filename) else: self.log.info('New scores file at %s' % self.score_filename) self.resetTable() def saveTable(self): """Save the high score table""" self.table.toFile(self.score_filename) def handleReturn(self, obj, arg): """Handle that we requested to return""" serge.sound.Register.playSound('letter') self.world.getEngine().setCurrentWorldByName('end') def handlePlay(self, obj, arg): """Handle that we requested to play""" serge.sound.Register.playSound('letter') self.world.getEngine().setCurrentWorldByName('start') def handleQuit(self, obj, arg): """Handle clicking on quit""" self.log.info('Quiting now') serge.sound.Register.playSound('end-game') serge.engine.CurrentEngine().stop() def handleReset(self, obj, arg): """Handle clicking on reset""" self.log.info('Resetting high scores') serge.sound.Register.playSound('letter') self.table.resetCategory('%s - %d holes - shots' % (self.gamestart.selected_game_name, self.gamestart.selected_holes)) self.updateTable() def resetTable(self): """Reset the 
scores table""" self.table = serge.blocks.scores.HighScoreTable() for game in (('easy', 'medium', 'hard')): for holes in (1, 3, 6, 9, 12, 15, 18): self.table.addCategory('%s - %d holes - shots' % (game, holes), number=5, sort_columns=[1,2], directions=['descending', 'descending']) self.table.addCategory('%s - %d holes - time' % (game, holes), number=5, sort_columns=[2,1], directions=['descending', 'descending']) self.saveTable() def activateWorld(self): """When we are activated""" if self.gamestart: self.updateTable() def updateTable(self): """Update the current scores table""" for row in range(5): self.shots_row[row].setText('') results = self.table.getCategory('%s - %d holes - shots' % (self.gamestart.selected_game_name, self.gamestart.selected_holes)) for row, (name, shots, time, date) in enumerate(results): if shots == 0: result = 'Even par' else: result = '%d %s par' % (abs(shots), 'over' if shots > 0 else 'under') if self.pad.this_score == row+1: self.shots_row[row].visual.setColour((255,255,255)) else: self.shots_row[row].visual.setColour(G('text-button-colour')) self.shots_row[row].setText('%d - %s in %s seconds' % (row+1, result, self.niceTime(time)))
Surgical glue made from nanoparticles is super sticky and traceable by ultrasound. Even the least intrusive operations can damage tissue. Stitches and staples are too cumbersome for many procedures and there is a need for strong and safe surgical glues that can be seen on medical scans to help guide non-intrusive surgery. A team led by researchers from the Institute for Basic Science in Seoul made a surgical adhesive using nanoparticles of tantalum oxide, which are opaque to x-rays and ultrasounds, wrapped inside a sticky silica shell. The team used their ‘nanoglue’ to seal a punctured liver in a rabbit. The glue proved as sticky as an approved adhesive, but with fewer side-effects such as inflammation. The team also designed a fluorescent version of the nanoparticles and used them to guide lung surgery on a rat. Multipurpose nanoparticles such as these could help safely seal surgical wounds and may have a future in drug delivery and disease diagnosis.
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ovirt_vms short_description: Module to manage Virtual Machines in oVirt/RHV version_added: "2.2" author: - Ondra Machacek (@machacekondra) description: - This module manages whole lifecycle of the Virtual Machine(VM) in oVirt/RHV. - Since VM can hold many states in oVirt/RHV, please see notes to see how the states of the VM are handled. options: name: description: - Name of the Virtual Machine to manage. - If VM doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used. id: description: - ID of the Virtual Machine to manage. state: description: - Should the Virtual Machine be running/stopped/present/absent/suspended/next_run/registered. When C(state) is I(registered) and the unregistered VM's name belongs to an already registered in engine VM in the same DC then we fail to register the unregistered template. - I(present) state will create/update VM and don't change its state if it already exists. - I(running) state will create/update VM and start it. - I(next_run) state updates the VM and if the VM has next run configuration it will be rebooted. - Please check I(notes) for a more detailed description of states. - I(registered) is supported since 2.4. choices: [ absent, next_run, present, registered, running, stopped, suspended ] default: present cluster: description: - Name of the cluster, where Virtual Machine should be created. - Required if creating VM. allow_partial_import: description: - Boolean indication whether to allow partial registration of Virtual Machine when C(state) is registered. 
version_added: "2.4" vnic_profile_mappings: description: - "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered. vnic_profile is described by the following dictionary:" - "C(source_network_name): The network name of the source network." - "C(source_profile_name): The profile name related to the source network." - "C(target_profile_id): The id of the target profile to be mapped to in the engine." version_added: "2.5" cluster_mappings: description: - "Mapper which maps cluster name between VM's OVF and the destination cluster this VM should be registered to, relevant when C(state) is registered. Cluster mapping is described by the following dictionary:" - "C(source_name): The name of the source cluster." - "C(dest_name): The name of the destination cluster." version_added: "2.5" role_mappings: description: - "Mapper which maps role name between VM's OVF and the destination role this VM should be registered to, relevant when C(state) is registered. Role mapping is described by the following dictionary:" - "C(source_name): The name of the source role." - "C(dest_name): The name of the destination role." version_added: "2.5" domain_mappings: description: - "Mapper which maps aaa domain name between VM's OVF and the destination aaa domain this VM should be registered to, relevant when C(state) is registered. The aaa domain mapping is described by the following dictionary:" - "C(source_name): The name of the source aaa domain." - "C(dest_name): The name of the destination aaa domain." version_added: "2.5" affinity_group_mappings: description: - "Mapper which maps affinity name between VM's OVF and the destination affinity this VM should be registered to, relevant when C(state) is registered." version_added: "2.5" affinity_label_mappings: description: - "Mapper which maps affinity label name between VM's OVF and the destination label this VM should be registered to, relevant when C(state) is registered." 
version_added: "2.5" lun_mappings: description: - "Mapper which maps lun between VM's OVF and the destination lun this VM should contain, relevant when C(state) is registered. lun_mappings is described by the following dictionary: - C(logical_unit_id): The logical unit number to identify a logical unit, - C(logical_unit_port): The port being used to connect with the LUN disk. - C(logical_unit_portal): The portal being used to connect with the LUN disk. - C(logical_unit_address): The address of the block storage host. - C(logical_unit_target): The iSCSI specification located on an iSCSI server - C(logical_unit_username): Username to be used to connect to the block storage host. - C(logical_unit_password): Password to be used to connect to the block storage host. - C(storage_type): The storage type which the LUN resides on (iscsi or fcp)" version_added: "2.5" reassign_bad_macs: description: - "Boolean indication whether to reassign bad macs when C(state) is registered." version_added: "2.5" template: description: - Name of the template, which should be used to create Virtual Machine. - Required if creating VM. - If template is not specified and VM doesn't exist, VM will be created from I(Blank) template. template_version: description: - Version number of the template to be used for VM. - By default the latest available version of the template is used. version_added: "2.3" use_latest_template_version: description: - Specify if latest template version should be used, when running a stateless VM. - If this parameter is set to I(yes) stateless VM is created. type: bool version_added: "2.3" storage_domain: description: - Name of the storage domain where all template disks should be created. - This parameter is considered only when C(template) is provided. - IMPORTANT - This parameter is not idempotent, if the VM exists and you specify different storage domain, disk won't move. version_added: "2.4" disk_format: description: - Specify format of the disk. 
- If C(cow) format is used, disk will by created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision). - If C(raw) format is used, disk storage will be allocated right away, also known as I(preallocated). - Note that this option isn't idempotent as it's not currently possible to change format of the disk via API. - This parameter is considered only when C(template) and C(storage domain) is provided. choices: [ cow, raw ] default: cow version_added: "2.4" memory: description: - Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB). - Default value is set by engine. memory_guaranteed: description: - Amount of minimal guaranteed memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB). - C(memory_guaranteed) parameter can't be lower than C(memory) parameter. - Default value is set by engine. cpu_shares: description: - Set a CPU shares for this Virtual Machine. - Default value is set by oVirt/RHV engine. cpu_cores: description: - Number of virtual CPUs cores of the Virtual Machine. - Default value is set by oVirt/RHV engine. cpu_sockets: description: - Number of virtual CPUs sockets of the Virtual Machine. - Default value is set by oVirt/RHV engine. cpu_threads: description: - Number of virtual CPUs sockets of the Virtual Machine. - Default value is set by oVirt/RHV engine. version_added: "2.5" type: description: - Type of the Virtual Machine. - Default value is set by oVirt/RHV engine. choices: [ desktop, server ] operating_system: description: - Operating system of the Virtual Machine. - Default value is set by oVirt/RHV engine. 
choices: - debian_7 - freebsd - freebsdx64 - other - other_linux - other_linux_ppc64 - other_ppc64 - rhel_3 - rhel_4 - rhel_4x64 - rhel_5 - rhel_5x64 - rhel_6 - rhel_6x64 - rhel_6_ppc64 - rhel_7x64 - rhel_7_ppc64 - sles_11 - sles_11_ppc64 - ubuntu_12_04 - ubuntu_12_10 - ubuntu_13_04 - ubuntu_13_10 - ubuntu_14_04 - ubuntu_14_04_ppc64 - windows_10 - windows_10x64 - windows_2003 - windows_2003x64 - windows_2008 - windows_2008x64 - windows_2008r2x64 - windows_2008R2x64 - windows_2012x64 - windows_2012R2x64 - windows_7 - windows_7x64 - windows_8 - windows_8x64 - windows_xp boot_devices: description: - List of boot devices which should be used to boot. For example C([ cdrom, hd ]). - Default value is set by oVirt/RHV engine. choices: [ cdrom, hd, network ] host: description: - Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler. - This parameter is used only when C(state) is I(running) or I(present). high_availability: description: - If I(yes) Virtual Machine will be set as highly available. - If I(no) Virtual Machine won't be set as highly available. - If no value is passed, default value is set by oVirt/RHV engine. type: bool lease: description: - Name of the storage domain this virtual machine lease reside on. - NOTE - Supported since oVirt 4.1. version_added: "2.4" delete_protected: description: - If I(yes) Virtual Machine will be set as delete protected. - If I(no) Virtual Machine won't be set as delete protected. - If no value is passed, default value is set by oVirt/RHV engine. stateless: description: - If I(yes) Virtual Machine will be set as stateless. - If I(no) Virtual Machine will be unset as stateless. - If no value is passed, default value is set by oVirt/RHV engine. clone: description: - If I(yes) then the disks of the created virtual machine will be cloned and independent of the template. - This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before. 
type: bool default: 'no' clone_permissions: description: - If I(yes) then the permissions of the template (only the direct ones, not the inherited ones) will be copied to the created virtual machine. - This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before. type: bool default: 'no' cd_iso: description: - ISO file from ISO storage domain which should be attached to Virtual Machine. - If you pass empty string the CD will be ejected from VM. - If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM. - If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently. force: description: - Please check the I(Synopsis) for a more detailed description of the force parameter, it can behave differently in different situations. type: bool default: 'no' nics: description: - List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary. - C(name) - Name of the NIC. - C(profile_name) - Profile name where NIC should be attached. - C(interface) - Type of the network interface. One of following I(virtio), I(e1000), I(rtl8139), default is I(virtio). - C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool. - NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs. To manage NICs of the VM in more depth please use M(ovirt_nics) module instead. disks: description: - List of disks, which should be attached to Virtual Machine. Disk is described by following dictionary. - C(name) - Name of the disk. Either C(name) or C(id) is required. - C(id) - ID of the disk. Either C(name) or C(id) is required. - C(interface) - Interface of the disk, either I(virtio) or I(IDE), default is I(virtio). - C(bootable) - I(True) if the disk should be bootable, default is non bootable. 
- C(activate) - I(True) if the disk should be activated, default is activated. - NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks. To manage disks of the VM in more depth please use M(ovirt_disks) module instead. sysprep: description: - Dictionary with values for Windows Virtual Machine initialization using sysprep. - C(host_name) - Hostname to be set to Virtual Machine when deployed. - C(active_directory_ou) - Active Directory Organizational Unit, to be used for login of user. - C(org_name) - Organization name to be set to Windows Virtual Machine. - C(domain) - Domain to be set to Windows Virtual Machine. - C(timezone) - Timezone to be set to Windows Virtual Machine. - C(ui_language) - UI language of the Windows Virtual Machine. - C(system_locale) - System localization of the Windows Virtual Machine. - C(input_locale) - Input localization of the Windows Virtual Machine. - C(windows_license_key) - License key to be set to Windows Virtual Machine. - C(user_name) - Username to be used for set password to Windows Virtual Machine. - C(root_password) - Password to be set for username to Windows Virtual Machine. cloud_init: description: - Dictionary with values for Unix-like Virtual Machine initialization using cloud init. - C(host_name) - Hostname to be set to Virtual Machine when deployed. - C(timezone) - Timezone to be set to Virtual Machine when deployed. - C(user_name) - Username to be used to set password to Virtual Machine when deployed. - C(root_password) - Password to be set for user specified by C(user_name) parameter. - C(authorized_ssh_keys) - Use this SSH keys to login to Virtual Machine. - C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine. - C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the cloud-init script generated by any other options. 
- C(dns_servers) - DNS servers to be configured on Virtual Machine. - C(dns_search) - DNS search domains to be configured on Virtual Machine. - C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static). - C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine. - C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine. - C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine. - C(nic_name) - Set name to network interface of Virtual Machine. - C(nic_on_boot) - If I(True) network interface will be set to start on boot. cloud_init_nics: description: - List of dictionaries representing network interfaces to be set up by cloud init. - This option is used, when user needs to setup more network interfaces via cloud init. - If one network interface is enough, user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters are merged with C(cloud_init_nics) parameters. - Dictionary can contain following values. - C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static). - C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine. - C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine. - C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine. - C(nic_name) - Set name to network interface of Virtual Machine. - C(nic_on_boot) - If I(True) network interface will be set to start on boot. version_added: "2.3" kernel_path: description: - Path to a kernel image used to boot the virtual machine. - Kernel image must be stored on either the ISO domain or on the host's storage. 
version_added: "2.3" initrd_path: description: - Path to an initial ramdisk to be used with the kernel specified by C(kernel_path) option. - Ramdisk image must be stored on either the ISO domain or on the host's storage. version_added: "2.3" kernel_params: description: - Kernel command line parameters (formatted as string) to be used with the kernel specified by C(kernel_path) option. version_added: "2.3" instance_type: description: - Name of virtual machine's hardware configuration. - By default no instance type is used. version_added: "2.3" description: description: - Description of the Virtual Machine. version_added: "2.3" comment: description: - Comment of the Virtual Machine. version_added: "2.3" timezone: description: - Sets time zone offset of the guest hardware clock. - For example C(Etc/GMT) version_added: "2.3" serial_policy: description: - Specify a serial number policy for the Virtual Machine. - Following options are supported. - C(vm) - Sets the Virtual Machine's UUID as its serial number. - C(host) - Sets the host's UUID as the Virtual Machine's serial number. - C(custom) - Allows you to specify a custom serial number in C(serial_policy_value). version_added: "2.3" serial_policy_value: description: - Allows you to specify a custom serial number. - This parameter is used only when C(serial_policy) is I(custom). version_added: "2.3" vmware: description: - Dictionary of values to be used to connect to VMware and import a virtual machine to oVirt. - Dictionary can contain following values. - C(username) - The username to authenticate against the VMware. - C(password) - The password to authenticate against the VMware. - C(url) - The URL to be passed to the I(virt-v2v) tool for conversion. For example I(vpx://wmware_user@vcenter-host/DataCenter/Cluster/esxi-host?no_verify=1) - C(drivers_iso) - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process. 
- C(sparse) - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated. Default value is I(true). - C(storage_domain) - Specifies the target storage domain for converted disks. This is required parameter. version_added: "2.3" xen: description: - Dictionary of values to be used to connect to XEN and import a virtual machine to oVirt. - Dictionary can contain following values. - C(url) - The URL to be passed to the I(virt-v2v) tool for conversion. For example I(xen+ssh://root@zen.server). This is required parameter. - C(drivers_iso) - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process. - C(sparse) - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated. Default value is I(true). - C(storage_domain) - Specifies the target storage domain for converted disks. This is required parameter. version_added: "2.3" kvm: description: - Dictionary of values to be used to connect to kvm and import a virtual machine to oVirt. - Dictionary can contain following values. - C(name) - The name of the KVM virtual machine. - C(username) - The username to authenticate against the KVM. - C(password) - The password to authenticate against the KVM. - C(url) - The URL to be passed to the I(virt-v2v) tool for conversion. For example I(qemu:///system). This is required parameter. - C(drivers_iso) - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process. - C(sparse) - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated. Default value is I(true). - C(storage_domain) - Specifies the target storage domain for converted disks. This is required parameter. version_added: "2.3" notes: - If VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail. 
If VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for VM to be I(DOWN). If VM is in I(SAVING_STATE) state before any operation, we try to wait for VM to be I(SUSPENDED). If VM is in I(POWERING_DOWN) state before any operation, we try to wait for VM to be I(UP) or I(DOWN). VM can get into I(UP) state from I(POWERING_DOWN) state, when there is no ACPI or guest agent running inside VM, or if the shutdown operation fails. When user specify I(UP) C(state), we always wait to VM to be in I(UP) state in case VM is I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). In other states we run start operation on VM. When user specify I(stopped) C(state), and If user pass C(force) parameter set to I(true) we forcibly stop the VM in any state. If user don't pass C(force) parameter, we always wait to VM to be in UP state in case VM is I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). If VM is in I(PAUSED) or I(SUSPENDED) state, we start the VM. Then we gracefully shutdown the VM. When user specify I(suspended) C(state), we always wait to VM to be in UP state in case VM is I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). If VM is in I(PAUSED) or I(DOWN) state, we start the VM. Then we suspend the VM. When user specify I(absent) C(state), we forcibly stop the VM in any state and remove it. 
extends_documentation_fragment: ovirt ''' EXAMPLES = ''' # Examples don't contain auth parameter for simplicity, # look at ovirt_auth module to see how to reuse authentication: - name: Creates a new Virtual Machine from template named 'rhel7_template' ovirt_vms: state: present name: myvm template: rhel7_template - name: Register VM ovirt_vms: state: registered storage_domain: mystorage cluster: mycluster name: myvm - name: Register VM using id ovirt_vms: state: registered storage_domain: mystorage cluster: mycluster id: 1111-1111-1111-1111 - name: Register VM, allowing partial import ovirt_vms: state: registered storage_domain: mystorage allow_partial_import: "True" cluster: mycluster id: 1111-1111-1111-1111 - name: Register VM with vnic profile mappings and reassign bad macs ovirt_vms: state: registered storage_domain: mystorage cluster: mycluster id: 1111-1111-1111-1111 vnic_profile_mappings: - source_network_name: mynetwork source_profile_name: mynetwork target_profile_id: 3333-3333-3333-3333 - source_network_name: mynetwork2 source_profile_name: mynetwork2 target_profile_id: 4444-4444-4444-4444 reassign_bad_macs: "True" - name: Register VM with mappings ovirt_vms: state: registered storage_domain: mystorage cluster: mycluster id: 1111-1111-1111-1111 role_mappings: - source_name: Role_A dest_name: Role_B domain_mappings: - source_name: Domain_A dest_name: Domain_B lun_mappings: - source_storage_type: iscsi source_logical_unit_id: 1IET_000d0001 source_logical_unit_port: 3260 source_logical_unit_portal: 1 source_logical_unit_address: 10.34.63.203 source_logical_unit_target: iqn.2016-08-09.brq.str-01:omachace dest_storage_type: iscsi dest_logical_unit_id: 1IET_000d0002 dest_logical_unit_port: 3260 dest_logical_unit_portal: 1 dest_logical_unit_address: 10.34.63.204 dest_logical_unit_target: iqn.2016-08-09.brq.str-02:omachace affinity_group_mappings: - source_name: Affinity_A dest_name: Affinity_B affinity_label_mappings: - source_name: Label_A dest_name: Label_B 
cluster_mappings: - source_name: cluster_A dest_name: cluster_B - name: Creates a stateless VM which will always use latest template version ovirt_vms: name: myvm template: rhel7 cluster: mycluster use_latest_template_version: true # Creates a new server rhel7 Virtual Machine from Blank template # on brq01 cluster with 2GiB memory and 2 vcpu cores/sockets # and attach bootable disk with name rhel7_disk and attach virtio NIC - ovirt_vms: state: present cluster: brq01 name: myvm memory: 2GiB cpu_cores: 2 cpu_sockets: 2 cpu_shares: 1024 type: server operating_system: rhel_7x64 disks: - name: rhel7_disk bootable: True nics: - name: nic1 - name: Run VM with cloud init ovirt_vms: name: rhel7 template: rhel7 cluster: Default memory: 1GiB high_availability: true cloud_init: nic_boot_protocol: static nic_ip_address: 10.34.60.86 nic_netmask: 255.255.252.0 nic_gateway: 10.34.63.254 nic_name: eth1 nic_on_boot: true host_name: example.com custom_script: | write_files: - content: | Hello, world! path: /tmp/greeting.txt permissions: '0644' user_name: root root_password: super_password - name: Run VM with cloud init, with multiple network interfaces ovirt_vms: name: rhel7_4 template: rhel7 cluster: mycluster cloud_init_nics: - nic_name: eth0 nic_boot_protocol: dhcp nic_on_boot: true - nic_name: eth1 nic_boot_protocol: static nic_ip_address: 10.34.60.86 nic_netmask: 255.255.252.0 nic_gateway: 10.34.63.254 nic_on_boot: true - name: Run VM with sysprep ovirt_vms: name: windows2012R2_AD template: windows2012R2 cluster: Default memory: 3GiB high_availability: true sysprep: host_name: windowsad.example.com user_name: Administrator root_password: SuperPassword123 - name: Migrate/Run VM to/on host named 'host1' ovirt_vms: state: running name: myvm host: host1 - name: Change VMs CD ovirt_vms: name: myvm cd_iso: drivers.iso - name: Eject VMs CD ovirt_vms: name: myvm cd_iso: '' - name: Boot VM from CD ovirt_vms: name: myvm cd_iso: centos7_x64.iso boot_devices: - cdrom - name: Stop vm 
ovirt_vms: state: stopped name: myvm - name: Upgrade memory to already created VM ovirt_vms: name: myvm memory: 4GiB - name: Hot plug memory to already created and running VM (VM won't be restarted) ovirt_vms: name: myvm memory: 4GiB # When change on the VM needs restart of the VM, use next_run state, # The VM will be updated and rebooted if there are any changes. # If present state would be used, VM won't be restarted. - ovirt_vms: state: next_run name: myvm boot_devices: - network - name: Import virtual machine from VMware ovirt_vms: state: stopped cluster: mycluster name: vmware_win10 timeout: 1800 poll_interval: 30 vmware: url: vpx://user@1.2.3.4/Folder1/Cluster1/2.3.4.5?no_verify=1 name: windows10 storage_domain: mynfs username: user password: password - name: Create vm from template and create all disks on specific storage domain ovirt_vms: name: vm_test cluster: mycluster template: mytemplate storage_domain: mynfs nics: - name: nic1 - name: Remove VM, if VM is running it will be stopped ovirt_vms: state: absent name: myvm ''' RETURN = ''' id: description: ID of the VM which is managed returned: On success if VM is found. type: str sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c vm: description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt/RHV instance at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm." returned: On success if VM is found. 
type: dict
'''

import traceback

try:
    import ovirtsdk4.types as otypes
except ImportError:
    # Import error is deliberately swallowed here; check_sdk() reports a
    # friendly module failure later when the SDK is actually required.
    pass

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
    BaseModule,
    check_params,
    check_sdk,
    convert_to_bytes,
    create_connection,
    equal,
    get_dict_of_struct,
    get_entity,
    get_link_name,
    get_id_by_name,
    ovirt_full_argument_spec,
    search_by_name,
    wait,
)


class VmsModule(BaseModule):

    def __get_template_with_version(self):
        """
        Return the template selected by the 'template' (and optional
        'template_version') module parameters, or None when no template
        parameter was given.

        oVirt/RHV in version 4.1 doesn't support search by
        template+version_number, so we need to list all templates with
        specific name and then iterate through its versions until we find
        the version we look for.

        :raises ValueError: when 'template_version' is given but no template
            with that name/version exists.
        """
        template = None
        if self.param('template'):
            templates_service = self._connection.system_service().templates_service()
            templates = templates_service.list(search='name=%s' % self.param('template'))
            if self.param('template_version'):
                # Narrow the name matches down to the requested version number:
                templates = [
                    t for t in templates
                    if t.version.version_number == self.param('template_version')
                ]
            if not templates:
                raise ValueError(
                    "Template with name '%s' and version '%s' was not found'" % (
                        self.param('template'),
                        self.param('template_version')
                    )
                )
            # Pick the highest (most recent) version among the candidates:
            template = sorted(templates, key=lambda t: t.version.version_number, reverse=True)[0]

        return template

    def __get_storage_domain_and_all_template_disks(self, template):
        """
        Build a list of otypes.DiskAttachment covering every disk of
        *template*, re-targeted to the storage domain named by the
        'storage_domain' parameter and using the 'disk_format' parameter.

        Returns None when either 'template' or 'storage_domain' parameter is
        missing, i.e. there is nothing to override.
        """
        if self.param('template') is None:
            return None

        if self.param('storage_domain') is None:
            return None

        disks = list()
        for att in self._connection.follow_link(template.disk_attachments):
            disks.append(
                otypes.DiskAttachment(
                    disk=otypes.Disk(
                        id=att.disk.id,
                        format=otypes.DiskFormat(self.param('disk_format')),
                        storage_domains=[
                            otypes.StorageDomain(
                                id=get_id_by_name(
                                    self._connection.system_service().storage_domains_service(),
                                    self.param('storage_domain')
                                )
                            )
                        ]
                    )
                )
            )

        return disks

    def build_entity(self):
        # Resolve the template first, then re-target its disks to the
        # requested storage domain (both may legitimately be None):
        template = self.__get_template_with_version()
        disk_attachments = self.__get_storage_domain_and_all_template_disks(template)

        return otypes.Vm(
            id=self.param('id'),
name=self.param('name'), cluster=otypes.Cluster( name=self.param('cluster') ) if self.param('cluster') else None, disk_attachments=disk_attachments, template=otypes.Template( id=template.id, ) if template else None, use_latest_template_version=self.param('use_latest_template_version'), stateless=self.param('stateless') or self.param('use_latest_template_version'), delete_protected=self.param('delete_protected'), high_availability=otypes.HighAvailability( enabled=self.param('high_availability') ) if self.param('high_availability') is not None else None, lease=otypes.StorageDomainLease( storage_domain=otypes.StorageDomain( id=get_id_by_name( service=self._connection.system_service().storage_domains_service(), name=self.param('lease') ) ) ) if self.param('lease') is not None else None, cpu=otypes.Cpu( topology=otypes.CpuTopology( cores=self.param('cpu_cores'), sockets=self.param('cpu_sockets'), threads=self.param('cpu_threads'), ) ) if ( any((self.param('cpu_cores'), self.param('cpu_sockets'), self.param('cpu_threads'))) ) else None, cpu_shares=self.param('cpu_shares'), os=otypes.OperatingSystem( type=self.param('operating_system'), boot=otypes.Boot( devices=[ otypes.BootDevice(dev) for dev in self.param('boot_devices') ], ) if self.param('boot_devices') else None, ) if ( self.param('operating_system') or self.param('boot_devices') ) else None, type=otypes.VmType( self.param('type') ) if self.param('type') else None, memory=convert_to_bytes( self.param('memory') ) if self.param('memory') else None, memory_policy=otypes.MemoryPolicy( guaranteed=convert_to_bytes(self.param('memory_guaranteed')), ) if self.param('memory_guaranteed') else None, instance_type=otypes.InstanceType( id=get_id_by_name( self._connection.system_service().instance_types_service(), self.param('instance_type'), ), ) if self.param('instance_type') else None, description=self.param('description'), comment=self.param('comment'), time_zone=otypes.TimeZone( name=self.param('timezone'), ) if 
            self.param('timezone') else None,
            serial_number=otypes.SerialNumber(
                policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
                value=self.param('serial_policy_value'),
            ) if (
                self.param('serial_policy') is not None or
                self.param('serial_policy_value') is not None
            ) else None,
        )

    def update_check(self, entity):
        """
        Return True when the existing VM entity already matches every
        requested module parameter (i.e. no update call is needed).
        """
        return (
            equal(self.param('cluster'), get_link_name(self._connection, entity.cluster)) and
            equal(convert_to_bytes(self.param('memory')), entity.memory) and
            equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
            equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
            equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
            equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
            equal(self.param('type'), str(entity.type)) and
            equal(self.param('operating_system'), str(entity.os.type)) and
            equal(self.param('high_availability'), entity.high_availability.enabled) and
            equal(self.param('lease'), get_link_name(self._connection, getattr(entity.lease, 'storage_domain', None))) and
            equal(self.param('stateless'), entity.stateless) and
            equal(self.param('cpu_shares'), entity.cpu_shares) and
            equal(self.param('delete_protected'), entity.delete_protected) and
            equal(self.param('use_latest_template_version'), entity.use_latest_template_version) and
            equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os, 'devices', [])]) and
            equal(self.param('instance_type'), get_link_name(self._connection, entity.instance_type), ignore_case=True) and
            equal(self.param('description'), entity.description) and
            equal(self.param('comment'), entity.comment) and
            equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
            equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
            equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None))
        )

    def pre_create(self, entity):
        """Hook run before creation of a new VM."""
        # If the VM doesn't exist and no template is specified, default to Blank:
        if entity is None:
            if self.param('template') is None:
                self._module.params['template'] = 'Blank'

    def post_update(self, entity):
        """Hook run after an update; reuses the post-create attach logic."""
        self.post_create(entity)

    def post_create(self, entity):
        """Hook run after creation: attach the requested disks and NICs."""
        # NOTE(review): __attach_disks/__attach_nics set self.changed = True
        # internally but have no explicit return value, so these assignments
        # end up overwriting self.changed with None — looks like a latent bug;
        # confirm against the module's change reporting before relying on it.
        self.changed = self.__attach_disks(entity)
        self.changed = self.__attach_nics(entity)

    def pre_remove(self, entity):
        """Hook run before removal: forcibly stop the VM if it's not DOWN."""
        if entity.status != otypes.VmStatus.DOWN:
            if not self._module.check_mode:
                self.changed = self.action(
                    action='stop',
                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
                    wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
                )['changed']

    def __suspend_shutdown_common(self, vm_service):
        """If the VM is in (or heading to) a running state, wait for UP first."""
        if vm_service.get().status in [
            otypes.VmStatus.MIGRATING,
            otypes.VmStatus.POWERING_UP,
            otypes.VmStatus.REBOOT_IN_PROGRESS,
            otypes.VmStatus.WAIT_FOR_LAUNCH,
            otypes.VmStatus.UP,
            otypes.VmStatus.RESTORING_STATE,
        ]:
            self._wait_for_UP(vm_service)

    def _pre_shutdown_action(self, entity):
        """Before shutdown: resume a suspended/paused VM so it can shut down."""
        vm_service = self._service.vm_service(entity.id)
        self.__suspend_shutdown_common(vm_service)
        if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
            vm_service.start()
            self._wait_for_UP(vm_service)
        return vm_service.get()

    def _pre_suspend_action(self, entity):
        """Before suspend: start a paused/down VM so it can be suspended."""
        vm_service = self._service.vm_service(entity.id)
        self.__suspend_shutdown_common(vm_service)
        if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
            vm_service.start()
            self._wait_for_UP(vm_service)
        return vm_service.get()

    def _post_start_action(self, entity):
        """After start: wait for UP, then attach the CD and migrate if asked."""
        vm_service = self._service.service(entity.id)
        self._wait_for_UP(vm_service)
        self._attach_cd(vm_service.get())
        self._migrate_vm(vm_service.get())

    def _attach_cd(self, entity):
        """Attach the ``cd_iso`` image to the VM's first CD-ROM device."""
        cd_iso = self.param('cd_iso')
        if cd_iso is not None:
            vm_service = self._service.service(entity.id)
            # 'current=True' targets the running configuration when the VM is UP:
            current = vm_service.get().status == otypes.VmStatus.UP
            cdroms_service = vm_service.cdroms_service()
            cdrom_device = cdroms_service.list()[0]
            cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
            cdrom = cdrom_service.get(current=current)
            if getattr(cdrom.file, 'id', '') != cd_iso:
                if not self._module.check_mode:
                    cdrom_service.update(
                        cdrom=otypes.Cdrom(
                            file=otypes.File(id=cd_iso)
                        ),
                        current=current,
                    )
                self.changed = True

        return entity

    def _migrate_vm(self, entity):
        """Migrate a running VM to the requested host, if it isn't there yet."""
        vm_host = self.param('host')
        vm_service = self._service.vm_service(entity.id)
        if vm_host is not None:
            # In case VM is preparing to be UP, wait for it to be up, to migrate it:
            if entity.status == otypes.VmStatus.UP:
                hosts_service = self._connection.system_service().hosts_service()
                current_vm_host = hosts_service.host_service(entity.host.id).get().name
                if vm_host != current_vm_host:
                    if not self._module.check_mode:
                        vm_service.migrate(host=otypes.Host(name=vm_host))
                        self._wait_for_UP(vm_service)
                    self.changed = True

        return entity

    def _wait_for_UP(self, vm_service):
        """Block until the VM reaches the UP state (honoring wait/timeout)."""
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.UP,
            wait=self.param('wait'),
            timeout=self.param('timeout'),
        )

    def _wait_for_vm_disks(self, vm_service):
        """Block until every disk attached to the VM reports status OK."""
        disks_service = self._connection.system_service().disks_service()
        for da in vm_service.disk_attachments_service().list():
            disk_service = disks_service.disk_service(da.disk.id)
            wait(
                service=disk_service,
                condition=lambda disk: disk.status == otypes.DiskStatus.OK,
                wait=self.param('wait'),
                timeout=self.param('timeout'),
            )

    def wait_for_down(self, vm):
        """
        First wait for the VM to reach the DOWN status. Then, for stateless
        VMs, find the active snapshot and wait until its state is OK and the
        stateless snapshot is removed.
        """
        vm_service = self._service.vm_service(vm.id)
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
            wait=self.param('wait'),
            timeout=self.param('timeout'),
        )
        if vm.stateless:
            snapshots_service = vm_service.snapshots_service()
            snapshots = snapshots_service.list()
            snap_active = [
                snap for snap in snapshots
                if snap.snapshot_type == otypes.SnapshotType.ACTIVE
            ][0]
            snap_stateless = [
                snap for snap in snapshots
                if snap.snapshot_type == otypes.SnapshotType.STATELESS
            ]
            # Stateless snapshot may be already removed:
            if snap_stateless:
                """
                We need to wait for the Active snapshot ID to be removed, as
                it is the current stateless snapshot. Then we need to wait for
                the stateless snapshot ID to be ready for use, because it will
                become the active snapshot.
                """
                wait(
                    service=snapshots_service.snapshot_service(snap_active.id),
                    condition=lambda snap: snap is None,
                    wait=self.param('wait'),
                    timeout=self.param('timeout'),
                )
                wait(
                    service=snapshots_service.snapshot_service(snap_stateless[0].id),
                    condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
                    wait=self.param('wait'),
                    timeout=self.param('timeout'),
                )
        return True

    def __attach_disks(self, entity):
        """Attach the disks requested via the ``disks`` parameter to the VM."""
        if not self.param('disks'):
            return

        vm_service = self._service.service(entity.id)
        disks_service = self._connection.system_service().disks_service()
        disk_attachments_service = vm_service.disk_attachments_service()

        self._wait_for_vm_disks(vm_service)
        for disk in self.param('disks'):
            # If disk ID is not specified, find disk by name:
            disk_id = disk.get('id')
            if disk_id is None:
                disk_id = getattr(
                    search_by_name(
                        service=disks_service,
                        name=disk.get('name')
                    ),
                    'id',
                    None
                )

            # Attach disk to VM:
            disk_attachment = disk_attachments_service.attachment_service(disk_id)
            if get_entity(disk_attachment) is None:
                if not self._module.check_mode:
                    disk_attachments_service.add(
                        otypes.DiskAttachment(
                            disk=otypes.Disk(
                                id=disk_id,
                            ),
                            active=disk.get('activate', True),
                            interface=otypes.DiskInterface(
                                disk.get('interface', 'virtio')
                            ),
                            bootable=disk.get('bootable', False),
                        )
                    )
                self.changed = True

    def __get_vnic_profile_id(self, nic):
        """
        Return the VNIC profile ID looked up by its name. Because there can be
        more VNIC profiles with the same name, the cluster's networks are used
        as an additional filter criterion.
        """
        vnics_service = self._connection.system_service().vnic_profiles_service()
        clusters_service = self._connection.system_service().clusters_service()
        cluster = search_by_name(clusters_service, self.param('cluster'))
        profiles = [
            profile for profile in vnics_service.list()
            if profile.name == nic.get('profile_name')
        ]
        cluster_networks = [
            net.id for net in self._connection.follow_link(cluster.networks)
        ]
        try:
            return next(
                profile.id for profile in profiles
                if profile.network.id in cluster_networks
            )
        except StopIteration:
            raise Exception(
                "Profile '%s' was not found in cluster '%s'" % (
                    nic.get('profile_name'),
                    self.param('cluster')
                )
            )

    def __attach_nics(self, entity):
        """Attach the NICs requested via the ``nics`` parameter to the VM."""
        # Attach NICs to VM, if specified:
        nics_service = self._service.service(entity.id).nics_service()
        for nic in self.param('nics'):
            if search_by_name(nics_service, nic.get('name')) is None:
                if not self._module.check_mode:
                    nics_service.add(
                        otypes.Nic(
                            name=nic.get('name'),
                            interface=otypes.NicInterface(
                                nic.get('interface', 'virtio')
                            ),
                            vnic_profile=otypes.VnicProfile(
                                id=self.__get_vnic_profile_id(nic),
                            ) if nic.get('profile_name') else None,
                            mac=otypes.Mac(
                                address=nic.get('mac_address')
                            ) if nic.get('mac_address') else None,
                        )
                    )
                self.changed = True


def _get_role_mappings(module):
    """Build RegistrationRoleMapping objects from the ``role_mappings`` param."""
    roleMappings = list()

    for roleMapping in module.params['role_mappings']:
        roleMappings.append(
            otypes.RegistrationRoleMapping(
                from_=otypes.Role(
                    name=roleMapping['source_name'],
                ) if roleMapping['source_name'] else None,
                to=otypes.Role(
                    name=roleMapping['dest_name'],
                ) if roleMapping['dest_name'] else None,
            )
        )
    return roleMappings


def _get_affinity_group_mappings(module):
    """Build RegistrationAffinityGroupMapping objects from module params."""
    affinityGroupMappings = list()

    for affinityGroupMapping in module.params['affinity_group_mappings']:
        affinityGroupMappings.append(
            otypes.RegistrationAffinityGroupMapping(
                from_=otypes.AffinityGroup(
                    name=affinityGroupMapping['source_name'],
                ) if affinityGroupMapping['source_name'] else None,
                to=otypes.AffinityGroup(
                    name=affinityGroupMapping['dest_name'],
                ) if affinityGroupMapping['dest_name'] else None,
            )
        )
    return affinityGroupMappings


def _get_affinity_label_mappings(module):
    """Build RegistrationAffinityLabelMapping objects from module params."""
    affinityLabelMappings = list()

    for affinityLabelMapping in module.params['affinity_label_mappings']:
        affinityLabelMappings.append(
            otypes.RegistrationAffinityLabelMapping(
                from_=otypes.AffinityLabel(
                    name=affinityLabelMapping['source_name'],
                ) if affinityLabelMapping['source_name'] else None,
                to=otypes.AffinityLabel(
                    name=affinityLabelMapping['dest_name'],
                ) if affinityLabelMapping['dest_name'] else None,
            )
        )
    return affinityLabelMappings


def _get_domain_mappings(module):
    """Build RegistrationDomainMapping objects from the ``domain_mappings`` param."""
    domainMappings = list()

    for domainMapping in module.params['domain_mappings']:
        domainMappings.append(
            otypes.RegistrationDomainMapping(
                from_=otypes.Domain(
                    name=domainMapping['source_name'],
                ) if domainMapping['source_name'] else None,
                to=otypes.Domain(
                    name=domainMapping['dest_name'],
                ) if domainMapping['dest_name'] else None,
            )
        )
    return domainMappings


def _get_lun_mappings(module):
    """Build RegistrationLunMapping objects from the ``lun_mappings`` param."""
    lunMappings = list()
    for lunMapping in module.params['lun_mappings']:
        # NOTE(review): the trailing comma after the append() call below turns
        # the statement into a discarded 1-tuple; harmless, but probably
        # unintended — kept as-is here.
        lunMappings.append(
            otypes.RegistrationLunMapping(
                from_=otypes.Disk(
                    lun_storage=otypes.HostStorage(
                        type=otypes.StorageType(lunMapping['source_storage_type'])
                        if (lunMapping['source_storage_type'] in ['iscsi', 'fcp']) else None,
                        logical_units=[
                            otypes.LogicalUnit(
                                id=lunMapping['source_logical_unit_id'],
                            )
                        ],
                    ),
                ) if lunMapping['source_logical_unit_id'] else None,
                to=otypes.Disk(
                    lun_storage=otypes.HostStorage(
                        type=otypes.StorageType(lunMapping['dest_storage_type'])
                        if (lunMapping['dest_storage_type'] in ['iscsi', 'fcp']) else None,
                        logical_units=[
                            otypes.LogicalUnit(
                                id=lunMapping['dest_logical_unit_id'],
                                port=lunMapping['dest_logical_unit_port'],
                                portal=lunMapping['dest_logical_unit_portal'],
                                address=lunMapping['dest_logical_unit_address'],
                                target=lunMapping['dest_logical_unit_target'],
                                password=lunMapping['dest_logical_unit_password'],
                                username=lunMapping['dest_logical_unit_username'],
                            )
                        ],
                    ),
                ) if lunMapping['dest_logical_unit_id'] else None,
            ),
        ),
    return lunMappings


def _get_cluster_mappings(module):
    """Build RegistrationClusterMapping objects from the ``cluster_mappings`` param."""
    clusterMappings = list()

    for clusterMapping in module.params['cluster_mappings']:
        clusterMappings.append(
            otypes.RegistrationClusterMapping(
                from_=otypes.Cluster(
                    name=clusterMapping['source_name'],
                ),
                to=otypes.Cluster(
                    name=clusterMapping['dest_name'],
                ) if clusterMapping['dest_name'] else None,
            )
        )
    return clusterMappings


def _get_vnic_profile_mappings(module):
    """Build VnicProfileMapping objects from the ``vnic_profile_mappings`` param."""
    vnicProfileMappings = list()

    for vnicProfileMapping in module.params['vnic_profile_mappings']:
        vnicProfileMappings.append(
            otypes.VnicProfileMapping(
                source_network_name=vnicProfileMapping['source_network_name'],
                source_network_profile_name=vnicProfileMapping['source_profile_name'],
                target_vnic_profile=otypes.VnicProfile(
                    id=vnicProfileMapping['target_profile_id'],
                ) if vnicProfileMapping['target_profile_id'] else None,
            )
        )
    return vnicProfileMappings


def import_vm(module, connection):
    """
    Import an external VM (kvm/xen/vmware) into oVirt.

    Returns False when a VM with the requested name already exists,
    True after the import finished (or the wait timed out).
    """
    vms_service = connection.system_service().vms_service()
    if search_by_name(vms_service, module.params['name']) is not None:
        return False

    events_service = connection.system_service().events_service()
    last_event = events_service.list(max=1)[0]

    # Exactly one of the provider params is expected to be set:
    external_type = [
        tmp for tmp in ['kvm', 'xen', 'vmware']
        if module.params[tmp] is not None
    ][0]

    external_vm = module.params[external_type]
    imports_service = connection.system_service().external_vm_imports_service()
    imported_vm = imports_service.add(
        otypes.ExternalVmImport(
            vm=otypes.Vm(
                name=module.params['name']
            ),
            name=external_vm.get('name'),
            username=external_vm.get('username', 'test'),
            password=external_vm.get('password', 'test'),
            provider=otypes.ExternalVmProviderType(external_type),
            url=external_vm.get('url'),
            cluster=otypes.Cluster(
                name=module.params['cluster'],
            ) if module.params['cluster'] else None,
            storage_domain=otypes.StorageDomain(
                name=external_vm.get('storage_domain'),
            ) if external_vm.get('storage_domain') else None,
            sparse=external_vm.get('sparse', True),
            host=otypes.Host(
                name=module.params['host'],
            ) if module.params['host'] else None,
        )
    )

    # Wait until an event with code 1152 for our VM appears:
    vms_service = connection.system_service().vms_service()
    wait(
        service=vms_service.vm_service(imported_vm.vm.id),
        condition=lambda vm: len([
            event
            for event in events_service.list(
                from_=int(last_event.id),
                search='type=1152 and vm.id=%s' % vm.id,
            )
        ]) > 0 if vm is not None else False,
        fail_condition=lambda vm: vm is None,
        timeout=module.params['timeout'],
        poll_interval=module.params['poll_interval'],
    )
    return True


def _get_initialization(sysprep, cloud_init, cloud_init_nics):
    """
    Build an otypes.Initialization from the sysprep / cloud-init parameters,
    or return None when neither is provided. cloud-init takes precedence.
    """
    initialization = None
    if cloud_init or cloud_init_nics:
        initialization = otypes.Initialization(
            nic_configurations=[
                otypes.NicConfiguration(
                    boot_protocol=otypes.BootProtocol(
                        nic.pop('nic_boot_protocol').lower()
                    ) if nic.get('nic_boot_protocol') else None,
                    name=nic.pop('nic_name', None),
                    on_boot=nic.pop('nic_on_boot', None),
                    ip=otypes.Ip(
                        address=nic.pop('nic_ip_address', None),
                        netmask=nic.pop('nic_netmask', None),
                        gateway=nic.pop('nic_gateway', None),
                    ) if (
                        nic.get('nic_gateway') is not None or
                        nic.get('nic_netmask') is not None or
                        nic.get('nic_ip_address') is not None
                    ) else None,
                )
                for nic in cloud_init_nics
                if (
                    nic.get('nic_gateway') is not None or
                    nic.get('nic_netmask') is not None or
                    nic.get('nic_ip_address') is not None or
                    nic.get('nic_boot_protocol') is not None or
                    nic.get('nic_on_boot') is not None
                )
            ] if cloud_init_nics else None,
            **cloud_init
        )
    elif sysprep:
        initialization = otypes.Initialization(
            **sysprep
        )
    return initialization


def control_state(vm, vms_service, module):
    """
    Bring a VM out of transient states so the requested state change can be
    applied; fail for states that cannot be controlled at all.
    """
    if vm is None:
        return

    force = module.params['force']
    state = module.params['state']

    vm_service = vms_service.vm_service(vm.id)
    if vm.status == otypes.VmStatus.IMAGE_LOCKED:
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
        )
    elif vm.status == otypes.VmStatus.SAVING_STATE:
        # Result state is SUSPENDED, we should wait until it's suspended:
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
        )
    elif (
        vm.status == otypes.VmStatus.UNASSIGNED or
        vm.status == otypes.VmStatus.UNKNOWN
    ):
        # Invalid states:
        module.fail_json(msg="Not possible to control VM, if it's in '{}' status".format(vm.status))
    elif vm.status == otypes.VmStatus.POWERING_DOWN:
        if (force and state == 'stopped') or state == 'absent':
            vm_service.stop()
            wait(
                service=vm_service,
                condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
            )
        else:
            # If VM is powering down, wait to be DOWN or UP.
            # VM can end in UP state in case there is no GA
            # or ACPI on the VM or shutdown operation crashed:
            wait(
                service=vm_service,
                condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
            )


def main():
    """Ansible module entry point: manage oVirt virtual machines."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(type='str', default='present', choices=['absent', 'next_run', 'present', 'registered', 'running', 'stopped', 'suspended']),
        name=dict(type='str'),
        id=dict(type='str'),
        cluster=dict(type='str'),
        allow_partial_import=dict(type='bool'),
        template=dict(type='str'),
        template_version=dict(type='int'),
        use_latest_template_version=dict(type='bool'),
        storage_domain=dict(type='str'),
        disk_format=dict(type='str', default='cow', choices=['cow', 'raw']),
        disks=dict(type='list', default=[]),
        memory=dict(type='str'),
        memory_guaranteed=dict(type='str'),
        cpu_sockets=dict(type='int'),
        cpu_cores=dict(type='int'),
        cpu_shares=dict(type='int'),
        cpu_threads=dict(type='int'),
        type=dict(type='str', choices=['server', 'desktop']),
        operating_system=dict(type='str', choices=[
            'rhel_6_ppc64', 'other', 'freebsd', 'windows_2003x64', 'windows_10',
            'rhel_6x64', 'rhel_4x64', 'windows_2008x64', 'windows_2008R2x64',
            'debian_7', 'windows_2012x64', 'ubuntu_14_04', 'ubuntu_12_04',
            'ubuntu_13_10', 'windows_8x64', 'other_linux_ppc64', 'windows_2003',
            'other_linux', 'windows_10x64', 'windows_2008', 'rhel_3', 'rhel_5',
            'rhel_4', 'other_ppc64', 'sles_11', 'rhel_6', 'windows_xp',
            'rhel_7x64', 'freebsdx64', 'rhel_7_ppc64', 'windows_7',
            'rhel_5x64', 'ubuntu_14_04_ppc64', 'sles_11_ppc64', 'windows_8',
            'windows_2012R2x64', 'windows_2008r2x64', 'ubuntu_13_04',
            'ubuntu_12_10', 'windows_7x64',
        ]),
        cd_iso=dict(type='str'),
        boot_devices=dict(type='list'),
        vnic_profile_mappings=dict(default=[], type='list'),
        cluster_mappings=dict(default=[], type='list'),
        role_mappings=dict(default=[], type='list'),
        affinity_group_mappings=dict(default=[], type='list'),
        affinity_label_mappings=dict(default=[], type='list'),
        lun_mappings=dict(default=[], type='list'),
        domain_mappings=dict(default=[], type='list'),
        reassign_bad_macs=dict(default=None, type='bool'),
        high_availability=dict(type='bool'),
        lease=dict(type='str'),
        stateless=dict(type='bool'),
        delete_protected=dict(type='bool'),
        force=dict(type='bool', default=False),
        nics=dict(type='list', default=[]),
        cloud_init=dict(type='dict'),
        cloud_init_nics=dict(type='list', default=[]),
        sysprep=dict(type='dict'),
        host=dict(type='str'),
        clone=dict(type='bool', default=False),
        clone_permissions=dict(type='bool', default=False),
        kernel_path=dict(type='str'),
        initrd_path=dict(type='str'),
        kernel_params=dict(type='str'),
        instance_type=dict(type='str'),
        description=dict(type='str'),
        comment=dict(type='str'),
        timezone=dict(type='str'),
        serial_policy=dict(type='str', choices=['vm', 'host', 'custom']),
        serial_policy_value=dict(type='str'),
        vmware=dict(type='dict'),
        xen=dict(type='dict'),
        kvm=dict(type='dict'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['id', 'name']],
    )
    check_sdk(module)
    check_params(module)

    try:
        state = module.params['state']
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vms_service = connection.system_service().vms_service()
        vms_module = VmsModule(
            connection=connection,
            module=module,
            service=vms_service,
        )
        vm = vms_module.search_entity(list_params={'all_content': True})

        # Move the VM out of transient states before acting on it:
        control_state(vm, vms_service, module)
        if state in ('present', 'running', 'next_run'):
            if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
                vms_module.changed = import_vm(module, connection)

            sysprep = module.params['sysprep']
            cloud_init = module.params['cloud_init']
            cloud_init_nics = module.params['cloud_init_nics'] or []
            if cloud_init is not None:
                cloud_init_nics.append(cloud_init)

            # In case the VM doesn't exist, wait for the VM DOWN state;
            # otherwise don't wait for any state, just update the VM:
            ret = vms_module.create(
                entity=vm,
                result_state=otypes.VmStatus.DOWN if vm is None else None,
                clone=module.params['clone'],
                clone_permissions=module.params['clone_permissions'],
            )

            # Run the VM if it was just created, else don't run it:
            if state == 'running':
                initialization = _get_initialization(sysprep, cloud_init, cloud_init_nics)
                ret = vms_module.action(
                    action='start',
                    post_action=vms_module._post_start_action,
                    action_condition=lambda vm: (
                        vm.status not in [
                            otypes.VmStatus.MIGRATING,
                            otypes.VmStatus.POWERING_UP,
                            otypes.VmStatus.REBOOT_IN_PROGRESS,
                            otypes.VmStatus.WAIT_FOR_LAUNCH,
                            otypes.VmStatus.UP,
                            otypes.VmStatus.RESTORING_STATE,
                        ]
                    ),
                    wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
                    # Start action kwargs:
                    use_cloud_init=cloud_init is not None or len(cloud_init_nics) > 0,
                    use_sysprep=sysprep is not None,
                    vm=otypes.Vm(
                        placement_policy=otypes.VmPlacementPolicy(
                            hosts=[otypes.Host(name=module.params['host'])]
                        ) if module.params['host'] else None,
                        initialization=initialization,
                        os=otypes.OperatingSystem(
                            cmdline=module.params.get('kernel_params'),
                            initrd=module.params.get('initrd_path'),
                            kernel=module.params.get('kernel_path'),
                        ) if (
                            module.params.get('kernel_params') or
                            module.params.get('initrd_path') or
                            module.params.get('kernel_path')
                        ) else None,
                    ) if (
                        module.params.get('kernel_params') or
                        module.params.get('initrd_path') or
                        module.params.get('kernel_path') or
                        module.params.get('host') or
                        initialization
                    ) else None,
                )

            if state == 'next_run':
                # Apply next run configuration, if needed:
                vm = vms_service.vm_service(ret['id']).get()
                if vm.next_run_configuration_exists:
                    ret = vms_module.action(
                        action='reboot',
                        entity=vm,
                        action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
                        wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
                    )
        elif state == 'stopped':
            if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
                vms_module.changed = import_vm(module, connection)

            ret = vms_module.create(
                result_state=otypes.VmStatus.DOWN if vm is None else None,
                clone=module.params['clone'],
                clone_permissions=module.params['clone_permissions'],
            )
            if module.params['force']:
                ret = vms_module.action(
                    action='stop',
                    post_action=vms_module._attach_cd,
                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
                    wait_condition=vms_module.wait_for_down,
                )
            else:
                # Graceful shutdown (may require resuming a suspended VM first):
                ret = vms_module.action(
                    action='shutdown',
                    pre_action=vms_module._pre_shutdown_action,
                    post_action=vms_module._attach_cd,
                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
                    wait_condition=vms_module.wait_for_down,
                )
        elif state == 'suspended':
            vms_module.create(
                result_state=otypes.VmStatus.DOWN if vm is None else None,
                clone=module.params['clone'],
                clone_permissions=module.params['clone_permissions'],
            )
            ret = vms_module.action(
                action='suspend',
                pre_action=vms_module._pre_suspend_action,
                action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
                wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
            )
        elif state == 'absent':
            ret = vms_module.remove()
        elif state == 'registered':
            storage_domains_service = connection.system_service().storage_domains_service()

            # Find the storage domain with the unregistered VM:
            sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
            storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
            vms_service = storage_domain_service.vms_service()

            # Find the unregistered VM we want to register:
            vms = vms_service.list(unregistered=True)
            vm = next(
                (vm for vm in vms if (vm.id == module.params['id'] or vm.name == module.params['name'])),
                None
            )
            changed = False
            if vm is None:
                vm = vms_module.search_entity()
                if vm is None:
                    raise ValueError(
                        "VM '%s(%s)' wasn't found." % (module.params['name'], module.params['id'])
                    )
            else:
                # Register the vm into the system:
                changed = True
                vm_service = vms_service.vm_service(vm.id)
                vm_service.register(
                    allow_partial_import=module.params['allow_partial_import'],
                    cluster=otypes.Cluster(
                        name=module.params['cluster']
                    ) if module.params['cluster'] else None,
                    vnic_profile_mappings=_get_vnic_profile_mappings(module)
                    if module.params['vnic_profile_mappings'] else None,
                    reassign_bad_macs=module.params['reassign_bad_macs']
                    if module.params['reassign_bad_macs'] is not None else None,
                    registration_configuration=otypes.RegistrationConfiguration(
                        cluster_mappings=_get_cluster_mappings(module),
                        role_mappings=_get_role_mappings(module),
                        domain_mappings=_get_domain_mappings(module),
                        lun_mappings=_get_lun_mappings(module),
                        affinity_group_mappings=_get_affinity_group_mappings(module),
                        affinity_label_mappings=_get_affinity_label_mappings(module),
                    ) if (module.params['cluster_mappings']
                          or module.params['role_mappings']
                          or module.params['domain_mappings']
                          or module.params['lun_mappings']
                          or module.params['affinity_group_mappings']
                          or module.params['affinity_label_mappings']) else None
                )

                if module.params['wait']:
                    vm = vms_module.wait_for_import()
                else:
                    # Fetch vm to initialize return.
                    vm = vm_service.get()
            ret = {
                'changed': changed,
                'id': vm.id,
                'vm': get_dict_of_struct(vm)
            }

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
Jumping Pillow Hours for Friday: 2pm to 6pm. Socks and a shirt must be worn while on the pillow. Please follow all posted rules. All bikes must be parked in the bike racks, and all golf carts/vehicles are asked to park in the Rec Center parking area.
#!/usr/bin/env python

'''Verify that functions scheduled with ``clock.schedule_once`` each fire
exactly one time, receiving the expected time delta.
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id: TICK.py 310 2006-12-23 15:56:35Z Alex.Holkner $'

import time
import unittest

from pyglet import clock

__noninteractive = True

# Acceptable slack between the requested delay and the delta actually passed
# to a callback.
_TOLERANCE = 0.01


class SCHEDULE_ONCE(unittest.TestCase):
    # Per-callback invocation counters; reset via clear().
    callback_1_count = 0
    callback_2_count = 0
    callback_3_count = 0

    def _bump(self, dt, expected, attr):
        # Shared bookkeeping: validate the delta, then bump the counter.
        self.assertTrue(abs(dt - expected) < _TOLERANCE)
        setattr(self, attr, getattr(self, attr) + 1)

    def callback_1(self, dt):
        self._bump(dt, 0.1, 'callback_1_count')

    def callback_2(self, dt):
        self._bump(dt, 0.35, 'callback_2_count')

    def callback_3(self, dt):
        self._bump(dt, 0.07, 'callback_3_count')

    def clear(self):
        """Zero all invocation counters."""
        self.callback_1_count = 0
        self.callback_2_count = 0
        self.callback_3_count = 0

    def test_schedule_once(self):
        self.clear()
        clock.set_default(clock.Clock())
        for callback, delay in ((self.callback_1, 0.1),
                                (self.callback_2, 0.35),
                                (self.callback_3, 0.07)):
            clock.schedule_once(callback, delay)

        elapsed = 0
        while elapsed < 1:
            elapsed += clock.tick()

        for count in (self.callback_1_count,
                      self.callback_2_count,
                      self.callback_3_count):
            self.assertTrue(count == 1)

if __name__ == '__main__':
    unittest.main()
Website: Reasons Why You Should Feed Your Dog Raw Food. Description: Eating food in its natural, unadulterated state is best for health and wellness – for us and for our pets. Dogs and cats benefit from food that is as close as possible to what their diet would be in nature. In the wild, our pets' ancestors would catch their prey and eat every part of the animal raw, including muscle meat, organ meat, bone, and blood. When animals eat whole, raw food, their health is optimized, paving the way for longevity and wellness.
#!/usr/bin/env python3
"""Capture H.264 frames from a live HTTP stream and save them to a file.

The byte stream is cut on 3-byte NAL start codes (``00 00 01``). The first
(usually partial) frame is discarded and the next 120 complete frames are
written to ``navigation.h264``.
"""

STREAM_URL = "http://navigation.local:8080/stream/video.h264"
NAL_START_CODE = b"\x00\x00\x01"


def split_frames(chunks, max_frames):
    """Split an iterable of byte chunks into NAL-delimited frames.

    :param chunks: iterable of ``bytes`` chunks (e.g. ``response.iter_content``).
    :param max_frames: stop after collecting this many complete frames.
    :return: list of frames. The partial data seen before the first start
        code is dropped; every collected frame begins with a start code.
    """
    frames = []
    data = b""
    found_first = False
    for chunk in chunks:
        if not chunk:
            continue
        starting_offset = len(data)
        # A start code may straddle a chunk boundary, so back the search
        # position up over trailing zero bytes.
        # BUGFIX: indexing a bytes object yields an int, so the bytes must be
        # compared to the integer 0 — the old comparison against b"\x00" was
        # always False on Python 3, and boundary-straddling codes were missed.
        if starting_offset >= 2:
            if data[-1] == 0:
                print("last byte is zero, backing up one")
                starting_offset -= 1
            if data[-2] == 0:
                print("second to last byte is zero, backing up one more")
                starting_offset -= 1
        data = data + chunk
        offset = data.find(NAL_START_CODE, starting_offset)
        if offset != -1:
            print("found frame")
            remaining = data[offset:]
            if not found_first:
                # Everything before the first start code is a partial frame
                # whose beginning we never saw — drop it.
                print("dropping partial first frame")
                found_first = True
            else:
                print("adding frame", len(frames) + 1)
                frames.append(data[:offset])
                if len(frames) == max_frames:
                    break
            data = remaining
    return frames


def main():
    """Fetch the stream and write 120 frames to ``navigation.h264``."""
    # Imported here so the module can be imported (e.g. by tests) without
    # the third-party ``requests`` package installed.
    import requests

    response = requests.get(STREAM_URL, stream=True)
    frames = split_frames(response.iter_content(chunk_size=1024), 120)

    with open("navigation.h264", "wb") as out:
        # NOTE(review): mirrors the original leading zero byte — presumably
        # turns the first frame's 3-byte start code into a 4-byte one; confirm
        # against the downstream consumer before changing.
        out.write(b"\x00")
        for frame in frames:
            out.write(frame)


if __name__ == "__main__":
    main()
McCormack has been allowed to keep her job at South Tyneside District Hospital. A nurse from South Tyneside who stole £50 from her friend's handbag on a night out has been allowed to keep her career. Donna McCormack, who worked in the accident and emergency department at South Tyneside District Hospital, was caught on camera helping herself to her friend's cash on December 20, 2015. She was interviewed and given a police caution on January 12, after her friend saw the footage and reported the theft. She claimed she had distanced herself from bad influences since then and apologised to the victim. The panel found McCormack displayed 'genuine remorse' and imposed a two-year caution order. This allows her to continue her nursing career unimpeded.
# -*- coding: utf-8 -*-
"""
Started on fri, jul 27th, 2018

@author: carlos.arana

Compiles parameter P0221 (annual CO emissions, in tonnes, 2008) from the
SEMARNAT greenhouse-gas dataset and feeds it to the PCCS compiler.
"""

# Libraries used
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
    sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar

"""
The local libraries used above are available at the following locations:
SCRIPT:     | AVAILABLE AT:
------      | ------------------------------------------------------------------------------------
VarInt      | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta        | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador  | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""

# Parameter documentation ---------------------------------------------------------------------------------------

# Parameter descriptions
# NOTE(review): attributes are assigned on the Meta *class* itself (M is just
# an alias for the class, not an instance); the downstream Compilador appears
# to rely on this — confirm before refactoring to instantiation.
M = Meta
M.ClaveParametro = 'P0221'
M.NombreParametro = 'Emisiones anuales CO'
M.DescParam = 'Monóxido de carbono'
M.UnidadesParam = 'ton'
M.TituloParametro = 'GEI'   # Used to name the parameter column
M.PeriodoParam = '2008'
M.TipoInt = 2               # 1: Binary; 2: Multivariate, 3: Integral

# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C'             # (Variable types: [C]ontinuous, [D]iscrete, [O]rdinal, [B]inary or [N]ominal)
M.array = []
M.TipoAgr = 'sum'

# Descriptions of the mining process
M.nomarchivodataset = 'P0218'
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Toneladas de gases de efecto invernadero, clasificadas por gas, para el año 2008'
M.ClaveDataset = 'SEMARNAT'
M.ActDatos = '2008'
M.Agregacion = 'Se sumó la cantidad de emisiones anuales de CO para los municipios que componen ' \
               'cada ciudad del SUN' \

# Descriptions generated from the parameter key
M.getmetafromds = 1
Meta.fillmeta(M)

# Parameter construction -----------------------------------------------------------------------------------------

# Load initial dataset
# NOTE: 'sheetname' is the pre-0.21 pandas spelling of this keyword; newer
# pandas releases renamed it to 'sheet_name'.
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
                        sheetname='DATOS', dtype={'CVE_MUN': 'str'})
dataset.set_index('CVE_MUN', inplace=True)
dataset = dataset.rename_axis('CVE_MUN')
dataset.head(2)
list(dataset)

# Build the parameter dataset and its integrity variable
var1 = 'CO'
par_dataset = dataset[var1]                     # (immediately overwritten below; kept as-is)
par_dataset = dataset[var1].astype('float')
par_dataset = par_dataset.to_frame(name = M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)

# Compilation
compilar(M, dataset, par_dataset, variables_dataset)
The luxurious version of the Italian water taxi. The design was developed by the English company Harman Piket Naval Architects. The eye-catcher in the interior is the handcrafted espresso machine delivered by SI Coffee Solutions.
#!/usr/bin/python
"""
The distributor checks the queue and decides where the data should go next.
Workers can add hashes or files to the queue for services to search or
analyze, and the distributor facilitates that by offering a central point.
In the future it will also make sure the samples are distributed to the
correct worker and that workers are started according to the amount of
samples.
"""
from malzoo.common.abstract       import Distributor
from malzoo.core.services.apis    import *
from malzoo.core.tools.general_info import GeneralInformation
from malzoo.core.tools.database   import MongoDatabase
from malzoo.core.tools.signatures import Signatures

class DistributeBot(Distributor):
    """
    The DistributeBot expects to receive a dict with the following keys:
    md5, file (path), tag.
    """
    def distribute(self,sample):
        """
        Route one sample: skip unwanted/duplicate files, optionally submit
        to Viper, then enqueue it on the worker queue matching its file type.
        """
        viper = ViperService()
        mongodb = MongoDatabase()
        filename = sample['filename']
        yarasigs = Signatures()
        # First filter out files matching the 'unwanted' yara rules:
        match = yarasigs.scan(sample['filename'], rule='unwanted.yara')
        if not match:
            if 'md5' in sample:
                # Duplicate check against the configured backend (if any):
                if self.conf.get('settings','duplicatecheck') == 'viper':
                    known = viper.search({'md5':sample['md5']})
                elif self.conf.get('settings','duplicatecheck') == 'mongo':
                    known = mongodb.search({'md5':sample['md5']})
                else:
                    known = False

                if known:
                    self.log('distributor - {0} - already in db'.format(sample['md5']))
                else:
                    general_info = GeneralInformation(sample['filename'])
                    ft = general_info.get_filetype()
                    package = {'tags':sample['tag'],'file':sample['filename']}
                    if self.conf.getboolean('viper','enabled'):
                        viper.submit(package)
                    #determine to which worker the file is assigned based on the mime
                    match = yarasigs.scan(sample['filename'], rule='filetypes.yara')
                    if match == 'office_docs':
                        self.doc_q.put(sample)
                    elif match == 'executable':
                        self.pe_q.put(sample)
                    elif ft == 'application/zip' and match != 'java_archive':
                        # Generic zip archives (but not JARs) go to the zip worker:
                        self.zip_q.put(sample)
                    else:
                        self.other_q.put(sample)
                    #add the package to the modules for custom operations
                    self.mod_q.put(sample)
            else:
                self.log('distributor - {0} - no md5 given'.format(filename))
        else:
            self.log('distributor - {0} - matched with yara unwanted signature'.format(filename))
        return
IconRunner is classified as adware (also known as 'ad-supported' software). It is designed to generate revenue. IconRunner can show unwanted ads within Edge, Firefox, Microsoft Internet Explorer and Google Chrome. As a result of the adware infection, your web browser will be full of ads (pop-ups, banners, in-text links, offers to install unwanted software) and your system will become slower and slower. It is likely that you are troubled by IconRunner, which causes undesired ads to appear. You should not disregard this undesired software. The 'ad-supported' software might not only show intrusive ads but also redirect your internet browser to shady sites. What is more, the adware can analyze your browsing, gain access to your private information and, subsequently, use it for marketing purposes. Thus, there are more than enough reasons to get rid of IconRunner from your machine. In addition, as was mentioned earlier, the IconRunner adware also modifies all web-browser shortcut files located on your Desktop or Start menu. As a result, every affected shortcut will try to redirect your web browser to intrusive ad sites, some of which might be malicious. This can make the whole computer more vulnerable to hacker attacks. It is important that you do not use the IconRunner software and do not click on its ads. The best option is to use the steps posted below to free your PC of the IconRunner adware and thus delete any unwanted ads. When adware gets installed on your PC without your knowledge, it is not easy to remove. In most cases, there is no uninstall program that simply deletes the IconRunner adware — created to redirect your internet browser to various ad web pages — from your system. So, we recommend using several well-proven free specialized tools such as Zemana Anti-Malware, MalwareBytes Free or HitmanPro.
But the best solution to remove the IconRunner adware is to perform several manual steps and then additionally use anti-malware utilities. The few simple steps below will help you delete the IconRunner adware. These IconRunner removal steps work for Microsoft Internet Explorer, Edge, Google Chrome and Firefox, as well as every version of the Microsoft Windows operating system. First, you should try to identify and remove the program that causes the annoying advertisements or browser redirects, using 'Uninstall a program', which is located in the Control Panel. It will show a list of all applications installed on the computer. Scroll through the whole list and delete dubious and unknown applications. To quickly find the most recently installed applications, we recommend sorting the software by date. If the Mozilla Firefox browser is hijacked, then resetting its settings can help. The Reset feature is available in all modern versions of Firefox. A reset can fix many issues by restoring Firefox settings such as the start page, default search engine and new tab page to their default state. It will preserve your personal information, such as saved passwords, bookmarks, and open tabs. First, run Mozilla Firefox and press the menu button. It will show a drop-down menu on the right part of the browser. Next, press the Help button ( ) as in the image below. In the Help menu, select the 'Troubleshooting Information' option. Another way to open the 'Troubleshooting Information' screen is to type 'about:support' in the browser address bar and press Enter. It will open the 'Troubleshooting Information' page as shown in the following example. In the upper-right corner of this screen, click the 'Refresh Firefox' button. It will show a confirmation dialog box. Next, click the 'Refresh Firefox' button. Firefox will start a procedure to fix the problems caused by the IconRunner ad-supported software that redirects your web browser to annoying ad sites.
After, it is finished, click the “Finish” button. Reset Google Chrome settings is a simple way to delete the browser hijackers, harmful and ‘ad-supported’ extensions, as well as to recover the web browser’s search provider, homepage and new tab page that have been replaced by IconRunner ad supported software that causes web browsers to display intrusive pop-up ads. First run the Chrome and press Menu button (small button in the form of three dots). It will show the Chrome main menu. Choose More Tools, then click Extensions. Once this procedure is finished, your browser’s home page, search provider and newtab page will be restored to their original defaults. The Internet Explorer reset is great if your web browser is hijacked or you have unwanted addo-ons or toolbars on your internet browser, that installed by an malware. First, launch the Internet Explorer, then press ‘gear’ icon . It will open the Tools drop-down menu on the right part of the internet browser, then click the “Internet Options” as shown in the figure below. In the “Internet Options” screen, select the “Advanced” tab, then click the “Reset” button. The IE will show the “Reset Internet Explorer settings” dialog box. Further, press the “Delete personal settings” check box to select it. Next, click the “Reset” button like below. When the task is complete, click “Close” button. Close the IE and reboot your PC system for the changes to take effect. This step will help you to restore your web-browser’s search provider by default, newtab page and start page to default state. Once installed, the IconRunner adware can add a task in to the Windows Task Scheduler Library. Due to this, every time when you launch your personal computer, it will open an intrusive web-site. So, you need to check the Task Scheduler Library and get rid of all harmful tasks that have been created by ‘ad-supported’ programs. Press Windows and R keys on your keyboard together. It will open a dialog box that titled as Run. 
In the text field, type “taskschd.msc” (without the quotes) and press OK. Task Scheduler window opens. In the left-hand side, press “Task Scheduler Library”, as displayed on the screen below. In the middle part you will see a list of installed tasks. Select the first task, its properties will be open just below automatically. Next, click the Actions tab. Necessary to look at the text which is written under Details. If you are not sure that executes the task, then google it. If it is a component of the malicious program, then this task also should be removed. Further click on it with the right mouse button and select Delete like below. Repeat this step, if you have found a few tasks that have been created by ‘ad-supported’ applications. Once is finished, close the Task Scheduler window. Open the properties of the web-browser shortcut file. Right click on the shortcut file of affected web browser and select the “Properties” option and it will open the properties of the shortcut. Next, choose the “Shortcut” tab and have a look at the Target field as displayed below. Another variant, an address has been added at the end of the line. In this case the Target field looks such as …Application\chrome.exe” http://site.address like below. In order to fix the infected shortcut file, you need to insert right path to the Target field or remove an address (if it has been added at the end). You can use the following information to fix your shortcut files that have been changed by IconRunner which redirects your browser to intrusive ad web-sites. Once is done, click OK to save changes. Repeat the step for all web browsers which are re-directed to the IconRunner ads. Manual removal is not always as effective as you might think. Often, even the most experienced users can not completely get rid of IconRunner adware that causes multiple annoying advertisements and pop-ups. 
So, we advise to scan your computer for any remaining malicious components with free adware removal applications below. We recommend using the Zemana Anti-malware which are completely clean your computer of the ad-supported software. The utility is an advanced malware removal program created by (c) Zemana lab. It is able to help you remove potentially unwanted apps, browser hijackers, malicious software, toolbars, ransomware and IconRunner which redirects your internet browser to annoying ad pages. Download Zemana on your PC system by clicking on the following link. Once the downloading process is finished, close all apps and windows on your machine. Open a folder in which you saved it. Double-click on the icon that’s named Zemana.AntiMalware.Setup. Once setup is complete, click the “Scan” button to scan for IconRunner adware that causes undesired advertisements. A system scan may take anywhere from 5 to 30 minutes, depending on your PC system. During the scan Zemana Anti Malware (ZAM) will detect threats present on your machine. Once Zemana Free completes the scan, Zemana Anti-Malware (ZAM) will open you the results. When you are ready, press “Next”. Once that process is done, you may be prompted to reboot your system. HitmanPro cleans your PC from browser hijackers, potentially unwanted software, unwanted toolbars, browser extensions and other unwanted programs like IconRunner adware which designed to redirect your browser to various ad web-pages. The free removal tool will help you enjoy your computer to its fullest. HitmanPro uses advanced behavioral detection technologies to search for if there are undesired apps in your computer. You can review the scan results, and choose the threats you want to uninstall. Download HitmanPro on your computer from the link below. When downloading is finished, launch the HitmanPro, double-click the HitmanPro.exe file. 
In the Hitman Pro window, click the “Next” to perform a system scan for the IconRunner adware that causes multiple unwanted advertisements and popups. Depending on your computer, the scan can take anywhere from a few minutes to close to an hour. While the HitmanPro program is scanning, you can see number of objects it has identified as threat. Once Hitman Pro has completed scanning, you can check all items found on your computer. Make sure all threats have ‘checkmark’ and click “Next”. Now, click the “Activate free license” button to begin the free 30 days trial to remove all malicious software found. We advise using the Malwarebytes Free that are completely clean your PC system of the ‘ad supported’ software. The free utility is an advanced malicious software removal program made by (c) Malwarebytes lab. This program uses the world’s most popular antimalware technology. It is able to help you get rid of unwanted IconRunner from your browsers, PUPs, malicious software, browser hijackers, toolbars, ransomware and other security threats from your system for free. Download MalwareBytes AntiMalware on your system from the link below. After downloading is done, close all applications and windows on your computer. Open a directory in which you saved it. Double-click on the icon that’s called mb3-setup as on the image below. Once installation is finished, you’ll see window as displayed on the screen below. Now click the “Scan Now” button. MalwareBytes AntiMalware (MBAM) application will scan through the whole system for the IconRunner adware which cause intrusive ads to appear. A system scan can take anywhere from 5 to 30 minutes, depending on your computer. When a threat is found, the count of the security threats will change accordingly. After the scan is done, MalwareBytes Anti Malware (MBAM) will prepare a list of unwanted and ad supported software. Review the report and then press “Quarantine Selected” button. 
The Malwarebytes will now delete IconRunner adware that causes multiple undesired ads and pop-ups. When that process is complete, you may be prompted to restart your computer. The following video explains guide on how to remove browser hijacker infection, adware and other malware with MalwareBytes Anti-Malware (MBAM). By installing an ad blocker program such as AdGuard, you are able to block autoplaying video ads and get rid of a large amount of distracting and intrusive ads on pages. Please go to the link below to download the latest version of AdGuard for Microsoft Windows. Save it to your Desktop. When the download is finished, launch the downloaded file. You will see the “Setup Wizard” program window. Follow the prompts. Once the installation is done, click “Skip” to close the setup program and use the default settings, or press “Get Started” to see an quick tutorial that will help you get to know AdGuard better. In most cases, the default settings are enough and you don’t need to change anything. Each time, when you run your PC, AdGuard will run automatically and stop pop up ads, as well as other harmful or misleading web-pages. For an overview of all the features of the program, or to change its settings you can simply double-click on the icon named AdGuard, that can be found on your desktop. After completing the steps shown above, your system should be clean from this adware and other malicious software. The Microsoft Edge, Chrome, Internet Explorer and Mozilla Firefox will no longer display intrusive IconRunner ads. Unfortunately, if the step by step guidance does not help you, then you have caught a new ad supported software, and then the best way – ask for help. Please start a new thread by using the “New Topic” button in the Spyware Removal forum. When posting your HJT log, try to give us some details about your problems, so we can try to help you more accurately. 
Wait for one of our trained “Security Team” or Site Administrator to provide you with knowledgeable assistance tailored to your problem with the annoying IconRunner adware.
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Build script for the Paella player: concatenates the core sources and the
# plugin sources into one javascript file (optionally minified with the YUI
# compressor) and the plugin stylesheets into one css file.
import sys
import os
import subprocess
import shutil
import json
import argparse
from subprocess import call

# Default input/output locations; overridable from the command line below.
pluginDir = 'plugins/'
paellaDir = 'src/'
javascriptFile = 'javascript/paella_player.js'
cssFile = 'plugins/plugins.css'

arguments = argparse.ArgumentParser(description="Compile plugins, javascript and style sheet files.")
arguments.add_argument('--src', help='Source directory')
arguments.add_argument('--js', help='Javascript output file, with path')
arguments.add_argument('--css', help='Stylesheet output file, with path')
arguments.add_argument('--debug', action='store_true', help='do not minimize output javascript code')
arguments.add_argument('--install', action='store_true', help='generate production output files')
arguments.add_argument('--noplugins', action='store_true', help='add plugins')

# Scratch directory for the per-file intermediate copies.
intermediatePath = 'tmp'
if not os.path.exists(intermediatePath):
    os.makedirs(intermediatePath)

args = arguments.parse_args()
if args.src:
    pluginDir = args.src
if args.js:
    javascriptFile = args.js
if args.css:
    cssFile = args.css

# Production builds write straight to the final locations; otherwise the
# concatenated output goes into the scratch directory.
if args.install:
    jsOut = open(javascriptFile, 'w')
    cssOut = open(cssFile, 'w')
else:
    jsOut = open(os.path.join(intermediatePath, 'javascript_output.o'), 'w')
    cssOut = open(os.path.join(intermediatePath, 'css_output.o'), 'w')

# Copy every core source file into the intermediate directory, separated by
# blank lines so concatenation stays readable.
paellaFiles = os.listdir(paellaDir)
paellaFiles.sort()
for fname in paellaFiles:
    outPath = os.path.join(intermediatePath, fname)
    outFile = open(outPath, 'w')
    jsPath = paellaDir + fname
    outFile.write(open(jsPath).read())
    outFile.write('\n\n')
    outFile.close()

pluginFiles = os.listdir(pluginDir)
pluginFiles.sort()
# ignore.json lists plugin files excluded from the build.
# (The original leaked this file handle; close it once parsed.)
f = open(pluginDir + 'ignore.json')
ignoreFiles = json.loads(f.read())
f.close()

if not args.noplugins:
    for fname in pluginFiles:
        jsPath = pluginDir + fname
        fileName, fileExtension = os.path.splitext(jsPath)
        cssPath = fileName + '.css'
        if fileExtension == '.js' and not (fname in ignoreFiles):
            outPath = os.path.join(intermediatePath, fname)
            outFile = open(outPath, 'w')
            outFile.write(open(jsPath).read())
            outFile.write('\n\n')
            outFile.close()
            # A plugin may ship a stylesheet with the same base name.
            if os.path.exists(cssPath):
                cssOut.write(open(cssPath).read())
                cssOut.write('\n\n')
cssOut.close()

# Concatenate every intermediate file, minifying each .js first unless
# --debug was given.
intermediateFiles = os.listdir(intermediatePath)
intermediateFiles.sort()
for fname in intermediateFiles:
    filePath = os.path.join(intermediatePath, fname)
    fileName, fileExtension = os.path.splitext(filePath)
    if not args.debug and fileExtension == '.js':
        command = "java -jar yuicompressor.jar " + filePath + " -o " + filePath
        print(command)
        subprocess.check_call(command, shell=True)
    print("adding " + filePath + " to " + javascriptFile)
    jsOut.write(open(filePath).read())
jsOut.close()
shutil.rmtree(intermediatePath)
In 1995 the Australian, State and Territory Governments agreed to a program of competition policy reform. There has been substantial progress in the implementation of the National Competition Policy (NCP) over the past eight years, including in the related reform areas of electricity, gas, road transport and water. This report examines the impact of NCP and related reforms undertaken to date on the Australian economy and the Australian community more broadly. It includes a snapshot of progress in NCP implementation; the contribution of NCP to Australia's recent economic performance; price and service quality outcomes for businesses and households, and impacts on the financial performance of government business enterprises; social, employment, regional and environmental impacts; lessons learnt from NCP processes and outcomes that could help inform the development and implementation of a future nationally coordinated reform agenda; and reform opportunities.
from distutils.core import setup import py2exe, sys, os sys.argv.append('py2exe') DATA=[('imageformats',[ 'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qjpeg4.dll', 'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qgif4.dll', 'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qico4.dll', 'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qmng4.dll', 'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qsvg4.dll', 'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qtiff4.dll', ]), ('', ['C:\Users\yasoob\Documents\GitHub\youtube-dl-GUI\\ffmpeg.exe'])] for files in os.listdir(os.path.join(os.getcwd(),'UI')): f1 = os.path.join(os.getcwd(),'UI', files) if os.path.isfile(f1): # skip directories f2 = 'UI', [f1] DATA.append(f2) setup( options = {'py2exe': {'compressed': True,"includes":["sip"]}}, windows = [{ 'script': "main.py", "icon_resources": [(0, os.path.join(os.getcwd(),"resources","converted_icon.ico"))], "dest_base":"youtube-gl", }], zipfile = None, data_files = DATA, )
The Fulfillment concept takes a list of requirements and compares it with a list that may satisfy those requirements to produce a summary of requirements that are fulfilled or satisfied as of a particular date. For Vehicle Inspection Tracking, the Inspection Schedule contains a list of Inspection Types that will be considered the requirements. The Inspection History will be used to satisfy the requirements. Select a Matching process to proceed with the Fulfillment configuration. The next step, after configuring the Matching process, is to configure the Requirements and Fulfilled by sections. Choose the Age override; this identifies how long a particular Inspection Type is good for — that is, when an Inspection of that type is completed, how long until it needs to be completed again. For this solution optional requirements have not been identified and all Inspection Types in the Inspection Schedule will be considered mandatory. As a result, the Requirements (optional) fields can be left blank or n/a. Choose the End Date field that determines the end date the Inspection is valid to. In this case, the Inspect Again After field will identify how long the Inspection is valid for, so the End Date can be left as n/a. Click Today to show a detailed listing of the Inspections.
from itertools import chain
from string import printable
import unicodedata

__all__ = ["each_index_of_string", "identity", "is_printable", "list_packer", "safeint"]


def each_index_of_string(string, corpus):
    """Finds all occurrences of a given string in a corpus.

    Args:
        string (str): the string to search
        corpus (str): the string to search in

    Yields:
        a start index for each occurrence of the string within the corpus,
        in ascending order
    """
    start = -1
    while True:
        start = corpus.find(string, start + 1)
        if start < 0:
            return
        yield start


def flatten(iterable):
    """Flattens an iterable yielding iterables into a single iterable."""
    return chain.from_iterable(iterable)


def identity(arg):
    """An identity function that simply returns its argument."""
    return arg


# Text (Unicode) string type of the running interpreter; is_printable()
# uses it to pick the appropriate printability test.
try:
    _text_type = unicode          # Python 2
except NameError:
    _text_type = str              # Python 3

# Characters considered printable for raw byte strings. This replaces the
# original maketrans()-based lookup table, whose character indexing only
# worked on Python 2; membership in string.printable is exactly the same
# predicate and works on both major versions.
_printable_chars = frozenset(printable)


def is_printable(string):
    """Returns whether the given string consists of printable characters only.

    If the string is a Unicode string, this function uses the ``unicodedata``
    module to decide which characters are printable (any character that is
    not a control character). If the string contains raw bytes, it considers
    the characters in ``string.printable`` as printable.
    """
    if isinstance(string, _text_type):
        return all(unicodedata.category(char) != 'Cc' for char in string)
    return all(char in _printable_chars for char in string)


def list_packer(*args):
    """An identity-like function that creates a list from its arguments."""
    # The original returned the *args tuple directly, contradicting both the
    # function name and this docstring; return a real list.
    return list(args)


def _safe_conversion(value, converter, default=None):
    """Pipes a value through a converter function and returns the converted
    value or the default value if there was an exception during the
    conversion.

    Args:
        value: the value to convert
        converter (callable): a callable that converts the value to the
            result. Must accept a single argument only.
        default: the default value to return in case of an unsuccessful
            conversion.

    Returns:
        the converted value if the conversion was successful or the default
        value otherwise.
    """
    try:
        return converter(value)
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt and
        # SystemExit; only genuine conversion errors should be masked.
        return default


def safeint(value, default=None):
    """Tries to convert a value given as a string to an integer. Returns the
    default value if the value cannot be converted.

    Args:
        value (str): the value to turn into an integer
        default (object): the default value to return if the given value
            cannot be converted into an integer

    Returns (int or object):
        the integer value converted from the given value, or the default
        value if the conversion was unsuccessful.
    """
    return _safe_conversion(value, int, default)
Deep Learning on visual data in the context of autonomous driving. We are offering a thesis in collaboration with BMW focusing on the application of deep learning on visual data in the context of autonomous driving. Therefore, we are looking for motivated students who ideally have already acquired some experience in computer vision and/or machine learning. If you are interested in working on a thesis in this field, please contact Jakob Mayr or Federico Tombari. Abstract: We are offering a thesis in collaboration with BMW focusing on the application of deep learning on visual data in the context of autonomous driving. Therefore, we are looking for motivated students who ideally have already acquired some experience in computer vision and/or machine learning. If you are interested in working on a thesis in this field, please contact Jakob Mayr or Federico Tombari.
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

# The original selected a parser through ``six`` with broken syntax
# (``if six.PY2`` without a colon); a plain ImportError fallback needs no
# third-party dependency. SafeConfigParser was deprecated on Python 3 and
# removed in 3.12, hence the nested fallback to ConfigParser.
try:
    from ConfigParser import SafeConfigParser as _ConfigParser       # Python 2
except ImportError:
    try:
        from configparser import SafeConfigParser as _ConfigParser  # Python 3 < 3.12
    except ImportError:
        from configparser import ConfigParser as _ConfigParser      # Python 3.12+


class AggregateMicroPathConfig:
    """Configuration for an aggregate micro-path run.

    Parses an INI-style file with an ``[AggregateMicroPath]`` section and
    exposes the table schema, filter, bounding-box and resolution settings
    as attributes. The bounding box is also converted into integer grid
    cell bounds (tripLatMin..tripLonMax) at the configured resolution.
    """

    # Class-level defaults; real values are assigned per instance in
    # __init__().
    config_file = ""
    table_name = ""
    table_schema_id = ""
    table_schema_dt = ""
    table_schema_lat = ""
    table_schema_lon = ""
    time_filter = 0
    distance_filter = 0
    tripLat1 = 0
    tripLon1 = 0
    tripLat2 = 0
    tripLon2 = 0
    tripname = ""
    resolutionLat = 0
    resolutionLon = 0
    tripLatMin = 0
    tripLatMax = 0
    tripLonMin = 0
    tripLonMax = 0
    triplineBlankets = []

    def __init__(self, config, basePath="./"):
        """Read ``basePath + config`` and populate all attributes.

        Args:
            config (str): configuration file name.
            basePath (str): directory prefix for the configuration file.
        """
        configParser = _ConfigParser()
        configParser.read(basePath + config)
        self.config_file = config
        self.database_name = configParser.get("AggregateMicroPath", "database_name")
        self.table_name = configParser.get("AggregateMicroPath", "table_name")
        self.table_schema_id = configParser.get("AggregateMicroPath", "table_schema_id")
        self.table_schema_dt = configParser.get("AggregateMicroPath", "table_schema_dt")
        self.table_schema_lat = configParser.get("AggregateMicroPath", "table_schema_lat")
        self.table_schema_lon = configParser.get("AggregateMicroPath", "table_schema_lon")
        # int() instead of the Python-2-only long(); Python 2 ints
        # auto-promote to long, so behaviour is unchanged there.
        self.time_filter = int(configParser.get("AggregateMicroPath", "time_filter"))
        self.distance_filter = int(configParser.get("AggregateMicroPath", "distance_filter"))
        # Bounding box: (tripLat1, tripLon1) is the lower-left corner,
        # (tripLat2, tripLon2) the upper-right corner.
        self.tripLat1 = float(configParser.get("AggregateMicroPath", "lower_left_lat"))
        self.tripLon1 = float(configParser.get("AggregateMicroPath", "lower_left_lon"))
        self.tripLat2 = float(configParser.get("AggregateMicroPath", "upper_right_lat"))
        self.tripLon2 = float(configParser.get("AggregateMicroPath", "upper_right_lon"))
        self.tripname = configParser.get("AggregateMicroPath", "trip_name")
        self.resolutionLat = float(configParser.get("AggregateMicroPath", "resolution_lat"))
        self.resolutionLon = float(configParser.get("AggregateMicroPath", "resolution_lon"))
        # Integer grid-cell bounds of the bounding box at the configured
        # resolution (floor of the minimum corner, ceil of the maximum).
        self.tripLatMin = int(math.floor(self.tripLat1 / self.resolutionLat))
        self.tripLatMax = int(math.ceil(self.tripLat2 / self.resolutionLat))
        self.tripLonMin = int(math.floor(self.tripLon1 / self.resolutionLon))
        self.tripLonMax = int(math.ceil(self.tripLon2 / self.resolutionLon))
        # Per-instance list. The original appended to the class-level list,
        # so every instance shared (and kept growing) the same blankets.
        self.triplineBlankets = [[
            self.tripLat1, self.tripLon1, self.tripLat2, self.tripLon2,
            self.tripname, self.resolutionLat, self.resolutionLon,
            self.tripLatMin, self.tripLatMax, self.tripLonMin, self.tripLonMax]]
        self.temporal_split = configParser.get("AggregateMicroPath", "temporal_split")
We are a nation at war with itself, cleaved in two by racial, ethnic and cultural differences, each side convinced of its own righteousness. These tumultuous times cry out for a leader of wisdom and maturity, patience and moderation, vision and moral clarity. Instead, we have President Donald J. Trump. He misses no opportunity to widen the gulf between us, to pour fuel on the raging fires of social and racial conflict, to stoke the smoldering embers of historic tensions. He is the divider-in-chief. Let’s be clear: There is absolutely no reason for the president of the United States to comment on the decision of some football players to kneel during the national anthem. And Trump hasn’t just commented. He has railed. He has ranted. He has thrown verbal punches, using an expletive to insist that NFL owners fire any player who fails to stand. It is no mere coincidence, of course, that most of the players who kneel are black. The social movement started last year, when Colin Kaepernick, then quarterback for the San Francisco 49ers, sat while the “The Star-Spangled Banner” was played before a televised game. “I am not going to stand up to show pride in a flag for a country that oppresses black people and people of color. … There are bodies in the street and people getting paid leave and getting away with murder,” he later told reporters, referring to police shootings of unarmed black men. Police violence was already high on the list of issues that divide us. According to an Associated Press-NORC Center poll conducted in 2015, more than 80 percent of black Americans said police are too quick to use deadly force and are more likely to use it against blacks. By contrast, about two-thirds of whites said that police use deadly force only when necessary; about 60 percent of whites said race is not a factor. Statistics, however, help explain the fears that stalk black families, especially those with sons. 
While black men make up only about 6 percent of the population, they accounted for one-third of the unarmed people killed by police in 2016, according to data assembled by The Washington Post. The nation needs a president who is judicious, who can defend the difficult job that police officers do — and the police officers who do it well — while helping all Americans to see the racism that still stalks the criminal justice system. Trump is not that president. He ran a campaign that fanned fears of black crime among his base, and he followed up with an attorney general, Jeff Sessions, who has all but encouraged police violence. Though Trump has laughably claimed that athletes’ sideline protests have “nothing to do with race,” he knows exactly what he is doing. He is resorting to the ugliest sort of pandering to rally his angry and largely white base of cultural conservatives. And there is a pattern here. Whenever Trump is experiencing political and policy failures in Washington — and he has hardly had a string of victories — he fans the flames of racial and cultural resentment. In the last several days, his latest effort to repeal Obamacare failed, and his favored candidate for the GOP Senate race in Alabama, Luther Strange, lost. This polarizing president has managed to frustrate even members of his own party, many of whom wish for more discipline and restraint from the Oval Office. But Trump is the natural outgrowth of a strategy that the Republican Party has employed for decades: stoking the fears and resentments of conservative whites who are uncomfortable with the pace of social change. That unfortunate strategy has helped the GOP hold on to power, but at what cost? A country irretrievably broken?
import functools import inspect import urllib def maybe_quote(str_or_none): if str_or_none is None: return None return urllib.quote(str_or_none) class EndpointMixin(object): _endpoint_parent = None @classmethod def endpoint_parts(cls): parts = [] parent = cls._endpoint_parent if parent: if isinstance(parent, str): parent = getattr(inspect.getmodule(cls), parent) cls._endpoint_parent = parent names.extend(parent.endpoint_parts()) part = [cls] if hasattr(cls, 'endpoint_name'): part.append(cls.endpoint_name) else: part.append('%ss' % cls.__name__.lower()) parts.append(part) return parts @classmethod def endpoint_path(cls, *args): args = list(args) optional = object() args.append(optional) path_parts = [] for part_cls, part_name in cls.endpoint_parts(): path_parts.append(part_name) part_arg = args.pop(0) if part_arg is optional: part_arg = '' if isinstance(part_arg, part_cls): part_arg = part_arg.endpoint_id() if isinstance(part_arg, int): part_arg = str(part_arg) if not isinstance(part_arg, (int, basestring)): raise TypeError, 'unsupported path argument %s' % type(part_arg) path_parts.append(part_arg) path_parts.extend(args[:-1]) return '/'.join(path_parts) def endpoint_id(self): return self.id def endpoint_setup(parent=None, name=None): def decorator(cls): if parent: cls._endpoint_parent = parent if name: cls.endpoint_name = name return cls return decorator
Provide comfort for the family of Julita Clark with a meaningful gesture of sympathy. To send flowers in memory of Julita Capati Clark, please visit our Heartfelt Sympathies Store. We encourage you to share your most beloved memories of Julita here, so that the family and other loved ones can always see it. You can upload cherished photographs, or share your favorite stories, and can even comment on those shared by others.
'''
Created on Jul 7, 2011

@author: Leo Andres (user)
'''

# Import the submodule explicitly: http.client is referenced below, and a
# bare ``import http`` only worked because urllib.request happens to import
# http.client itself as a side effect.
import http.client
import re
import socket
#import sys
import time
#import urllib
import urllib.error
import urllib.parse
import urllib.request
#import urllib2


class QueryEUtilsBase:
    """Base class for NCBI EUtilities queries.

    Holds the service URL plus retry/timeout settings and implements a
    single retrying HTTP request helper, run_eutils_request().
    """

    # Human-readable (title, description) pairs for HTTP status codes,
    # used to report urllib.error.HTTPError failures.
    http_error_codes = {
        100: ('Continue', 'Request received, please continue'),
        101: ('Switching Protocols', 'Switching to new protocol; obey Upgrade header'),
        200: ('OK', 'Request fulfilled, document follows'),
        201: ('Created', 'Document created, URL follows'),
        202: ('Accepted', 'Request accepted, processing continues off-line'),
        203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
        204: ('No Content', 'Request fulfilled, nothing follows'),
        205: ('Reset Content', 'Clear input form for further input.'),
        206: ('Partial Content', 'Partial content follows.'),
        300: ('Multiple Choices', 'Object has several resources -- see URI list'),
        301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
        302: ('Found', 'Object moved temporarily -- see URI list'),
        303: ('See Other', 'Object moved -- see Method and URL list'),
        304: ('Not Modified', 'Document has not changed since given time'),
        305: ('Use Proxy', 'You must use proxy specified in Location to access this resource.'),
        307: ('Temporary Redirect', 'Object moved temporarily -- see URI list'),
        400: ('Bad Request', 'Bad request syntax or unsupported method'),
        401: ('Unauthorized', 'No permission -- see authorization schemes'),
        402: ('Payment Required', 'No payment -- see charging schemes'),
        403: ('Forbidden', 'Request forbidden -- authorization will not help'),
        404: ('Not Found', 'Nothing matches the given URI'),
        405: ('Method Not Allowed', 'Specified method is invalid for this server.'),
        406: ('Not Acceptable', 'URI not available in preferred format.'),
        407: ('Proxy Authentication Required', 'You must authenticate with this proxy before proceeding.'),
        408: ('Request Timeout', 'Request timed out; try again later.'),
        409: ('Conflict', 'Request conflict.'),
        410: ('Gone', 'URI no longer exists and has been permanently removed.'),
        411: ('Length Required', 'Client must specify Content-Length.'),
        412: ('Precondition Failed', 'Precondition in headers is false.'),
        413: ('Request Entity Too Large', 'Entity is too large.'),
        414: ('Request-URI Too Long', 'URI is too long.'),
        415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
        416: ('Requested Range Not Satisfiable', 'Cannot satisfy request range.'),
        417: ('Expectation Failed', 'Expect condition could not be satisfied.'),
        500: ('Internal Server Error', 'Server got itself in trouble'),
        501: ('Not Implemented', 'Server does not support this operation'),
        502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
        503: ('Service Unavailable', 'The server cannot process the request due to a high load'),
        504: ('Gateway Timeout', 'The gateway server did not receive a timely response'),
        505: ('HTTP Version Not Supported', 'Cannot fulfill request.')}

    # Class-level defaults; real values are assigned through the setters.
    base_eutils_url = None
    maximum_tries = None
    sleep_delay = None
    timeout = None
    maximum_url_length = None

    def __init__(self, eutils_url_in):
        self.base_eutils_url = eutils_url_in

    def set_base_eutils_url(self, eutils_url_in):
        self.base_eutils_url = eutils_url_in

    def get_base_eutils_url(self):
        return self.base_eutils_url

    def set_maximum_tries(self, maximum_tries_in):
        self.maximum_tries = int(maximum_tries_in)

    def get_maximum_tries(self):
        return self.maximum_tries

    def set_sleep_delay(self, sleep_delay_in):
        self.sleep_delay = int(sleep_delay_in)

    def get_sleep_delay(self):
        return self.sleep_delay

    def set_timeout(self, timeout_in):
        self.timeout = int(timeout_in)

    def get_timeout(self):
        return self.timeout

    def set_maximum_url_length(self, maximum_url_length_in):
        self.maximum_url_length = int(maximum_url_length_in)

    def get_maximum_url_lengt(self):
        # Misspelled name kept for backward compatibility with existing
        # callers; prefer get_maximum_url_length().
        return self.maximum_url_length

    def get_maximum_url_length(self):
        """Correctly spelled alias for get_maximum_url_lengt()."""
        return self.maximum_url_length

    def run_eutils_request(self, eutils_variables_in):
        """Perform one EUtilities HTTP request, retrying on failure.

        Args:
            eutils_variables_in (dict): request parameters; entries whose
                value is None are omitted from the request.

        Returns:
            bytes: the raw XML response body, or None when maximum_tries
            was exceeded without a successful response.
        """
        attempt_number = 0
        # Drop unset (None) parameters so they are not sent to the server.
        eutils_request_variables = {}
        for dict_key in eutils_variables_in:
            if eutils_variables_in[dict_key] is not None:
                eutils_request_variables[dict_key] = eutils_variables_in[dict_key]
        print('\nDoing EUtilities request at '
              + time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime())
              + '\n' + self.base_eutils_url + '?'
              + urllib.parse.urlencode(eutils_request_variables) + '\n')
        while True:
            xml_string = None
            # Throttle between attempts (NCBI rate limiting).
            time.sleep(self.sleep_delay)
            try:
                response = None
                if self.maximum_url_length is not None and self.maximum_url_length <= 1600:
                    # Short URL limit configured: use a GET request.
                    response = urllib.request.urlopen(
                        url=self.base_eutils_url + '?'
                            + urllib.parse.urlencode(eutils_request_variables),
                        timeout=self.timeout)
                else:
                    # Otherwise send the parameters as POST data.
                    response = urllib.request.urlopen(
                        url=self.base_eutils_url,
                        data=urllib.parse.urlencode(eutils_request_variables).encode('utf-8'),
                        timeout=self.timeout)
                xml_string = response.read()
            # Specific handlers must precede the generic OSError handler:
            # on Python 3, HTTPError, URLError and socket.timeout are all
            # OSError subclasses, so the original ordering (OSError first)
            # made every branch below it unreachable. socket.error is an
            # alias of OSError on Python 3, so its separate branch is gone.
            except urllib.error.HTTPError as er:
                detail = self.http_error_codes.get(er.code, ('Unknown', str(er)))
                print(str(er.code) + ": " + detail[1])
            except socket.timeout:
                print('Request timed out.')
            except urllib.error.URLError as er:
                print(er)
            except http.client.BadStatusLine as bsl:
                # ADD SOMETHING HERE TO DESCRIBE THE ERROR BETTER
                print('Bad status line (?).')
                print(bsl)
            except OSError as ose:
                if str(ose).strip() == '[Errno 11004] getaddrinfo failed':
                    # No network connection: do not count this as a try.
                    print('Network connection unavailable.')
                    attempt_number -= 1
                else:
                    print('OSError')
                    print(ose)
            if xml_string is not None:
                break
            attempt_number += 1
            print('Search result invalid. Attempt ' + str(attempt_number) + '.')
            if self.maximum_tries < attempt_number:
                print('Maximum tries exceeded.')
                break
        return xml_string


class IteratePubMedESearchResults(QueryEUtilsBase):
    """Iterator over the PubMed ids returned by an ESearch query.

    Transparently pages through the result set: when the current page of
    ids is exhausted, the next page is fetched with an adjusted retstart.
    """

    # Class-level defaults; real values are assigned in __init__().
    result_count = 0
    result_return_maximum = 0
    result_return_start = 0
    result_idlist_iter = None
    eutils_esearch_variables = None

    def __init__(self, esearch_settings_in):
        """Configure the query from ``esearch_settings_in`` and fetch the
        first page of results.

        The settings dict supplies the ESearch parameters (term, retmax,
        usehistory, ...) plus base_address, sleep_delay, maximum_tries and
        timeout for the transport.
        """
        self.result_count = 0
        self.result_return_maximum = 0
        self.result_return_start = 0
        self.eutils_esearch_variables = {
            'rettype': 'uilist',
            'retstart': 0,
            'retmax': None,
            'db': 'pubmed',
            'usehistory': None,
            'term': None,
            'email': None,
            'tool': None,
            'query_key': None,
            'WebEnv': None}
        # Copy any setting the caller provided for a still-unset parameter.
        for dict_key in self.eutils_esearch_variables:
            try:
                if (self.eutils_esearch_variables[dict_key] is None
                        and esearch_settings_in[dict_key] is not None):
                    self.eutils_esearch_variables[dict_key] = esearch_settings_in[dict_key]
            except KeyError:
                pass
        self.eutils_esearch_variables['query_key'] = None
        self.eutils_esearch_variables['retstart'] = 0
        self.set_base_eutils_url(esearch_settings_in['base_address'] + '/esearch.fcgi')
        self.set_sleep_delay(esearch_settings_in['sleep_delay'])
        self.set_maximum_tries(esearch_settings_in['maximum_tries'])
        self.set_timeout(esearch_settings_in['timeout'])
        self.result_idlist_iter = None
        self.__run_eutils_esearch_request()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.result_idlist_iter.__next__().group(1).decode('utf-8')
        except StopIteration:
            # Current page exhausted: stop if the last page was reached,
            # otherwise fetch the next page and continue.
            if self.result_count <= (self.result_return_maximum + self.result_return_start):
                raise StopIteration
            else:
                self.eutils_esearch_variables['retstart'] = (
                    self.eutils_esearch_variables['retstart']
                    + self.eutils_esearch_variables['retmax'])
                if self.result_count <= (self.eutils_esearch_variables['retstart']
                                         + self.eutils_esearch_variables['retmax']):
                    print('\nRetrieving Articles '
                          + str(self.eutils_esearch_variables['retstart'] + 1)
                          + ' to ' + str(self.result_count) + '.')
                else:
                    print('\nRetrieving Articles '
                          + str(self.eutils_esearch_variables['retstart'] + 1)
                          + ' to '
                          + str(self.eutils_esearch_variables['retstart']
                                + self.eutils_esearch_variables['retmax'])
                          + '.')
                self.result_idlist_iter = None
                self.__run_eutils_esearch_request()
                try:
                    return self.result_idlist_iter.__next__().group(1).decode('utf-8')
                except StopIteration:
                    raise StopIteration

    def get_query_key(self):
        return self.eutils_esearch_variables['query_key']

    def get_web_env(self):
        return self.eutils_esearch_variables['WebEnv']

    def get_result_count(self):
        return self.result_count

    def __run_eutils_esearch_request(self):
        """Issue one ESearch request and parse the count/paging fields and
        the id list out of the XML response."""
        while True:
            xml_string = self.run_eutils_request(self.eutils_esearch_variables)
            # NOTE(review): if run_eutils_request() exhausts its retries it
            # returns None, which makes re.search() raise TypeError here.
            # Behaviour kept from the original -- confirm intended handling.
            match = re.search(
                rb'<Count>(\d+)</Count>.*?<RetMax>(\d+)</RetMax>.*?<RetStart>(\d+)</RetStart>.*?(<IdList>.*?</IdList>)',
                xml_string, re.DOTALL)
            if match:
                break
            # An empty result set serializes the id list as <IdList/>.
            match = re.search(
                rb'<Count>(\d+)</Count>.*?<RetMax>(\d+)</RetMax>.*?<RetStart>(\d+)</RetStart>.*?(<IdList/>)',
                xml_string, re.DOTALL)
            if match:
                break
        self.result_count = int(match.group(1))
        self.result_return_maximum = int(match.group(2))
        self.result_return_start = int(match.group(3))
        self.result_idlist_iter = re.finditer(rb'<Id>(\d+)</Id>', match.group(4), re.DOTALL)
        try:
            self.eutils_esearch_variables['query_key'] = None
        except KeyError:
            pass
        try:
            self.eutils_esearch_variables['WebEnv'] = None
        except KeyError:
            pass
        try:
            # With usehistory=y the server returns a QueryKey/WebEnv pair
            # that replaces the search term on subsequent page requests.
            if self.eutils_esearch_variables['usehistory'] == 'y':
                match = re.search(
                    rb'<Count>\d+</Count>.*?<RetMax>\d+</RetMax>.*?<RetStart>\d+</RetStart>.*?<QueryKey>(.*?)</QueryKey>.*?<WebEnv>(.*?)</WebEnv>.*?<IdList>',
                    xml_string, re.DOTALL)
                if match:
                    self.eutils_esearch_variables['query_key'] = match.group(1).strip()
                    self.eutils_esearch_variables['WebEnv'] = match.group(2).strip()
                    self.eutils_esearch_variables['term'] = None
                else:
                    self.eutils_esearch_variables['usehistory'] = None
                    self.eutils_esearch_variables['query_key'] = None
                    self.eutils_esearch_variables['WebEnv'] = None
        except KeyError:
            pass


class IteratePubMedEFetchData(QueryEUtilsBase):
    result_count = 0
    result_return_maximum = 0
result_return_start = 0 efetch_pubmed_data_iter = None efetch_last_pubmed_id = None efetch_pubmed_id_iter = None eutils_efetch_variables = { 'retmode' : 'xml', 'retstart' : None, 'retmax' : None, 'db' : 'pubmed', 'usehistory' : None, 'email' : None, 'tool' : None, 'query_key' : None, 'WebEnv' : None} def __init__(self, efetch_settings_in, efetch_pubmed_id_iterable_in=None): for dict_key in self.eutils_efetch_variables: try: self.eutils_efetch_variables[dict_key] = efetch_settings_in[dict_key] except KeyError: pass if efetch_pubmed_id_iterable_in == None and self.eutils_efetch_variables['query_key'] != None: self.eutils_efetch_variables['retstart'] = 0 else: try: self.efetch_pubmed_id_iter = efetch_pubmed_id_iterable_in.__iter__() except AttributeError: self.efetch_pubmed_id_iter = [].__iter__() self.eutils_efetch_variables['query_key'] = None self.eutils_efetch_variables['retstart'] = None self.eutils_efetch_variables['retmax'] = None self.set_base_eutils_url(efetch_settings_in['base_address'] + '/efetch.fcgi') try: self.set_sleep_delay(int(efetch_settings_in['sleep_delay'])) except TypeError: pass try: self.set_maximum_tries(int(efetch_settings_in['maximum_tries'])) except TypeError: pass try: self.set_timeout(int(efetch_settings_in['timeout'])) except TypeError: pass try: self.set_maximum_url_length(int(efetch_settings_in['maximum_url_length'])) except TypeError: pass try: self.result_return_maximum = int(efetch_settings_in['retmax']) except TypeError: pass try: self.result_count = int(efetch_settings_in['result_count']) except TypeError: pass self.efetch_pubmed_data_iter = None def __iter__(self): return self def __next__(self): if self.eutils_efetch_variables['query_key'] != None: return self.next_by_query_key() else: return self.next_by_id_list() def next_by_query_key(self): try: return self.efetch_pubmed_data_iter.__next__().group(1) except StopIteration: self.eutils_efetch_variables['retstart'] = self.eutils_efetch_variables['retstart'] + 
self.eutils_efetch_variables['retmax'] except AttributeError: pass if self.eutils_efetch_variables['retstart'] >= self.result_count: self.efetch_pubmed_data_iter = [].__iter__() else: if self.result_count <= self.eutils_efetch_variables['retstart'] + self.eutils_efetch_variables['retmax']: print('\nRetrieving Articles ' + str(self.eutils_efetch_variables['retstart'] + 1) + ' to ' + str(self.result_count) + '.') else: print('\nRetrieving Articles ' + str(self.eutils_efetch_variables['retstart'] + 1) + ' to ' + str(self.eutils_efetch_variables['retstart'] + self.eutils_efetch_variables['retmax']) + '.') self.efetch_pubmed_data_iter = None self.__run_query_key_eutils_efetch_request() try: return self.efetch_pubmed_data_iter.__next__().group(1) except StopIteration: raise StopIteration def next_by_id_list(self): try: return self.efetch_pubmed_data_iter.__next__().group(1) except StopIteration: if self.maximum_url_length == None: raise StopIteration except AttributeError: pass self.efetch_pubmed_data_iter = None self.__run_id_list_eutils_efetch_request() try: return self.efetch_pubmed_data_iter.__next__().group(1) except StopIteration: raise StopIteration def __run_query_key_eutils_efetch_request(self): xml_string = None efetch_post_variables = {} for dict_key in self.eutils_efetch_variables: if self.eutils_efetch_variables[dict_key] != None: efetch_post_variables[dict_key] = self.eutils_efetch_variables[dict_key] while True: xml_string = self.run_eutils_request(efetch_post_variables) if xml_string != None: break self.efetch_pubmed_data_iter = re.finditer(b'(<PubmedArticle>.*?</PubmedArticle>)', xml_string, re.DOTALL) def __run_id_list_eutils_efetch_request(self): xml_string = None efetch_post_variables = {} efetch_pubmed_id_list = [] efetch_post_data = None for dict_key in self.eutils_efetch_variables: if self.eutils_efetch_variables[dict_key] != None: efetch_post_variables[dict_key] = self.eutils_efetch_variables[dict_key] if self.maximum_url_length == None: for 
list_item in self.efetch_pubmed_id_iter: efetch_pubmed_id_list.append(list_item) else: if self.efetch_last_pubmed_id != None: efetch_pubmed_id_list.append(self.efetch_last_pubmed_id) while True: efetch_post_variables['id'] = ','.join([str(list_item).strip() for list_item in efetch_pubmed_id_list]) self.efetch_last_pubmed_id = None try: self.efetch_last_pubmed_id = self.efetch_pubmed_id_iter.__next__() efetch_post_variables['id'] += ',' + str(self.efetch_last_pubmed_id).strip() except StopIteration: pass if self.maximum_url_length <= 1600: efetch_post_data = self.get_base_eutils_url() + '?' + urllib.parse.urlencode(efetch_post_variables) else: efetch_post_data = urllib.parse.urlencode(efetch_post_variables) if len(efetch_post_data) <= self.maximum_url_length: if self.efetch_last_pubmed_id != None: efetch_pubmed_id_list.append(self.efetch_last_pubmed_id) else: break else: break if len(efetch_pubmed_id_list) <= 0: self.efetch_pubmed_data_iter = [].__iter__() else: efetch_post_variables['id'] = ','.join([str(list_item).strip() for list_item in efetch_pubmed_id_list]) while True: xml_string = self.run_eutils_request(efetch_post_variables) if xml_string != None: break self.efetch_pubmed_data_iter = re.finditer(b'(<PubmedArticle>.*?</PubmedArticle>)', xml_string, re.DOTALL) class IteratePubMedCentralELinkCitedByPMCIDs(QueryEUtilsBase): elink_pmcid_iter = None base_address = None sleep_delay = None maximum_tries = None timeout = None eutils_elink_variables = { 'retmode' : 'xml', 'dbfrom' : 'pmc', 'db' : 'pmc', 'id' : None, 'email' : None, 'tool' : None} def __init__(self, elink_settings_in, elink_pmcids_in): for dict_key in self.eutils_elink_variables: try: self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key] except KeyError: pass try: self.eutils_elink_variables['usehistory'] = None except KeyError: pass try: self.eutils_elink_variables['query_key'] = None except KeyError: pass try: self.eutils_elink_variables['WebEnv'] = None except KeyError: pass 
self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi') self.eutils_elink_variables['id'] = elink_pmcids_in self.eutils_elink_variables['db'] = 'pmc' try: self.set_sleep_delay(int(elink_settings_in['sleep_delay'])) except TypeError: pass try: self.set_maximum_tries(int(elink_settings_in['maximum_tries'])) except TypeError: pass try: self.set_timeout(int(elink_settings_in['timeout'])) except TypeError: pass self.elink_pmcid_iter = None def __iter__(self): return self def __next__(self): try: return self.elink_pmcid_iter.__next__().group(1).decode('utf-8') except StopIteration: raise StopIteration except AttributeError: try: self.__run_elink_request() return self.elink_pmcid_iter.__next__().group(1).decode('utf-8') except AttributeError: raise StopIteration def __run_elink_request(self): xml_string = None match = None self.elink_pmcid_iter = None while True: xml_string = self.run_eutils_request(self.eutils_elink_variables) if xml_string != None: break match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pmc</DbTo>.*?<LinkName>pmc_pmc_citedby</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL) if match: self.elink_pmcid_iter = re.finditer(b'<Id>(.*?)</Id>', match.group(1), re.DOTALL) class IteratePubMedIDELinkCitedByPubMedIDs(QueryEUtilsBase): elink_pmcid_iter = None base_address = None sleep_delay = None maximum_tries = None timeout = None eutils_elink_variables = { 'retmode' : 'xml', 'dbfrom' : 'pubmed', 'db' : 'pubmed', 'id' : None, 'email' : None, 'tool' : None} def __init__(self, elink_settings_in, elink_pubmed_ids_in): for dict_key in self.eutils_elink_variables: try: self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key] except KeyError: pass try: self.eutils_elink_variables['usehistory'] = None except KeyError: pass try: self.eutils_elink_variables['query_key'] = None except KeyError: pass try: self.eutils_elink_variables['WebEnv'] = None except KeyError: pass self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi') 
self.eutils_elink_variables['id'] = elink_pubmed_ids_in self.eutils_elink_variables['db'] = 'pubmed' try: self.set_sleep_delay(int(elink_settings_in['sleep_delay'])) except TypeError: pass try: self.set_maximum_tries(int(elink_settings_in['maximum_tries'])) except TypeError: pass try: self.set_timeout(int(elink_settings_in['timeout'])) except TypeError: pass self.elink_pmcid_iter = None def __iter__(self): return self def __next__(self): try: return self.elink_pmcid_iter.__next__().group(1).decode('utf-8') except StopIteration: raise StopIteration except AttributeError: try: self.__run_elink_request() return self.elink_pmcid_iter.__next__().group(1).decode('utf-8') except AttributeError: raise StopIteration def __run_elink_request(self): xml_string = None match = None self.elink_pmcid_iter = None while True: xml_string = self.run_eutils_request(self.eutils_elink_variables) if xml_string != None: break match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pubmed</DbTo>.*?<LinkName>pubmed_pubmed_citedin</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL) if match: self.elink_pmcid_iter = re.finditer(b'<Id>(.*?)</Id>', match.group(1), re.DOTALL) class IteratePubMedCentralELinkToPubMedIDs(QueryEUtilsBase): elink_pmcid_iter = None elink_pubmed_id_iter = None elink_last_pubmed_id = None base_address = None sleep_delay = None maximum_tries = None timeout = None eutils_elink_variables = { 'retmode' : 'xml', 'dbfrom' : 'pmc', 'db' : 'pubmed', 'id' : None, 'email' : None, 'tool' : None} def __init__(self, elink_settings_in, elink_pmcid_iter_in): for dict_key in self.eutils_elink_variables: try: self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key] except KeyError: pass try: self.eutils_elink_variables['usehistory'] = None except KeyError: pass try: self.eutils_elink_variables['query_key'] = None except KeyError: pass try: self.eutils_elink_variables['WebEnv'] = None except KeyError: pass self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi') 
self.elink_pmcid_iter = elink_pmcid_iter_in self.eutils_elink_variables['db'] = 'pubmed' try: self.set_sleep_delay(int(elink_settings_in['sleep_delay'])) except TypeError: pass try: self.set_maximum_tries(int(elink_settings_in['maximum_tries'])) except TypeError: pass try: self.set_timeout(int(elink_settings_in['timeout'])) except TypeError: pass self.elink_pubmed_id_iter = None def __iter__(self): return self def __next__(self): try: return self.elink_pubmed_id_iter.__next__().group(1).decode('utf-8') except StopIteration: raise StopIteration except AttributeError: xml_string = None match = None elink_post_variables = {} elink_pmcid_list = [] elink_post_data = None for dict_key in self.eutils_elink_variables: if self.eutils_elink_variables[dict_key] != None: elink_post_variables[dict_key] = self.eutils_elink_variables[dict_key] if self.maximum_url_length == None: for list_item in self.elink_pmcid_iter: elink_pmcid_list.append(list_item) else: if self.efetch_last_pubmed_id != None: elink_pmcid_list.append(self.elink_last_pubmed_id) while True: elink_post_variables['id'] = ','.join([str(list_item).strip() for list_item in elink_pmcid_list]) self.elink_last_pubmed_id = None try: self.elink_last_pubmed_id = self.elink_pmcid_iter.__next__() elink_post_variables['id'] += ',' + str(self.elink_last_pubmed_id).strip() except StopIteration: pass if self.maximum_url_length <= 1600: elink_post_data = self.get_base_eutils_url() + '?' 
+ urllib.parse.urlencode(elink_post_variables) else: elink_post_data = urllib.parse.urlencode(elink_post_variables) if len(elink_post_data) <= self.maximum_url_length: if self.elink_last_pubmed_id != None: elink_pmcid_list.append(self.elink_last_pubmed_id) else: break else: break if len(elink_pmcid_list) <= 0: raise StopIteration else: self.elink_pubmed_id_iter = None self.eutils_elink_variables['id'] = ','.join([str(list_item).strip() for list_item in elink_pmcid_list]) while True: xml_string = self.run_eutils_request(self.eutils_elink_variables) if xml_string != None: break match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pubmed</DbTo>.*?<LinkName>pmc_pubmed</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL) if match: self.elink_pubmed_id_iter = re.finditer(b'<Id>(.*?)</Id>', match.group(1), re.DOTALL) try: return self.elink_pubmed_id_iter.__next__().group(1).decode('utf-8') except AttributeError: raise StopIteration class IteratePubMedIDELinkNeighborPubMedIDs(QueryEUtilsBase): base_address = None sleep_delay = None maximum_tries = None timeout = None eutils_elink_variables = { 'retmode' : 'xml', 'dbfrom' : 'pubmed', 'db' : 'pubmed', 'id' : None, 'cmd' : 'neighbor_score', 'email' : None, 'tool' : None} def __init__(self, elink_settings_in, elink_pubmed_ids_in): for dict_key in self.eutils_elink_variables: try: self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key] except KeyError: pass try: self.eutils_elink_variables['usehistory'] = None except KeyError: pass try: self.eutils_elink_variables['query_key'] = None except KeyError: pass try: self.eutils_elink_variables['WebEnv'] = None except KeyError: pass self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi') self.eutils_elink_variables['id'] = elink_pubmed_ids_in self.eutils_elink_variables['db'] = 'pubmed' try: self.set_sleep_delay(int(elink_settings_in['sleep_delay'])) except TypeError: pass try: self.set_maximum_tries(int(elink_settings_in['maximum_tries'])) except TypeError: pass 
try: self.set_timeout(int(elink_settings_in['timeout'])) except TypeError: pass self.elink_pmcid_iter = None def __iter__(self): return self def __next__(self): next_item = None try: next_item = self.elink_pmcid_iter.__next__() except StopIteration: raise StopIteration except AttributeError: try: self.__run_elink_request() next_item = self.elink_pmcid_iter.__next__() except AttributeError: raise StopIteration if next_item: match = re.search(b'<Id>(.*?)</Id>.*?<Score>(.*?)</Score>', next_item.group(1), re.DOTALL) if match: return (match.group(1).decode('utf-8'), int(match.group(2).decode('utf-8'))) raise StopIteration def __run_elink_request(self): xml_string = None match = None self.elink_pmcid_iter = None while True: xml_string = self.run_eutils_request(self.eutils_elink_variables) if xml_string != None: break match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pubmed</DbTo>.*?<LinkName>pubmed_pubmed</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL) if match: self.elink_pmcid_iter = re.finditer(b'<Link>(.*?)</Link>', match.group(1), re.DOTALL) class EUtilsPubMed: ''' classdocs ''' eutils_settings = { 'base_address' : 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils', 'sleep_delay' : 1, 'maximum_tries' : 3, 'timeout' : 60, 'maximum_url_length' : None, 'retmax' : 100000, 'usehistory' : 'y', 'term' : None, 'email' : None, 'tool' : None, 'query_key' : None, 'WebEnv' : None, 'result_count' : None} def __init__(self): ''' Constructor ''' def set_eutils_address(self, address_in): self.eutils_settings['base_address'] = str(address_in).strip().rstrip('/') def get_eutils_address(self): return self.eutils_settings['base_address'] def set_sleep_delay(self, delay_in): self.eutils_settings['sleep_delay'] = int(delay_in) def get_sleep_delay(self): return self.eutils_settings['sleep_delay'] def set_maximum_tries(self, tries_in): self.eutils_settings['maximum_tries'] = int(tries_in) def get_maximum_tries(self): return self.eutils_settings['maximum_tries'] def set_timeout(self, timeout_in): 
self.eutils_settings['timeout'] = int(timeout_in) def get_timeout(self): return self.eutils_settings['timeout'] def set_maximum_url_length(self, length_in): self.eutils_settings['maximum_url_length'] = int(length_in) def get_maximum_url_length(self): return self.eutils_settings['maximum_url_length'] def set_return_maximum(self, maximum_in): self.eutils_settings['retmax'] = int(maximum_in) def get_return_maximum(self): return self.eutils_settings['retmax'] def get_eutils_database(self): return self.eutils_settings['db'] def set_eutils_use_history(self, history_in): if history_in: self.eutils_settings['usehistory'] = 'y' else: try: del(self.eutils_settings['usehistory']) except KeyError: pass def get_eutils_use_history(self): try: return self.eutils_settings['usehistory'] except KeyError: return None def set_email_address(self, email_in): self.eutils_settings['email'] = email_in def get_email_address(self): return self.eutils_settings['email'] def set_tool_name(self, name_in): self.eutils_settings['tool'] = name_in def get_tool_name(self): return self.eutils_settings['tool'] def pubmed_esearch_id_iter(self, esearch_term_in): self.eutils_settings['term'] = esearch_term_in pubmed_esearch_results = IteratePubMedESearchResults(self.eutils_settings) self.eutils_settings['query_key'] = pubmed_esearch_results.get_query_key() self.eutils_settings['WebEnv'] = pubmed_esearch_results.get_web_env() self.eutils_settings['result_count'] = pubmed_esearch_results.get_result_count() return pubmed_esearch_results def pubmed_efetch_data_iter(self, efetch_pubmed_id_iterable_in): return IteratePubMedEFetchData(self.eutils_settings, efetch_pubmed_id_iterable_in) def pubmed_esearch_data_iter(self, esearch_term_in): self.eutils_settings['WebEnv'] = None self.eutils_settings['query_key'] = None self.eutils_settings['result_count'] = None self.eutils_settings['term'] = esearch_term_in pubmed_esearch_results = IteratePubMedESearchResults(self.eutils_settings) try: if 
self.eutils_settings['usehistory'] == 'y': self.eutils_settings['WebEnv'] = pubmed_esearch_results.get_web_env() self.eutils_settings['query_key'] = pubmed_esearch_results.get_query_key() self.eutils_settings['result_count'] = pubmed_esearch_results.get_result_count() return IteratePubMedEFetchData(self.eutils_settings) except KeyError: return IteratePubMedEFetchData(self.eutils_settings, pubmed_esearch_results) def elink_pmcid_cited_by_pmcids(self, elink_pmcids_in): return_iter = [] try: return_iter = IteratePubMedCentralELinkCitedByPMCIDs(self.eutils_settings, elink_pmcids_in.strip()) except AttributeError: pass return return_iter def elink_pmcids_link_to_pubmed_ids(self, pmcid_iter_in): return_iter = [] try: return_iter = IteratePubMedCentralELinkToPubMedIDs(self.eutils_settings, pmcid_iter_in) except AttributeError: pass return return_iter def elink_pubmed_id_cited_by_pubmed_ids(self, elink_pubmed_ids_in): return_iter = [] try: return_iter = IteratePubMedIDELinkCitedByPubMedIDs(self.eutils_settings, elink_pubmed_ids_in.strip()) except AttributeError: pass return return_iter def elink_pubmed_id_neighbor_pubmed_ids(self, elink_pubmed_ids_in): return_iter = [] try: return_iter = IteratePubMedIDELinkNeighborPubMedIDs(self.eutils_settings, elink_pubmed_ids_in.strip()) except AttributeError: pass return return_iter
Located in New London, New Hampshire, Garden Life was officially launched in 2011. We are a fine gardening company specializing in landscape and garden rejuvenation projects, garden design and seasonal care. We take pride in our customer service, level of professionalism and knowledge and respect of plants and our environment. We use innovative solutions to answer New England's climate challenges and diverse site conditions while creating beautiful outdoor spaces. We provide a range of services to accommodate our clients’ needs from a short consultation to a full-fledged design, installation, and care service. We are passionate about establishing long-lasting relationships to ensure the health, longevity and enjoyment of your landscape and gardens. Garden Life’s team of experienced designers and fine gardening crews are guided by the principle “work smarter, not harder.” With proper design, implementation and followup care, maintenance can be reduced to fewer visits a year after just several growing seasons. These gardens are beyond sustainable—they are ecologically regenerative. These gardens, as living systems, create their own soil and mulch, provide food and shelter for wildlife, heal or fight plant diseases and require no additional watering or irrigation once established. Additionally, we aim to educate our clients in these practices so that they may participate in the growth of their gardens. Born out of a commitment to think globally and act locally, Garden Life has joined the non-profit 1% For the Planet, pledging to donate one percent of our annual sales to support organizations focused on the environment. With our wide range of skills and experiences, Garden Life is committed to helping you achieve both large and small landscape and garden goals whether through consultation, design, enhancement, care or garden coaching. 
Weeks Lawn Care & Stonework is focused on providing high-quality service and customer satisfaction - we will do everything we can to meet your expectations. Lawn Care and Fertilization Services in Massachusetts. Rutland Turf Care is the lawn care division of Rutland Nurseries, a premier landscape design and construction firm for over 60 years in New England. We take the guesswork out of establishing and maintaining a healthy, green lawn that stays beautiful all season long. With the harsh winters and extreme fluctuations in temperature in Massachusetts, it is very important to choose a turf management specialist that understands the climate and soil in MA. Our lawn care programs have been developed with over 60 years of experience in the landscaping business right here in New England. Our team of highly trained specialists understands the nutritional needs of Massachusetts turf , and uses the highest quality lawn fertilizer and turf products available only to professionals. Rutland Turf Care also offers Mosquito & Tick Control Programs to keep your family safe and enjoying the outdoors all summer long! NEW- we also now offer Deer Control Programs to protect your landscape design investment from hungry deer during the harsh winter months. Lawn Fertilizer Service Lawn fertilization companies in Massachusetts have a great challenge with our harsh winters and extreme summer temperatures. Timing is everything when it comes to applying the right types of fertilizer and other lawn care products at the right time. The key to a healthy, green lawn is the RIGHT amount of fertilizer and weed control at the RIGHT time of year and at the RIGHT application rate. 
Besides proper fertilization, our turf management specialists also offer treatment for these common lawn issues: crabgrass management, grub control, dandelion control, insect control, aeration services, overseeding, fungicide applications, herbicide applications, lime applications. Lawn installations and restorations are also available. Dudley's Tree and Landscape is open year round. We are contractors for all landscaping construction and planning needs. This includes crane work for commercial uses other than landscape. 40+ years horticulture expertise in planting, transplanting, fertilizing, hydro seeding, new lawns, irrigation, land clearing, stump grinding and tree removal. The seasonal Garden center is open daily 9-5 for farm grown and finished retail flower and vegetable plants early Spring to mid Summer and for early Fall. In addition to the maintenance side of gardening, Weare, NH gardeners can help you with garden design and choosing specific plants and grass types for your climate and area. Find a Weare, NH gardener on Houzz. Narrow your search in the Professionals section of the website to Weare, NH gardeners. You can also look through Weare, NH photos to find examples of yards that you like, then contact the New Hampshire contractor who worked on them.
# Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2007 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Software, Inc. by e-mail at the
# following addresses: sales@sippysoft.com.
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.

from random import random
from hashlib import md5
from time import time

from .SipConf import SipConf
from .SipGenericHF import SipGenericHF

class SipCallId(SipGenericHF):
    """SIP Call-ID header field (RFC 3261), with random ID generation."""

    hf_names = ('call-id', 'i')
    body = None

    def __init__(self, body = None):
        """Wrap an existing Call-ID *body*, or generate a fresh random one."""
        SipGenericHF.__init__(self, body)
        self.parsed = True
        if body == None:
            # Reuse genCallId() instead of duplicating its expression here.
            self.genCallId()

    def __add__(self, other):
        return SipCallId(self.body + str(other))

    def genCallId(self):
        """Generate a random 'md5hex@local-address' Call-ID value."""
        # BUGFIX: hashlib.md5() requires bytes under Python 3; the original
        # passed a str and raised TypeError.  The salt is pure ASCII, so
        # encoding is safe on Python 2 as well.
        salt = str((random() * 1000000000) + time())
        self.body = md5(salt.encode('ascii')).hexdigest() + '@' + str(SipConf.my_address)

    def getCanName(self, name, compact = False):
        """Return the canonical header name ('i' when compact form is requested)."""
        if compact:
            return 'i'
        return 'Call-ID'
Product must be purchased from Huawei Arabia/ UAE / Website. a) The exchange can be accepted only if the period from the date of signing the POD (proof of delivery) to the date of raising the exchange request is within 14 days. b) For further help, please contact us via Huawei Hotline 80066600 or email support.mea@huawei.com, or chat with us by clicking the chat bubble icon on the home page. a. The exchange service is obtainable only for the product with the quality issues. b. The exchange can be accepted only if the period from the date of signing the POD (proof of delivery) to the date of raising the exchange request is within 7 days. c. Free exchange is only for host (Phone body). Charger, battery and accessory are excluded. All product packages which have been unsealed must be inspected by Huawei and get certified on faulty issue before proceeding to exchange. (a) The product is not sold by Huawei Arabia/ UAE website, according to the serial number or IMEI. (i) Software products are not eligible for warranty service of digital products in Huawei Arabia/ UAE. (a) Pictures on Huawei Arabia/ UAE web pages may vary slightly from the actual objects because of the light changes and display chromatic aberration. The effect presentation and diagram are synthetic and analog and are only for reference. The product appearance (including but not limited to the color) depends on the actual product. (b) Information provided in Huawei Arabia/ UAE (including but not limited to the product specifications and functions) may be limited and incomplete due to the limited space on websites. For detailed information, see related instruction manuals. (c) Huawei Arabia/ UAE do not handle the return or exchange due to problems caused by your incorrect operations or environment restrictions. (d) Huawei Arabia/ UAE do not provide the data export service for storage devices. Before exchanging storage devices to Huawei Arabia/ UAE, you need to be sure that data is exported.
Huawei Arabia/ UAE do not assume liabilities for data loss, damage, or leakage. (e) For faults that can be rectified through software upgrades on products, send the product to Huawei Authorized After-Sales Service Outlets for software upgrades. In such cases, products do not support exchange. Huawei reserves the right for the modification and final interpretation of this Policy within the applicable law.
#-------------------------------------------------------------------------------
# Name:        HastyLine_Assignment.py
#
# Purpose:     Create Quick Response Task stored in the Assignments layer from a
#              "Hasty Point" feature.
#
# Author:      Don Ferguson
#
# Created:     06/25/2012
# Copyright:   (c) Don Ferguson 2012
# Licence:
#     This program is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 3 of the License, or
#     (at your option) any later version.
#
#     This program is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     The GNU General Public License can be found at
#     <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
#!/usr/bin/env python

import arcpy

arcpy.env.overwriteOutput = "True"

# Source line layer and destination assignments layer.
fc1 = "Hasty_Line"
fc2 = "Assignments"

fieldName1 = "Length_miles"
fieldName2 = "PointA_X"
fieldName3 = "PointA_Y"
fieldName4 = "PointB_X"
fieldName5 = "PointB_Y"

# Geometry tokens: first/last vertex of each line, split into X and Y.
expression2 = "float(!shape.firstpoint!.split()[0])"
expression3 = "float(!shape.firstpoint!.split()[1])"
expression4 = "float(!shape.lastpoint!.split()[0])"
expression5 = "float(!shape.lastpoint!.split()[1])"

# Populate length and endpoint coordinate fields on every hasty line.
arcpy.CalculateField_management(fc1, fieldName1, "!SHAPE.length@miles!", "PYTHON")
arcpy.CalculateField_management(fc1, fieldName2, expression2, "PYTHON")
arcpy.CalculateField_management(fc1, fieldName3, expression3, "PYTHON")
arcpy.CalculateField_management(fc1, fieldName4, expression4, "PYTHON")
arcpy.CalculateField_management(fc1, fieldName5, expression5, "PYTHON")

# For each hasty line, insert one "Planned" assignment row describing the
# search segment between its two endpoints.
# NOTE(review): uses the classic (pre-arcpy.da) cursor API, as the original did.
rows = arcpy.InsertCursor(fc2)  # hoisted: one insert cursor for all rows
rows1 = arcpy.SearchCursor(fc1)
row1 = rows1.next()
while row1:
    Area_Name = row1.getValue("Area_Name")
    Length_miles = row1.getValue("Length_miles")
    Type = row1.getValue("Type")
    PtA_X = row1.getValue("PointA_X")
    PtA_Y = row1.getValue("PointA_Y")
    PtB_X = row1.getValue("PointB_X")
    PtB_Y = row1.getValue("PointB_Y")

    # Build the task description (length truncated to two decimal places,
    # coordinates truncated to whole units, matching the original output).
    Descrip1 = "Search along " + str(Area_Name) + " for a distance of " + \
               str(int(Length_miles*100.0)/100.0) + " miles"
    Descrip2 = " between point 1: " + str(int(PtA_X)) + " " + str(int(PtA_Y)) + \
               ", and point2: "
    Descrip3 = str(int(PtB_X)) + " " + str(int(PtB_Y)) + "."
    Descrip4 = (" Sweep 10 - 20 ft on each side of road/trail. Look for decision"
                " points and location where someone may leave the trail.")
    Descrip = Descrip1 + Descrip2 + Descrip3 + Descrip4

    # The original wrapped this in a one-iteration "while x <= 1" loop;
    # straight-line code has the same effect.
    row = rows.newRow()
    row.Description = Descrip
    row.Area_Name = Area_Name
    try:
        row.Priority = "High"
    except:
        # Priority field may not exist in every Assignments schema.
        pass
    row.Status = "Planned"
    row.Map_Scale = 24000
    row.Create_Map = "No"
    row.Previous_Search = "No"
    arcpy.AddMessage(Area_Name)
    rows.insertRow(row)
    del row

    row1 = rows1.next()

# Release cursor locks.
del rows
del row1
del rows1
the safety of the children and preventing accidents. Bonuses, great food, regular recognition for great performance, thank you notes and staff appreciation gifts… routinely! Please email your resume [and cover letter] for consideration to hiring@childrensparadise.com. Please indicate the position and location preference in the subject line. For example: Lead Preschool Teacher – Vista or Instructional Assistant – Infant Toddler – All locations. Thank you for your interest! We can’t wait to hear from you!
#!/usr/bin/python
# -*- coding: utf-8 -*-
# David Art <david.madbox@gmail.com>
# Program Arcade Games With Python And Pygame - Build a Platformer
# http://programarcadegames.com
import pygame
import random

WIDTH = 640
HEIGHT = 480


class Platform(pygame.sprite.Sprite):
    """A static platform block.

    NOTE(review): width/height are accepted but not used -- the sprite
    always takes the size of images/block.png.
    """

    def __init__(self, width, height):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('images/block.png')
        self.rect = self.image.get_rect()


class Raspberry(pygame.sprite.Sprite):
    """A collectible raspberry pickup."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('images/raspberry.png')
        self.rect = self.image.get_rect()


class Player(pygame.sprite.Sprite):
    """The player-controlled sprite with simple platformer physics."""

    change_x = 0                # horizontal speed, px/frame
    change_y = 0                # vertical speed, px/frame; gravity adds to it
    jump_ok = True              # True while the player may start a jump
    frame_since_collision = 0   # frames since the last vertical collision

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('images/player.png')
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y

    def update(self, blocks, raspberries):
        """Move horizontally then vertically, resolving collisions each axis."""
        self.rect.x += self.change_x

        # Collect (remove) any raspberries we touch.
        block_hit_list = pygame.sprite.spritecollide(self, raspberries, False)
        for raspberry in block_hit_list:
            raspberries.remove(raspberry)

        # Horizontal collision with platforms.
        block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
        for block in block_hit_list:
            # If we are moving right, set our right side to the left side of the item we hit
            if self.change_x > 0:
                self.rect.right = block.rect.left
            else:
                # Otherwise if we are moving left, do the opposite.
                self.rect.left = block.rect.right

        self.rect.y += self.change_y

        # Vertical collision with platforms.
        block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
        for block in block_hit_list:
            if self.change_y > 0:
                self.jump_ok = True

            # Keep track of the last time we hit something
            self.frame_since_collision = 0

            # Reset our position based on the top/bottom of the object.
            if self.change_y > 0:
                self.rect.bottom = block.rect.top
            else:
                self.rect.top = block.rect.bottom

            # Stop our vertical movement
            self.change_y = 0

        # If we haven't hit anything in a while, allow us jump
        if self.frame_since_collision > 2:
            self.jump_ok = False

        # Increment frame counter
        self.frame_since_collision += 1

    # Calculate effect of gravity.
    def calc_grav(self):
        """Apply gravity and clamp the player to the floor."""
        self.change_y += .4

        # See if we are on the ground.
        if self.rect.y >= HEIGHT-48 and self.change_y >= 0:
            self.change_y = 0
            self.rect.y = HEIGHT-48
            self.frame_since_collision = 0
            self.jump_ok = True

    # Called when user hits 'jump' button
    def jump(self, blocks):
        """Start a jump if the player is currently allowed to."""
        # If it is ok to jump, set our speed upwards
        if self.jump_ok:
            self.change_y = -9.81


class Game():
    """Container for game state.

    NOTE(review): this class is never instantiated by the module-level code
    below, which duplicates its setup instead.
    """

    def __init__(self, width=640, height=480, fullscreen=False):
        self.width = width
        self.height = height
        if fullscreen:
            flags = pygame.FULLSCREEN
        else:
            flags = 0
        pygame.init()
        self.screen = pygame.display.set_mode([width, height], flags, 32)
        pygame.display.set_caption("RaspJam")
        self.scene = Scene()
        # BUG FIX: this line was "bself.lock_list = pygame.sprite.Group()",
        # a typo that raised NameError and left self.block_list undefined
        # for the create_level1() call below.
        self.block_list = pygame.sprite.Group()
        self.all_sprites_list = pygame.sprite.Group()
        self.raspberry_list = pygame.sprite.Group()
        create_level1(self.block_list, self.all_sprites_list)
        self.player = Player(32, 32)
        self.player.rect.x = 240
        self.player.rect.y = 0
        self.all_sprites_list.add(self.player)

    def update(self):
        pass

    def draw(self):
        pass


class Scene:
    """The static background image."""

    def __init__(self):
        self.image = pygame.image.load('images/bg.png')

    def draw(self, screen):
        screen.blit(self.image, (0, 0))


# Create platforms
def create_level1(block_list, all_sprites_list):
    """Create the level-1 platforms and add each to both sprite groups."""
    # (x, y) of the eight 128x16 platforms, same order/positions as before.
    for x, y in ((160, 128), (352, 128),
                 (0, 432), (WIDTH - 128, 432),
                 (0, 240), (WIDTH - 128, 240),
                 (160, 336), (352, 336)):
        block = Platform(128, 16)
        block.rect.x = x
        block.rect.y = y
        block_list.add(block)
        all_sprites_list.add(block)


# Initialize the window
pygame.init()

# Set the height and width of the screen
screen = pygame.display.set_mode([WIDTH, HEIGHT], 0, 32)
pygame.display.set_caption("RaspJam")

background = pygame.image.load('images/bg.png')

# Main program, create the blocks
block_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
raspberry_list = pygame.sprite.Group()

create_level1(block_list, all_sprites_list)

player = Player(32, 32)
player.rect.x = 240
player.rect.y = 0
all_sprites_list.add(player)

for i in range(16):
    # This represents a raspberry pickup
    block = Raspberry()

    # Set a random grid location.  BUG FIX: use integer division -- on
    # Python 3, WIDTH / 92 is a float and random.randrange() rejects it.
    block.rect.x = random.randrange(WIDTH // 92) * 92
    block.rect.y = random.randrange(HEIGHT // 92) * 92

    # Add the block to the list of objects
    raspberry_list.add(block)
    #~ all_sprites_list.add(block)

#Loop until the user clicks the close button.
done = False # Used to manage how fast the screen updates clock = pygame.time.Clock() # -------- Main Program Loop ----------- while not done: # --- Event Processing for event in pygame.event.get(): if event.type == pygame.QUIT: done = True if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: done = True if event.key == pygame.K_LEFT: player.change_x = -6 if event.key == pygame.K_RIGHT: player.change_x = 6 if event.key == pygame.K_SPACE: player.jump(block_list) if event.type == pygame.KEYUP: if event.key == pygame.K_LEFT: player.change_x = 0 if event.key == pygame.K_RIGHT: player.change_x = 0 # --- Game Logic # Wrap player around the screen if player.rect.x >= WIDTH: player.rect.x = -15 if player.rect.x <= -16: player.rect.x = WIDTH player.calc_grav() player.update(block_list, raspberry_list) block_list.update() # --- Draw Frame #~ screen.fill(BLACK) screen.blit(background, (0, 0)) all_sprites_list.draw(screen) raspberry_list.draw(screen) pygame.display.flip() clock.tick(60) pygame.quit ()
Iwo Jima — the name may conjure harsh images of the famous Battle of Iwo Jima, which took place on the small island during World War II in 1945. It belongs to the Ogasawara Islands, a group of tropical and sub-tropical islands known for unique wildlife and flora, and is officially a part of Ogasawara Village, which is administered by the Tokyo Metropolitan government, which means technically, it’s part of Tokyo, although the island is located roughly 1,200km (about 745 miles) south of the Japanese capital city. While it isn’t inhabited by civilians, Iwo Jima is home to an air base that’s used by the Japan Maritime Self-Defense Force and the Japan Ground Self-Defense Force. There are a number of active bands in Japan’s Ground Self-Defense Force (GSDF), but at the top of them is the Central Band. In existence longer the GSDF itself, they are the go-to band in situations where it’s unacceptable to screw up, such as welcoming foreign dignitaries and ceremonies of the highest order. Then there’s “Senbonzakura”. This song was produced by WhiteFlame (aka Kurousa-P) and utilizes the artificial vocal talents of vocaloid Hatsune Miku. After its upload to video hosting site Niconico Douga in 2011, it became a modern classic of the vocaloid genre spawning several remixes and covers online and in karaoke rooms. So what happens when the rigid discipline and tradition of the GSDF Central Band meets the wild pop of Senbonzakura? Let’s take a listen! Cuteness sells, even if what you’re selling is recruitment for the Japan Self-Defense Force (JSDF) and the National Defense Academy (NDA). Since last July, the JSDF’s Okayama Provincial Cooperation Office has been using three adorable mascot girls to raise awareness for the JSDF. The girls represent each military branch of the forces, sporting the uniforms for each. 
Momoe Kibi represents the Japan Ground Self-Defense Force (Army), Mizuho Seto represents the Japan Maritime Self-Defense Force (Navy), while Airi Bizen represents the Japan Air Self Defense Force (Air Force). If the design aesthetic looks familiar, it’s because the girls were created by Humikane Shimada, the original character designer behind such military-inspired moe series like Strike Witches and Girls und Panzer. 5-4-3-2-1 Thunderbirds and Japan’s Self Defense Force are Go!? Word has been circulating that the famous British sci-fi puppetry program Thunderbirds and Japan’s pseudo-military, The Self Defense Force (SDF) are teaming up for a promotional campaign.
import sys import os from copy import deepcopy import subprocess as sp from mython import gen_qsub def writelines(lines): strlines = [" ".join(line) for line in lines] def submit_job(base,lines,cwd): if not os.path.exists(base): os.mkdir(base) os.chdir(base) with open("%s.inp"%base,'w') as outf: outf.write("\n".join([" ".join(l) for l in lines])) pc = ["module load openmpi/1.4-gcc+ifort"] qin = gen_qsub("~/bin/pw.x < %s.inp"%(base), stdout="%s.out"%(base), queue="physics", name="%s/%s.out"%(cwd,base), time="72:00:00", nn=1, prep_commands=pc) print sp.check_output("qsub %s"%qin,shell=True) os.chdir(cwd) if len(sys.argv) < 2: print "You need to enter a base file, dummy!" exit(1) else: basefn = sys.argv[1] baselines = [] with open(basefn,'r') as basef: for line in basef: baselines.append(line.split()) # Make base file for easy comparison. with open("base.inp",'w') as outf: outf.write("\n".join([" ".join(l) for l in baselines])) cwd = os.getcwd() changes = { #"lambda":[0.01,0.02,0.03,0.1,0.2] #"kpoint":[2,4,6,8,10], #"ecutwfc":[50,60,70,80,90,100], #"ecutrho":[400] #"psuedo":[ # ("Si","Si.pbe-n-rrkjus_psl.0.1.UPF"), # ("Si","Si.rel-pbe-n-rrkjus_psl.0.1.UPF"), # ("Si","Si.pz-vbc.UPF") "nqx":[1] } # Now's the part where you mess with something. for key in changes.keys(): lines = deepcopy(baselines) if key=="kpoint": for newval in changes["kpoint"]: lines[-1] = [str(newval),str(newval),str(newval),"0","0","0"] base = "conv_%s_%s"%(key,newval) submit_job(base,lines,cwd) continue if key=="psuedo": start,end = 0,0 # Find where pseudos are chosen. for li, line in enumerate(lines): if "ATOMIC_SPECIES" in line: start = li+1 for li, line in enumerate(lines): if "ATOMIC_POSITIONS" in line: end = li-1 # Replace for every species. 
for atom,pot in changes[key]: poss = [] for li in range(start,end): if atom in lines[li][0]: poss.append(li) for pos in poss: lines[pos][-1] = pot base = "pseudo_%s"%pot submit_job(base,lines,cwd) if key=="ecutwfc": ecutwfc,ecutrho=0,0 for li,line in enumerate(lines): if "ecutwfc" in line: ecutwfc=li if "ecutrho" in line: ecutrho=li for newval in changes[key]: lines[ecutwfc][-1] = str(newval) lines[ecutrho][-1] = str(10*newval) base = "conv_%s_%s"%(key,newval) submit_job(base,lines,cwd) continue # TODO: test new configuration. if key=="nqx": for newval in changes[key]: for line in lines: if any(["nqx" in word for word in line]): line[-1] = str(newval) base = "conv_%s_%s"%(key,newval) submit_job(base,lines,cwd) # Basic key replacement. for line in lines: if key in line: for newval in changes[key]: line[-1] = str(newval) base = "conv_%s_%s"%(key,newval) submit_job(base,lines,cwd)
that is quite unknown to many. And I urge everyone to try it out!! other than what my mom makes! but it has a lot more ingredients! this is the place for you!
# Standard Python modules
# =======================
import os

# PyQt5 modules
# =============
# NOTE(review): QUrl is imported twice in the line below (harmless, but
# the duplicate could be dropped).
from PyQt5.QtCore import QObject, pyqtProperty, pyqtSignal, pyqtSlot, qDebug, QUrl, QAbstractListModel, QModelIndex, \
    Qt, QVariant, QUrl
from PyQt5.QtQuick import QQuickItem

# DICE modules
# ============
from dice.app_helper.file_operations import FileOperations


class CoreApp(QObject, FileOperations):
    """Base class for a DICE core application.

    Exposes its name, icon image, and QML page location as Qt properties so
    the QML side can load and display the app.
    """

    def __init__(self, parent=None):
        super(CoreApp, self).__init__(parent)

        # all CoreApps are instantiated by the dice instance
        self.dice = parent

        # by default the image is in the images folder as lower-case svg and the qml file is the name itself
        self.__image = os.path.join("images", self.name.lower()+".svg")
        self.__page_location = self.name+".qml"

        self.view = None  # the QML item that is assigned to this CoreApp

    def setParent(self, q_object):
        """Reparent this app and track the new parent as the dice instance."""
        super().setParent(q_object)
        self.dice = q_object

    def load(self):
        """Hook for subclasses; called to initialize the app.  Default: no-op."""
        pass

    name_changed = pyqtSignal()

    @pyqtProperty("QString", notify=name_changed)
    def name(self):
        # The app's name is simply its class name.
        return self.__class__.__name__

    image_changed = pyqtSignal(name="imageChanged")

    @pyqtProperty(QUrl, notify=image_changed)
    def image(self):
        # adjust the location as it is needed by the loader
        return QUrl(os.path.join("../../../core_apps", self.name, "view", self.__image))

    @image.setter
    def image(self, image):
        # Only emit the change signal when the value actually changes.
        if self.__image != image:
            self.__image = image
            self.image_changed.emit()

    page_location_changed = pyqtSignal(name="pageLocationChanged")

    @property
    def page_location(self):
        # adjust the location as it is needed by the loader
        return QUrl(os.path.join("../../../core_apps", self.name, "view", self.__page_location))

    @page_location.setter
    def page_location(self, page_location):
        # Only emit the change signal when the value actually changes.
        if self.__page_location != page_location:
            self.__page_location = page_location
            self.page_location_changed.emit()

    # Expose the Python property above to QML under the camelCase name.
    pageLocation = pyqtProperty(QUrl, fget=page_location.fget, fset=page_location.fset, notify=page_location_changed)

    @pyqtSlot(QQuickItem, name="setView")
    def set_view(self, qml_item):
        """Store the QML item that represents this app's view."""
        self.view = qml_item

    completed = pyqtSignal()  # this signal is sent from QML when the Component has finished loading

    @staticmethod
    def debug(msg):
        """Forward a message to Qt's debug output."""
        qDebug(msg)


class CoreAppListModel(QAbstractListModel):
    """List model exposing a collection of CoreApp instances to QML."""

    # Custom roles for QML delegates.
    NameRole = Qt.UserRole + 1
    ImageRole = Qt.UserRole + 2
    PageLocationRole = Qt.UserRole + 3
    CoreAppRole = Qt.UserRole + 4

    _roles = {NameRole: "name", ImageRole: "image", PageLocationRole: "pageLocation", CoreAppRole: "coreApp"}

    def __init__(self, parent=None):
        super(CoreAppListModel, self).__init__(parent)
        self.__core_apps = []  # backing list of CoreApp instances

    def add_core_app(self, core_app):
        """Append a CoreApp, notifying views of the insertion."""
        self.beginInsertRows(QModelIndex(), self.rowCount(), self.rowCount())
        self.__core_apps.append(core_app)
        self.endInsertRows()
        self.count_changed.emit()

    def append(self, core_app):
        """Alias for add_core_app (list-like API)."""
        self.add_core_app(core_app)

    def rowCount(self, parent=QModelIndex()):
        """Number of core apps in the model."""
        return len(self.__core_apps)

    def data(self, index, role=Qt.DisplayRole):
        """Return the role value for the app at `index`, or QVariant() if invalid."""
        try:
            core_app = self.__core_apps[index.row()]
        except IndexError:
            return QVariant()
        if role == self.NameRole:
            return core_app.name
        if role == self.ImageRole:
            return QUrl(core_app.image)
        if role == self.PageLocationRole:
            return QUrl(core_app.page_location)
        if role == self.CoreAppRole:
            return core_app
        return QVariant()

    def roleNames(self):
        """Map role ids to the names used by QML delegates."""
        return self._roles

    count_changed = pyqtSignal(name="countChanged")

    @pyqtProperty(int, notify=count_changed)
    def count(self):
        return len(self.__core_apps)

    @pyqtSlot(int, result=CoreApp)
    def get(self, index):
        """Return the app at `index`, or a fresh (empty) CoreApp if out of range."""
        try:
            return self.__core_apps[index]
        except IndexError:
            return CoreApp()

    @pyqtSlot("QString", result=CoreApp, name="getByName")
    def get_by_name(self, name):
        """Return the app whose name matches, or a fresh (empty) CoreApp."""
        for core_app in self.__core_apps:
            if core_app.name == name:
                return core_app
        else:
            return CoreApp()
KMS believes that it is hard to talk about racial reconciliation if you don’t take time to understand the history and reasons for the sin, hurt, and abuse among brothers and sisters across different races. America has a very particular problem with race. For many years and in many ways there is a real church divide between more predominantly black churches and more predominantly white ones. There are some hopeful exceptions, but this division remains the norm. You have attempts and works of God to bridge the divide, but in so far as many multi-cultural churches exist and many other works of unity, this is again more the exception than the norm. Our prayer echoes the words of St. Paul when he prays that as Christians we would make room in our hearts for the other. KMS hosts half-day symposiums featuring local speakers to kick off conversations on racial reconciliation. For a sample of a past event, click here. Please contact us if you are interested in hosting this unique event. In the immediate aftermath of Charlottesville, Evangelical, Roman Catholic, Pentecostal, and Orthodox leaders for the most part were willing to speak out against the demonstration by the modern day klan. There was a strong response both from those engaged in pastoral ministry and those engaged in the policy world. Several Roman Catholic Cardinals and Bishops like Cardinal Cupich, Cardinal DiNardo, and Archbishop Wilton Gregory and Bishop Robert Barron all had very strong statements and developed resources on Charlottesville. Some of which can be accessed here, here, and here. Those engaged in the conservative policy world like Robert P. George and Ryan T. Anderson among others were also very outspoken, with Robert P. George going so far as suggesting that Bishops ought to force any Roman Catholics who marched with the modern day klan to renounce racism on pain of excommunication. 
Several Evangelical and Pentecostal leaders like Ed Stetzer, Jack Graham, and Tim Keller among others were fairly outspoken. Some of what they said can be accessed here, here, here, and here. Those engaged in the policy world like Russell Moore and Ralph Reed had statements. Many Evangelicals and Pentecostals more directly associated with the President, including those on the President’s Evangelical Advisory Board, said very little on race in particular with few exceptions. A partial list of responses can be found here. In the Mainline Protestant world leaders like Presiding Bishop Elizabeth Eaton of the Evangelical Lutheran Church in America, Presiding Bishop Michael Curry of the Episcopal Church, Bishop Bruce Ough, President of the United Methodist Council of Bishops, and Rev. Dr. J. Herbert Nelson, II, the Stated Clerk of the Presbyterian Church USA all had strong statements and resources which can be found here, here, here, and here. Those engaged in the policy world were also equally vocal. In the Orthodox world, the Bishops of the Orthodox Church in America and the Assembly of Canonical Bishops of the United States of America both issued strong statements here and here. The responses across the Christian world have been fairly good, but what is often missing is sustained ministries that help heal racial divisions between white and black Christians both in the historic contexts meaning within historically black churches and denominations and their counterparts (listed above), and within traditions that have a significant diverse racial presence. The divisions between black culture and the predominant white one are not easily healed; getting churches to work across dividing lines for common witness is also fairly difficult.
Within a few days, many of the leaders mentioned went back to their everyday ministries and tasks, as is natural, but what is needed in this moment is new and sustained efforts to help churches build more collegial relations around issues of common cause and justice but also as importantly on the congregational and parish level between churches and people who often won’t and don’t associate. What we are currently doing is too small to heal the lasting divisions. How many more severe moments in the culture will it take before statements translate into sustained actions focused on building some relational muscle between different kinds of Christians. Racism is first a scandal in the life of the church, long before it becomes a lasting scandal in the life of the culture. For this ministry, the last few years have been enough, and Charlottesville was more than enough. Starting now, KMS will be developing video resources on race and launching into justice initiatives as we hope to build our own muscles and witness along this lasting American Christian challenge. In particular we want to focus our work on getting Christians involved in new areas where our combined witness can help tackle some of the systemic racial challenges in our culture, and on resources that can be used across a broad spectrum of the church.
#!/usr/bin/env python
# encoding:utf-8
# __author__: commons
# date: 2016/10/2 13:47
# blog: http://huxianglin.cnblogs.com/ http://xianglinhu.blog.51cto.com/

import hashlib
import time
import os

from conf import settings


def encrypt_passwd(passwd):
    """Return the SHA-256 hex digest of *passwd* (password hashing)."""
    sha256_obj = hashlib.sha256()
    sha256_obj.update(passwd.encode("utf-8"))
    return sha256_obj.hexdigest()


def get_file_hash(filepath):
    """Return the MD5 hex digest of the file at *filepath*.

    Reads in 1 KiB chunks until the size recorded by os.stat() has been
    consumed.
    """
    file_size = os.stat(filepath).st_size
    read_size = 0
    md5_obj = hashlib.md5()
    with open(filepath, "rb") as f:
        while read_size != file_size:
            data = f.read(1024)
            if not data:
                # BUG FIX: without this guard the loop spins forever if the
                # file shrinks between stat() and read() (EOF reached before
                # file_size 字节 are counted).
                break
            md5_obj.update(data)
            read_size += len(data)
    return md5_obj.hexdigest()


# Validate the configured progress-bar width once at import time.
if isinstance(settings.PROGRESS_LINE_SIZE, int):
    if 1 <= settings.PROGRESS_LINE_SIZE <= 100:
        star_list = [i for i in range(1, settings.PROGRESS_LINE_SIZE + 1)]
    else:
        raise Exception("参数设置错误...PROGRESS_LINE_SIZE范围在1-100")
else:
    raise Exception("参数设置错误...PROGRESS_LINE_SIZE必须是整数")


def show_progress_line(send_que, totol_size):
    """Progress bar: consume byte counts from *send_que* and print a bar.

    Blocks on the queue; exits once the transferred size reaches
    *totol_size*.
    """
    send_size = send_que.get()
    while send_size / totol_size <= 1:
        time.sleep(settings.PROGRESS_LINE_PERIOD)
        percent = int(send_size * 100 / totol_size)
        # Pad so the "=...>" bar plus percentage stays a fixed width.
        space_len = " " * int(settings.PROGRESS_LINE_SIZE - settings.PROGRESS_LINE_SIZE * (percent / 100) + 3)
        message = "\t".join((space_len.join(("".join(("=" * int(percent / 100 * settings.PROGRESS_LINE_SIZE), ">")), "{percent}%".format(percent=percent))), time.ctime()))
        print(message, end="\n", flush=True)
        if send_size == totol_size:
            print("Translate is finish!")
            break
        send_size = send_que.get()
Award-winning eliquid creators Vjuice are immensely proud to present the all new range of eliquids...... The Army Range. Created, mixed and tested in our very own UK clean labs, this fabulous new range is being released with 4 initial flavours, with more to follow. Irn Screw - A bold ice-cream & screwball twist on the classic Irn-Bru drink classically reinvented. Lieutenant Lemon - A beautiful, celebratory, lemon and pancake concoction. Private Berryman - A fresher than fresh blackcurrant and lemonade twist. The flavour profiles cross-section a variety of taste palettes, so we are sure that you will find one of this new range very appealing. We are very excited to bring you this new range. Vjuice are all about keeping the vape market fresh, creating new profiles and releasing premium branded products. The Army Range is another of these ranges, and we have combined tried and tested underlying profiles with popular branding to bring you this great product that we hope all our vapers will love.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import logging
import argparse
import mxnet as mx
from common import modelzoo
import gluoncv
from gluoncv.model_zoo import get_model
from mxnet.contrib.amp import amp
import numpy as np


def download_model(model_name, logger=None):
    """Download a pretrained symbolic model into ./model; return (prefix, epoch)."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    model_path = os.path.join(dir_path, 'model')
    if logger is not None:
        logger.info('Downloading model {}... into path {}'.format(model_name, model_path))
    # BUG FIX: use the model_name parameter and the model_path computed above
    # (this previously referenced the global args.model and re-joined the path).
    return modelzoo.download_model(model_name, model_path)


def save_symbol(fname, sym, logger=None):
    """Save `sym` to `fname`, keeping the amp_cast nodes in the graph."""
    if logger is not None:
        logger.info('Saving symbol into file at {}'.format(fname))
    sym.save(fname, remove_amp_cast=False)


def save_params(fname, arg_params, aux_params, logger=None):
    """Save arg/aux parameters to `fname` with the standard arg:/aux: prefixes."""
    if logger is not None:
        logger.info('Saving params into file at {}'.format(fname))
    save_dict = {('arg:%s' % k): v.as_in_context(mx.cpu()) for k, v in arg_params.items()}
    save_dict.update({('aux:%s' % k): v.as_in_context(mx.cpu()) for k, v in aux_params.items()})
    mx.nd.save(fname, save_dict)


if __name__ == '__main__':
    symbolic_models = ['imagenet1k-resnet-152',
                       'imagenet1k-resnet-18',
                       'imagenet1k-resnet-34',
                       'imagenet1k-resnet-50',
                       'imagenet1k-resnet-101',
                       'imagenet1k-resnext-50',
                       'imagenet1k-resnext-101',
                       'imagenet1k-resnext-101-64x4d',
                       'imagenet11k-place365ch-resnet-152',
                       'imagenet11k-place365ch-resnet-50']
    gluon_models = ['resnet18_v1',
                    'resnet50_v1',
                    'resnet101_v1',
                    'squeezenet1.0',
                    'mobilenet1.0',
                    'mobilenetv2_1.0',
                    'inceptionv3']
    models = symbolic_models + gluon_models

    parser = argparse.ArgumentParser(description='Convert a provided FP32 model to a mixed precision model')
    parser.add_argument('--model', type=str, choices=models)
    parser.add_argument('--run-dummy-inference', action='store_true', default=False,
                        help='Will generate random input of shape (1, 3, 224, 224) '
                             'and run a dummy inference forward pass')
    parser.add_argument('--use-gluon-model', action='store_true', default=False,
                        help='If enabled, will download pretrained model from Gluon-CV '
                             'and convert to mixed precision model ')
    parser.add_argument('--cast-optional-params', action='store_true', default=False,
                        help='If enabled, will try to cast params to target dtype wherever possible')
    args = parser.parse_args()
    logging.basicConfig()
    logger = logging.getLogger('logger')
    logger.setLevel(logging.INFO)

    if not args.use_gluon_model:
        assert args.model in symbolic_models, "Please choose one of the available symbolic models: {} \
If you want to use gluon use the script with --use-gluon-model".format(symbolic_models)

        prefix, epoch = download_model(model_name=args.model, logger=logger)
        sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
        # Convert symbol + params to a mixed precision (AMP) model.
        result_sym, result_arg_params, result_aux_params = amp.convert_model(sym,
                                                                             arg_params,
                                                                             aux_params,
                                                                             cast_optional_params=args.cast_optional_params)
        sym_name = "%s-amp-symbol.json" % (prefix)
        save_symbol(sym_name, result_sym, logger)
        param_name = '%s-%04d.params' % (prefix + '-amp', epoch)
        save_params(param_name, result_arg_params, result_aux_params, logger)

        if args.run_dummy_inference:
            logger.info("Running inference on the mixed precision model with dummy input, batch size: 1")
            mod = mx.mod.Module(result_sym, data_names=['data'], label_names=['softmax_label'], context=mx.gpu(0))
            mod.bind(data_shapes=[['data', (1, 3, 224, 224)]], label_shapes=[['softmax_label', (1,)]])
            # BUG FIX: bind the CONVERTED params to the converted symbol
            # (previously the original FP32 arg/aux params were set here).
            mod.set_params(result_arg_params, result_aux_params)
            mod.forward(mx.io.DataBatch(data=[mx.nd.ones((1, 3, 224, 224))],
                                        label=[mx.nd.ones((1,))]))
            result = mod.get_outputs()[0].asnumpy()
            logger.info("Inference run successfully")
    else:
        assert args.model in gluon_models, "Please choose one of the available gluon models: {} \
If you want to use symbolic model instead, remove --use-gluon-model when running the script".format(gluon_models)
        net = gluoncv.model_zoo.get_model(args.model, pretrained=True)
        net.hybridize()
        # A forward pass is required so the hybridized graph and params are
        # initialized before export().
        result_before1 = net.forward(mx.nd.zeros((1, 3, 224, 224)))
        net.export("{}".format(args.model))
        # Convert the hybrid block to a mixed precision (AMP) model.
        net = amp.convert_hybrid_block(net, cast_optional_params=args.cast_optional_params)
        net.export("{}-amp".format(args.model), remove_amp_cast=False)

        if args.run_dummy_inference:
            logger.info("Running inference on the mixed precision model with dummy inputs, batch size: 1")
            result_after = net.forward(mx.nd.zeros((1, 3, 224, 224), dtype=np.float32, ctx=mx.gpu(0)))
            result_after = net.forward(mx.nd.zeros((1, 3, 224, 224), dtype=np.float32, ctx=mx.gpu(0)))
            logger.info("Inference run successfully")
Webcasts from the International Foundation offer timely, quality information on current issues facing the benefits industry. Train your entire team or just listen in at your desk with the flexibility of webcasts. Members have FREE access to all webcasts, both live and on-demand. With this new benefit included in your membership, you no longer have to go through hoops to get approval, just click and listen! Not a member? Join now. Here’s a list of currently scheduled webcasts. Check this page daily for updates. • What changes have been made recently to legislation regarding DC plans? • The new initiatives in the federal budget aimed at pensioners–What are they and how will the provinces respond? • What do all these changes mean for DC plan sponsors? This webcast is for those who are registered for or interested in attending the German Apprenticeship Tour sponsored by the International Foundation of Employee Benefit Plans, June 3-7, 2019. The webcast will feature an overview of the German Apprentice system which has relied on industry-based relationships that include the secondary educational level to produce most of their journeyworkers. The presentation will include a review of the program tour schedule and logistics. • The results since launch, including impact on the fund office, feedback from the 8,500+ members using the app and the unexpected benefits that have also emerged. Missed the live event? No problem - watch at your own convenience with webcasts on-demand. Webcasts are posted soon after the live event and members have free access to all recent presentations. This webcast will explore interviews with apprenticeship stakeholders conducted by the Canadian Apprenticeship Forum, along with its partners in the United Kingdom, Germany, Bulgaria and Poland, about their barriers to mentoring, training and engaging youth in apprenticeship programs. 
This will also look at how these European countries conducted pilot programs and were successful in encouraging small to medium-sized businesses to hire apprentices by providing a combination of in-person, hands-on and financial support. Resilience is the process of successfully bouncing back from stress—and it can be learned and developed in anyone! Join us for this webcast on National Employee Benefits Day to learn how to build resilience in your workforce and create a positive work culture that thrives on challenge and stress. And guess what? This is what business leaders have always wanted—healthy staff who grow, enhance productivity and make work exciting! Employers are under increased pressure to respond to employee behavioral/mental health issues, the opioid epidemic and rising employee stress. A well-positioned employee assistance program (EAP) can be an effective resource to meet these challenges. This session will demonstrate the dollar return on implementing a comprehensive EAP. This program will cover: • Financial impact of employee stress and depression • Calculating the combined costs of: • Lost productivity from presenteeism, absenteeism, turnover and accidents • Medical spend on mental health • EAP utilization impact on the reduction of costs • Positioning your EAP to be effective. Please join us for a robust discussion on the topic of Environmental, Social and Governance (ESG) investing. Two expert speakers will address the misperceptions and the realities of ESG investing—and how this approach to long-term investing is not only about risk-mitigation, but also seeking better risk-adjusted returns. Hear about ESG integration and public equities investing as well as ESG considerations from the fixed income perspective spanning corporate, sovereign, securitized and municipal markets.
This presentation is intended to not only be educational but provide context within an investment framework for fiduciaries as the universal owner for the generations to come. It can be confusing trying to figure out who has ERISA fiduciary status right now and what their responsibilities are. In 2016, the Department of Labor (DOL) issued a rule redefining the meaning of the term “fiduciary” as it applies to investment professionals. In 2018, before the rule was fully in effect, a federal court vacated the rule. This session will cover who is considered an ERISA benefit plan fiduciary today and what one should be doing. • Who is an ERISA fiduciary? • What are the different types of ERISA fiduciaries? • What does it mean that the definition of ERISA fiduciary is functional? • What types of decisions do ERISA fiduciary duties apply to? • What are the ERISA fiduciary duties? • What is ERISA co-fiduciary liability? • What are the consequences of violating ERISA fiduciary duties? • What can ERISA fiduciaries do to protect themselves from liability? How public sector employers report their pension obligations and costs may be changing. The Public Sector Accounting Board (PSAB) that sets accounting standards for governments and other public sector entities has an employment benefits project that may result in changes in pension accounting. PSAB has issued three invitations to seek stakeholder feedback on the potential changes. • How your organization can influence the future of pension accounting. The advancement of cancer genetics allows us to pursue health care in an entirely new way. But the coverage and cost surrounding this health care space can still be difficult to decipher. Is genetic testing for increased risk of cancer typically covered under benefit plans? What coverage is available to participants who discover that they have a genetic mutation such as BRCA or Lynch syndrome? 
This webcast will dive into the area of cancer genetic testing and what happens after a patient tests positive for an inherited mutation that increases his or her risk of future disease. A Special Edition Webcast brought to you LIVE from Washington D.C. Preregistration is not required for online viewing and reminder emails will not be sent. Please join us for EBRI's 84th Policy Forum. This Policy Forum will examine the latest EBRI research on Retirement, Health and Financial Wellness. Sessions Include: -How Do Financial Wellness Initiatives Move the Dial? -Considerations in the Retirement Income Phase of Life -What Does the Future Hold for the Employment-Based Health Benefits System? Managing your pharmacy benefits plan can be challenging. This session will explore the common trials of the evolving PBM industry, such as the traps of transparent versus traditional contracts, navigating specialty drug benefits and making the most of your PBM through effective plan design. Participants will gain clarity on popular plan management strategies, including the benefits and trade-offs of each. It’s well understood that human emotions play a role, both positive and negative, in physical and mental health. The surprising new finding is that these same emotions can sometimes act as barriers for people struggling with financial problems. In this session, we will first share Manulife’s latest research revealing that the impacts of emotional barriers to financial wellness are far-reaching. While important, education and financial literacy programs aren’t enough to help the large percentage of Canadians who are financially unwell. Then we will discuss what organizations can do to help their employees now and in the future. And finally we will focus on the positive change we can bring when we apply practical and evidence-based strategies for workplace mental health.
Learn how one of the largest multi-institutional health care delivery systems in the nation developed a standard approach to total rewards onboarding for acquisitions. This webcast will cover the journey of transitioning seven organizations acquired since 2015 to a new total rewards package using best-practice models that can be translated to fit your organization. Attendees will learn the pitfalls to avoid, key steps to include and important considerations for custom tailoring a total rewards onboarding approach. Have you considered how your company's understanding and compliance with family discrimination laws affect your employees with caregiving responsibilities? Learn the latest information about employee caregivers and federal and state employment discrimination laws, such as Title VII of the Civil Rights Act of 1964, the Americans with Disabilities Act of 1990, and the Family and Medical Leave Act. One of the challenges of working in a global and mobile workforce is ensuring an employee's health and safety while away from their regularly assigned work location. If not set up and administered correctly, the company exposes itself to severe financial and reputational risk while exposing its traveling employees to financial and physical risk. This session will include information and guidance on various lines of travel benefit coverage, how to ensure you have the right coverage, how to avoid redundant coverage and cost issues, how to create a travel benefit reference matrix for your organization, and employee communication approaches. Case study examples will be used. Both Canada and the U.S. have voluntary standards for a psychologically healthy workplace. We will review the standards in both countries, which serve to provide a comprehensive framework for creating work environments in which both employees and organizations thrive. 
In this webcast, participants will explore the link between healthy workplace practices, employee well-being and organizational performance. Discussion will include the types of practices that foster a healthy workplace, key factors that can increase the likelihood of positive outcomes and strategies for addressing barriers to successful implementation. As a start-up global specialty chemical company with 3,500 employees in over 30 countries, Solenis designed, developed and implemented an innovative wellness program that in just one year changed its corporate culture in a very positive way. The program design promotes the engagement of employees from their date of hire and engages and rewards employees for engaging in formal wellness program education activities. The company committed to financially invest in the global expansion of the wellness program after just one year of a very successful program launch in the United States. During this session the audience will learn about this global start-up company’s wellness journey. Public sector pension envy no more! Two powerhouse pension funds—the Colleges of Applied Arts and Technology (CAAT) and the Ontario Public Sector Employees’ Union Pension Plan (OPTrust)—have introduced new, innovative defined benefit (DB) pension plans designed to bring retirement security and guaranteed benefits to workers outside their traditional core sectors. You’ve launched a great wellness program but you’re not getting the participation and engagement you projected. You may even hear grumblings from some employees. With so many employers vested in wellness, why do employees dislike wellness programs? Surprisingly, it’s not wellness they dislike. There are many wellness programs that attain high participation rates. What’s not to love about a fun cooking demo or the opportunity to work up a sweat at an on-site fitness class?
During this webinar, we’ll take an in-depth look at developing a comprehensive program that your employees will love. We’ll discuss the approaches that should be taken and the ones that should not. You’ll leave with practical ideas you can implement at your organization and simple adjustments you can make to your program. Before you know it, your employees will be 'jumping' at the opportunity to improve their health. Investment fees matter for two reasons. One is well-known: Fees reduce clients’ returns. But this is not the only effect they have—Fees also influence your investment manager’s decisions. Lately, institutions ranging from the United Kingdom regulator to academic researchers to the world’s largest pension fund have started to pay more attention to the importance of fee structures (not just fee levels). That focus should be welcomed by investors. By developing a better understanding of different fee structures and the incentives they create, we believe clients can improve their chance of finding managers who can deliver long-term value for money. This webcast will compare how U.S. fiduciary requirements compare to international requirements for management of Defined Contribution retirement plans. To what extent should your employer approach the governance of DC plans outside the U.S. given the knowledge of U.S. 401(k) requirements? Hear recommendations and best practices other employers have implemented, as well as options to mitigate risks. As healthcare costs continue to soar, and access to quality healthcare becomes more rare, many employers and multiemployer plans are bringing healthcare directly to the worksite. Especially for sponsors with self-funded health plans, offering primary care onsite is a strategy to improve employee health and reduce health costs. But there are a handful of key strategic decisions every plan sponsor must make before launching an onsite clinic. 
This session will help sponsors navigate the major considerations associated with bringing healthcare onsite. Subsection 25(2.1) of the Ontario Human Rights Code and related provisions in the Employment Standards Act and its regulations have permitted employers to cut or reduce benefits to workers aged 65 and older without any justification. In Talos v. Grand Erie District School Board, the Human Rights Tribunal of Ontario (HRTO) found that the legislative scheme amounts to age discrimination and violates Section 15 of the Canadian Charter of Rights and Freedoms. The decision relates to group health, dental and life insurance benefits. In order to better support educators who are on disability claims, the BC Teachers' Federation implemented an online Cognitive Behavioural Therapy (CBT) program to help teachers better manage stress, anxiety and sadness to improve their mental resilience. Those on disability are enrolled in a 28-day program where they immediately begin focusing on a return-to-work date. Learn about case study findings that highlight the effectiveness of the new program in supporting mental health and reducing disability costs. The takeaways are relevant for Canadian and U.S. plan sponsors and trustees. Employers are increasingly turning to value-based reimbursement arrangements. In fact, according to an August 2017 NBGH survey, 21% of companies are planning to contract with accountable care organizations (ACOs) this year and the NBGH predicts the number of employers offering ACOs could double by 2020. The federal government (Medicare) and many states (Medicaid) have also been transforming to value-based reimbursement models. The purpose of the webinar is to help benefits professionals understand what value-based reimbursement is, how it is transforming health care delivery, and how the transformation could help reduce costs and improve quality.
This knowledge will help them better understand the potential implications for their organizations and employees, what to look for and expect from their payers/carriers and providers, and potential strategic opportunities. T-Mobile sought to find a better way to connect employees in a simple and efficient way to expert resources to help them be their best self. In their desire to stay true to their culture and engage their high millennial population they wanted a solution that was mobile, socially driven and fun! They also needed to find a way to eliminate the negative stigma that is often associated with EAP solutions. From these objectives, LiveMagenta! was born as a means to drive engagement through organic adoption and simple yet engaging sourcing of resources. Living with stress and anxiety is much more prevalent than you might imagine. Millions of people are burdened by life’s challenges every single day–from difficult life events and balancing professional and personal responsibilities to acute pain, illness, and chronic health conditions. These challenges show up at work, and the health consequences show up in the bottom line, with pressures on health care claims from health conditions that could be mitigated through prevention activities, including mindfulness based stress management. Stress at work is caused by many factors. It can be difficult, if not impossible for us to talk about our stress and anxiety or face its causes. Most employees won't seek help for their suffering outside of the workplace. In our culture of minimization, avoidance, and denial of our fears and feelings, we dig ourselves deeper into day to day pain and suffering. Creating a corporate wellness focus that encourages stress management practices and sharing them in a science based way allows a forward-thinking employer a wellness culture that encourages self care. 
This creates the opportunity for optimal performance at work, by offering simple, straightforward, multiculturally appropriate approaches to encourage mental ease. A Special Edition Webcast brought to you LIVE from Washington D.C.—Please join us for EBRI's 83rd Policy Forum. This Policy Forum will examine the latest EBRI research on Retirement, Health and Financial Wellbeing. Student Loans: Is it Time for Employers to Step In? On what basis are commuted values payable from TBPs calculated? No one wants to talk about it, let alone in the workplace...depression. But depression isn't a topic only for our personal lives. The impact of depression goes beyond the personal realm to significantly impact the bottom line of organizations from every sector, ultimately costing employers billions of dollars each year. Not only should employers learn to recognize depression, but in order to thrive, should help employees get proper treatment. Gain an understanding of the burden of depression in the workplace and how depression manifests itself and affects employee performance and productivity. Attain insights into the barriers to addressing depression, including stigma and lack of resources available to employers. Learn how to spot depression in the workplace. Hear about tools and resources employers can utilize to better support mental health and productivity of their workforce. As an example, a case study of a successful mental health initiative by Kent State University will be discussed, including initial steps to implementation; overcoming challenges and barriers; and results, outcomes and next steps. Don't miss this opportunity to learn how to identify and address this pervasive condition and hear what others have implemented.
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Django models for the Fractional Cover tool.

Specializes the shared dc_algorithm base models (Query, Metadata, Result,
UserHistory, ToolInfo) and combines them into a single FractionalCoverTask.
"""
from django.db import models

from apps.dc_algorithm.models import Area, Compositor, Satellite
from apps.dc_algorithm.models import (Query as BaseQuery, Metadata as BaseMetadata, Result as BaseResult,
                                      ResultType as BaseResultType, UserHistory as BaseUserHistory,
                                      AnimationType as BaseAnimationType, ToolInfo as BaseToolInfo)
from utils.data_cube_utilities.dc_mosaic import (create_mosaic, create_median_mosaic, create_max_ndvi_mosaic,
                                                 create_min_ndvi_mosaic)

import numpy as np


class UserHistory(BaseUserHistory):
    """
    Extends the base user history adding additional fields
    See the dc_algorithm.UserHistory docstring for more information
    """
    # No tool-specific fields: the base model already carries everything needed.
    pass


class ToolInfo(BaseToolInfo):
    """
    Extends the base ToolInfo adding additional fields
    See the dc_algorithm.ToolInfo docstring for more information
    """
    # No tool-specific fields: the base model already carries everything needed.
    pass


class Query(BaseQuery):
    """
    Extends base query, adds app specific elements. See the dc_algorithm.Query docstring
    for more information.

    Defines the get_or_create_query_from_post as required, adds new fields, recreates the unique
    together field, and resets the abstract property. Functions are added to get human readable
    names for various properties, foreign keys should define __str__ for a human readable name.
    """
    # The mosaicking method chosen by the user for this query.
    # NOTE(review): no on_delete argument — pre-Django-2.0 style (defaults to
    # CASCADE); confirm against the Django version the project pins.
    compositor = models.ForeignKey(Compositor)
    # All task results for this tool are written under this directory.
    base_result_dir = '/datacube/ui_results/fractional_cover'

    class Meta(BaseQuery.Meta):
        # NOTE(review): the outer parentheses are grouping, not a tuple of
        # tuples — this declares a single uniqueness constraint spanning all
        # listed fields, i.e. one Query row per distinct parameter set.
        unique_together = (('satellite', 'area_id', 'time_start', 'time_end', 'latitude_max', 'latitude_min',
                            'longitude_max', 'longitude_min', 'title', 'description', 'compositor'))
        abstract = True

    def get_fields_with_labels(self, labels, field_names):
        # Yields [label, value] pairs by pairing each label with the
        # attribute named at the same index in field_names.
        for idx, label in enumerate(labels):
            yield [label, getattr(self, field_names[idx])]

    def get_chunk_size(self):
        """Implements get_chunk_size as required by the base class

        See the base query class docstring for more information.

        Non-iterative compositors (e.g. median) need the full time stack at
        once, so time chunking is disabled and the geographic chunk is kept
        small; iterative compositors can process 50 scenes at a time over a
        larger geographic chunk.
        """
        if not self.compositor.is_iterative():
            return {'time': None, 'geographic': 0.05}
        return {'time': 50, 'geographic': 0.1}

    def get_iterative(self):
        """implements get_iterative as required by the base class

        See the base query class docstring for more information.

        Every compositor except the median-pixel one can be applied scene by
        scene.
        """
        return self.compositor.id != "median_pixel"

    def get_reverse_time(self):
        """implements get_reverse_time as required by the base class

        See the base query class docstring for more information.

        Most-recent composites iterate newest-first so later scenes win.
        """
        return self.compositor.id == "most_recent"

    def get_processing_method(self):
        """implements get_processing_method as required by the base class

        See the base query class docstring for more information.

        Maps the compositor id to the dc_mosaic function that implements it;
        unknown ids fall back to the plain create_mosaic.
        """
        processing_methods = {
            'most_recent': create_mosaic,
            'least_recent': create_mosaic,
            'max_ndvi': create_max_ndvi_mosaic,
            'min_ndvi': create_min_ndvi_mosaic,
            'median_pixel': create_median_mosaic
        }

        return processing_methods.get(self.compositor.id, create_mosaic)

    @classmethod
    def get_or_create_query_from_post(cls, form_data, pixel_drill=False):
        """Implements the get_or_create_query_from_post func required by base class

        See the get_or_create_query_from_post docstring for more information.
        Parses out the time start/end, creates the product, and formats the title/description

        Args:
            form_data: python dict containing either a single obj or a list formatted with post_data_to_dict

        Returns:
            Tuple containing the query model and a boolean value signifying if it was created or loaded.

        """
        query_data = form_data
        # Default the title/description when they are missing or blank.
        query_data['title'] = "Fractional Cover Query" if 'title' not in form_data or form_data[
            'title'] == '' else form_data['title']
        query_data['description'] = "None" if 'description' not in form_data or form_data[
            'description'] == '' else form_data['description']

        # Drop any form keys that do not correspond to model fields so the
        # dict can be splatted into the ORM lookup/constructor below.
        valid_query_fields = [field.name for field in cls._meta.get_fields()]
        query_data = {key: query_data[key] for key in valid_query_fields if key in query_data}

        # EAFP get-or-create: reuse an identical prior query when one exists.
        try:
            query = cls.objects.get(pixel_drill_task=pixel_drill, **query_data)
            return query, False
        except cls.DoesNotExist:
            query = cls(pixel_drill_task=pixel_drill, **query_data)
            query.save()
            return query, True


class Metadata(BaseMetadata):
    """
    Extends base metadata, adding additional fields and adding abstract=True.

    zipped_metadata_fields is required.

    See the dc_algorithm.Metadata docstring for more information
    """
    # Comma-separated platform name per acquisition, parallel to acquisition_list.
    satellite_list = models.CharField(max_length=100000, default="")

    # Per-acquisition CSV fields that are zipped together for display.
    zipped_metadata_fields = [
        'acquisition_list', 'clean_pixels_per_acquisition', 'clean_pixel_percentages_per_acquisition',
        'satellite_list'
    ]

    class Meta(BaseMetadata.Meta):
        abstract = True

    def metadata_from_dataset(self, metadata, dataset, clear_mask, parameters):
        """implements metadata_from_dataset as required by the base class

        See the base metadata class docstring for more information.

        Accumulates, per acquisition time, the number of clear pixels and the
        platform name resolved through parameters['platforms'].
        """
        for metadata_index, time in enumerate(dataset.time.values.astype('M8[ms]').tolist()):
            # Number of unmasked (clear) pixels in this time slice.
            clean_pixels = np.sum(clear_mask[metadata_index, :, :] == True)
            if time not in metadata:
                metadata[time] = {}
                metadata[time]['clean_pixels'] = 0
                # A satellite code of -1 (or lower) marks NODATA; otherwise the
                # code indexes parameters['platforms'] for a readable name.
                # NOTE(review): np.unique(...)[0] takes the smallest code —
                # assumes each slice holds a single satellite value; confirm.
                metadata[time]['satellite'] = parameters['platforms'][np.unique(
                    dataset.satellite.isel(time=metadata_index).values)[0]] if np.unique(
                        dataset.satellite.isel(time=metadata_index).values)[0] > -1 else "NODATA"
            metadata[time]['clean_pixels'] += clean_pixels

        return metadata

    def combine_metadata(self, old, new):
        """implements combine_metadata as required by the base class

        See the base metadata class docstring for more information.

        Merges two per-time metadata dicts, summing clean-pixel counts for
        times present in both.
        """
        for key in new:
            if key in old:
                old[key]['clean_pixels'] += new[key]['clean_pixels']
                continue
            old[key] = new[key]
        return old

    def final_metadata_from_dataset(self, dataset):
        """implements final_metadata_from_dataset as required by the base class

        See the base metadata class docstring for more information.

        Computes totals over the composited output and persists them.
        """
        self.pixel_count = len(dataset.latitude) * len(dataset.longitude)
        # NOTE(review): assumes -9999 is the nodata fill value for every data
        # variable in the composite — confirm against the product definition.
        self.clean_pixel_count = np.sum(dataset[list(dataset.data_vars)[0]].values != -9999)
        self.percentage_clean_pixels = (self.clean_pixel_count / self.pixel_count) * 100
        self.save()

    def metadata_from_dict(self, metadata_dict):
        """implements metadata_from_dict as required by the base class

        See the base metadata class docstring for more information.

        Flattens the per-time metadata dict into the CSV string fields,
        ordered newest acquisition first.
        """
        dates = list(metadata_dict.keys())
        dates.sort(reverse=True)
        self.total_scenes = len(dates)
        self.scenes_processed = len(dates)
        self.acquisition_list = ",".join([date.strftime("%m/%d/%Y") for date in dates])
        self.satellite_list = ",".join([metadata_dict[date]['satellite'] for date in dates])
        self.clean_pixels_per_acquisition = ",".join([str(metadata_dict[date]['clean_pixels']) for date in dates])
        self.clean_pixel_percentages_per_acquisition = ",".join(
            [str((metadata_dict[date]['clean_pixels'] * 100) / self.pixel_count) for date in dates])
        self.save()


class Result(BaseResult):
    """
    Extends base result, adding additional fields and adding abstract=True

    See the dc_algorithm.Result docstring for more information
    """

    # result path + other data. More to come.
    # Filesystem paths (under base_result_dir) of the generated artifacts.
    mosaic_path = models.CharField(max_length=250, default="")
    plot_path = models.CharField(max_length=250, default="")
    data_path = models.CharField(max_length=250, default="")
    data_netcdf_path = models.CharField(max_length=250, default="")

    class Meta(BaseResult.Meta):
        abstract = True


class FractionalCoverTask(Query, Metadata, Result):
    """
    Combines the Query, Metadata, and Result abstract models
    """
    # Concrete task model: the three abstract mixins above supply all fields.
    pass
The year started with last year’s most popular stars Dileep and Rima Kallingal coming together for the first time for the film ‘Kammath & Kammath’. Though the movie didn’t make what it was expected to, the duo will be cast together once again for the new movie titled ‘Ezhu Sundara Rathrikal’. Coming from hit maker Laljose, the movie is scripted by James Albert. Dileep, who plays an ad film maker in the film, will have one more heroine for the movie, whose name is yet to be finalised. The director-scriptwriter duo has earlier worked together for the big hit ‘Classmates’. ‘Ezhu Sundara Rathrikal’ will be the Christmas release of Dileep. Trisha hits 40 with Mankatha!
from __future__ import print_function, division

from sympy.logic.boolalg import And
from sympy.core import oo
from sympy.core.basic import Basic
from sympy.core.compatibility import as_int, with_metaclass, range
from sympy.sets.sets import (Set, Interval, Intersection, EmptySet, Union,
                             FiniteSet)
from sympy.core.singleton import Singleton, S
from sympy.core.sympify import _sympify
from sympy.core.decorators import deprecated
from sympy.core.function import Lambda


class Naturals(with_metaclass(Singleton, Set)):
    """
    Represents the natural numbers (or counting numbers) which are all
    positive integers starting from 1. This set is also available as
    the Singleton, S.Naturals.

    Examples
    ========

    >>> from sympy import S, Interval, pprint
    >>> 5 in S.Naturals
    True
    >>> iterable = iter(S.Naturals)
    >>> next(iterable)
    1
    >>> next(iterable)
    2
    >>> next(iterable)
    3
    >>> pprint(S.Naturals.intersect(Interval(0, 10)))
    {1, 2, ..., 10}

    See Also
    ========
    Naturals0 : non-negative integers (i.e. includes 0, too)
    Integers : also includes negative integers
    """

    is_iterable = True
    _inf = S.One
    _sup = S.Infinity

    def _intersect(self, other):
        # Intersection with an interval reduces to the integers clipped to
        # [1, oo); other set types are not handled here (return None).
        if other.is_Interval:
            return Intersection(
                S.Integers, other, Interval(self._inf, S.Infinity))
        return None

    def _contains(self, other):
        # Returns None (fuzzy-unknown) when integrality/sign cannot be decided.
        if other.is_positive and other.is_integer:
            return S.true
        elif other.is_integer is False or other.is_positive is False:
            return S.false

    def __iter__(self):
        # Counts up from _inf indefinitely: 1, 2, 3, ...
        i = self._inf
        while True:
            yield i
            i = i + 1

    @property
    def _boundary(self):
        # A discrete set is its own boundary.
        return self


class Naturals0(Naturals):
    """Represents the whole numbers which are all the non-negative integers,
    inclusive of zero.

    See Also
    ========
    Naturals : positive integers; does not include 0
    Integers : also includes the negative integers
    """
    # Same behavior as Naturals but starting at 0.
    _inf = S.Zero

    def _contains(self, other):
        if other.is_integer and other.is_nonnegative:
            return S.true
        elif other.is_integer is False or other.is_nonnegative is False:
            return S.false


class Integers(with_metaclass(Singleton, Set)):
    """
    Represents all integers: positive, negative and zero. This set is also
    available as the Singleton, S.Integers.

    Examples
    ========

    >>> from sympy import S, Interval, pprint
    >>> 5 in S.Naturals
    True
    >>> iterable = iter(S.Integers)
    >>> next(iterable)
    0
    >>> next(iterable)
    1
    >>> next(iterable)
    -1
    >>> next(iterable)
    2

    >>> pprint(S.Integers.intersect(Interval(-4, 4)))
    {-4, -3, ..., 4}

    See Also
    ========
    Naturals0 : non-negative integers
    Integers : positive and negative integers and zero
    """

    is_iterable = True

    def _intersect(self, other):
        from sympy.functions.elementary.integers import floor, ceiling
        if other is Interval(S.NegativeInfinity, S.Infinity) or other is S.Reals:
            return self
        elif other.is_Interval:
            # Round the interval bounds inward to the enclosed integers.
            s = Range(ceiling(other.left), floor(other.right) + 1)
            return s.intersect(other)  # take out endpoints if open interval
        return None

    def _contains(self, other):
        if other.is_integer:
            return S.true
        elif other.is_integer is False:
            return S.false

    def __iter__(self):
        # Enumerate as 0, 1, -1, 2, -2, ... so every integer is reached.
        yield S.Zero
        i = S(1)
        while True:
            yield i
            yield -i
            i = i + 1

    @property
    def _inf(self):
        return -S.Infinity

    @property
    def _sup(self):
        return S.Infinity

    @property
    def _boundary(self):
        return self

    def _eval_imageset(self, f):
        # Canonicalize images of the integers under linear maps a*n + b:
        # {a*n + b} == {a*n} when a, b are integers (shift absorbed),
        # and negation leaves the set unchanged.
        from sympy import Wild
        expr = f.expr
        if len(f.variables) > 1:
            return
        n = f.variables[0]
        a = Wild('a')
        b = Wild('b')

        match = expr.match(a*n + b)
        if match[a].is_negative:
            expr = -expr

        match = expr.match(a*n + b)
        if match[a] is S.One and match[b].is_integer:
            expr = expr - match[b]

        return ImageSet(Lambda(n, expr), S.Integers)


class Reals(with_metaclass(Singleton, Interval)):
    # Singleton wrapper: S.Reals is just the interval (-oo, oo).

    def __new__(cls):
        return Interval.__new__(cls, -S.Infinity, S.Infinity)

    def __eq__(self, other):
        return other == Interval(-S.Infinity, S.Infinity)

    def __hash__(self):
        # Must match __eq__: hash like the equivalent Interval.
        return hash(Interval(-S.Infinity, S.Infinity))


class ImageSet(Set):
    """
    Image of a set under a mathematical function

    Examples
    ========

    >>> from sympy import Symbol, S, ImageSet, FiniteSet, Lambda

    >>> x = Symbol('x')
    >>> N = S.Naturals
    >>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N}
    >>> 4 in squares
    True
    >>> 5 in squares
    False

    >>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares)
    {1, 4, 9}

    >>> square_iterable = iter(squares)
    >>> for i in range(4):
    ...     next(square_iterable)
    1
    4
    9
    16
    """
    def __new__(cls, lamda, base_set):
        return Basic.__new__(cls, lamda, base_set)

    lamda = property(lambda self: self.args[0])
    base_set = property(lambda self: self.args[1])

    def __iter__(self):
        # Yield each image value once, in base-set iteration order.
        already_seen = set()
        for i in self.base_set:
            val = self.lamda(i)
            if val in already_seen:
                continue
            else:
                already_seen.add(val)
                yield val

    def _is_multivariate(self):
        return len(self.lamda.variables) > 1

    def _contains(self, other):
        # 'other' is in the image iff some preimage lies in the base set;
        # solve lamda(x) == other and test the solutions.
        from sympy.solvers.solveset import solveset, linsolve
        L = self.lamda
        if self._is_multivariate():
            solns = list(linsolve([expr - val for val, expr in
                                   zip(other, L.expr)], L.variables).args[0])
        else:
            solns = list(solveset(L.expr - other, L.variables[0]))

        for soln in solns:
            try:
                if soln in self.base_set:
                    return S.true
            except TypeError:
                # Symbolic membership test failed; retry numerically.
                return self.base_set.contains(soln.evalf())
        return S.false

    @property
    def is_iterable(self):
        return self.base_set.is_iterable

    def _intersect(self, other):
        from sympy import Dummy
        from sympy.solvers.diophantine import diophantine
        from sympy.sets.sets import imageset
        if self.base_set is S.Integers:
            if isinstance(other, ImageSet) and other.base_set is S.Integers:
                # Intersect two integer-based image sets by solving the
                # Diophantine equation f(n) == g(m).
                f, g = self.lamda.expr, other.lamda.expr
                n, m = self.lamda.variables[0], other.lamda.variables[0]

                # Diophantine sorts the solutions according to the alphabetic
                # order of the variable names, since the result should not depend
                # on the variable name, they are replaced by the dummy variables
                # below
                a, b = Dummy('a'), Dummy('b')
                f, g = f.subs(n, a), g.subs(m, b)
                solns_set = diophantine(f - g)
                if solns_set == set():
                    return EmptySet()
                solns = list(diophantine(f - g))
                if len(solns) == 1:
                    t = list(solns[0][0].free_symbols)[0]
                else:
                    # Multiple parametric families are not handled here.
                    return None

                # since 'a' < 'b'
                return imageset(Lambda(t, f.subs(a, solns[0][0])), S.Integers)

        if other == S.Reals:
            # The real members of the image are those where Im(f) vanishes.
            from sympy.solvers.solveset import solveset_real
            from sympy.core.function import expand_complex
            if len(self.lamda.variables) > 1:
                return None

            f = self.lamda.expr
            n = self.lamda.variables[0]

            n_ = Dummy(n.name, real=True)
            f_ = f.subs(n, n_)

            re, im = f_.as_real_imag()
            im = expand_complex(im)

            return imageset(Lambda(n_, re),
                            self.base_set.intersect(
                                solveset_real(im, n_)))


@deprecated(useinstead="ImageSet", issue=7057, deprecated_since_version="0.7.4")
def TransformationSet(*args, **kwargs):
    """Deprecated alias for the ImageSet constructor."""
    return ImageSet(*args, **kwargs)


class Range(Set):
    """
    Represents a range of integers.

    Examples
    ========

    >>> from sympy import Range
    >>> list(Range(5)) # 0 to 5
    [0, 1, 2, 3, 4]
    >>> list(Range(10, 15)) # 10 to 15
    [10, 11, 12, 13, 14]
    >>> list(Range(10, 20, 2)) # 10 to 20 in steps of 2
    [10, 12, 14, 16, 18]
    >>> list(Range(20, 10, -2)) # 20 to 10 backward in steps of 2
    [12, 14, 16, 18, 20]
    """

    is_iterable = True

    def __new__(cls, *args):
        from sympy.functions.elementary.integers import ceiling
        # expand range
        slc = slice(*args)
        start, stop, step = slc.start or 0, slc.stop, slc.step or 1
        try:
            # Infinities pass through; everything else must be an exact int.
            start, stop, step = [w if w in [S.NegativeInfinity, S.Infinity]
                                 else S(as_int(w))
                                 for w in (start, stop, step)]
        except ValueError:
            raise ValueError("Inputs to Range must be Integer Valued\n" +
                             "Use ImageSets of Ranges for other cases")

        if not step.is_finite:
            raise ValueError("Infinite step is not allowed")
        if start == stop:
            return S.EmptySet

        n = ceiling((stop - start)/step)
        if n <= 0:
            return S.EmptySet

        # normalize args: regardless of how they are entered they will show
        # canonically as Range(inf, sup, step) with step > 0
        if n.is_finite:
            start, stop = sorted((start, start + (n - 1)*step))
        else:
            start, stop = sorted((start, stop - step))

        step = abs(step)
        if (start, stop) == (S.NegativeInfinity, S.Infinity):
            raise ValueError("Both the start and end value of "
                             "Range cannot be unbounded")
        else:
            # Stored stop is one step past the last element (half-open).
            return Basic.__new__(cls, start, stop + step, step)

    start = property(lambda self: self.args[0])
    stop = property(lambda self: self.args[1])
    step = property(lambda self: self.args[2])

    def _intersect(self, other):
        from sympy.functions.elementary.integers import floor, ceiling
        from sympy.functions.elementary.miscellaneous import Min, Max
        if other.is_Interval:
            osup = other.sup
            oinf = other.inf
            # if other is [0, 10) we can only go up to 9
            if osup.is_integer and other.right_open:
                osup -= 1
            if oinf.is_integer and other.left_open:
                oinf += 1

            # Take the most restrictive of the bounds set by the two sets
            # round inwards
            inf = ceiling(Max(self.inf, oinf))
            sup = floor(Min(self.sup, osup))
            # if we are off the sequence, get back on
            if inf.is_finite and self.inf.is_finite:
                off = (inf - self.inf) % self.step
            else:
                off = S.Zero
            if off:
                inf += self.step - off

            return Range(inf, sup + 1, self.step)

        if other == S.Naturals:
            return self._intersect(Interval(1, S.Infinity))

        if other == S.Integers:
            return self

        return None

    def _contains(self, other):
        # A member must differ from an endpoint by a multiple of step and
        # lie within [inf, sup]; unknown integrality yields None.
        if (((self.start - other)/self.step).is_integer or
                ((self.stop - other)/self.step).is_integer):
            return _sympify(other >= self.inf and other <= self.sup)
        elif (((self.start - other)/self.step).is_integer is False and
                ((self.stop - other)/self.step).is_integer is False):
            return S.false

    def __iter__(self):
        # A range unbounded below is iterated downward from its top element.
        if self.start is S.NegativeInfinity:
            i = self.stop - self.step
            step = -self.step
        else:
            i = self.start
            step = self.step

        while(i < self.stop and i >= self.start):
            yield i
            i += step

    def __len__(self):
        return (self.stop - self.start)//self.step

    def __nonzero__(self):
        # A constructed Range is never empty (EmptySet is returned instead).
        return True

    __bool__ = __nonzero__

    def _ith_element(self, i):
        return self.start + i*self.step

    @property
    def _last_element(self):
        if self.stop is S.Infinity:
            return S.Infinity
        elif self.start is S.NegativeInfinity:
            return self.stop - self.step
        else:
            return self._ith_element(len(self) - 1)

    @property
    def _inf(self):
        return self.start

    @property
    def _sup(self):
        # stop is stored one step past the last element.
        return self.stop - self.step

    @property
    def _boundary(self):
        return self


def normalize_theta_set(theta):
    """
    Normalize a Real Set theta in the Interval [0, 2*pi). It currently
    supports Interval and FiniteSet. It returns the normalized value
    of theta in the Set. For Interval, a maximum of one cycle [0, 2*pi]
    is returned, i.e. for theta equal to [0, 10*pi], the returned
    normalized value would be [0, 2*pi). As of now it supports theta as
    FiniteSet and Interval.

    Raises
    ======

    NotImplementedError
        The algorithms for Normalizing theta Set are not yet
        implemented.
    ValueError
        The input is not valid, i.e. the input is not a real set.
    RuntimeError
        It is a bug, please report to the github issue tracker.

    Examples
    ========

    >>> from sympy.sets.fancysets import normalize_theta_set
    >>> from sympy import Interval, FiniteSet, pi
    >>> normalize_theta_set(Interval(9*pi/2, 5*pi))
    [pi/2, pi]
    >>> normalize_theta_set(Interval(-3*pi/2, pi/2))
    [0, 2*pi)
    >>> normalize_theta_set(Interval(-pi/2, pi/2))
    [0, pi/2] U [3*pi/2, 2*pi)
    >>> normalize_theta_set(Interval(-4*pi, 3*pi))
    [0, 2*pi)
    >>> normalize_theta_set(Interval(-3*pi/2, -pi/2))
    [pi/2, 3*pi/2]
    >>> normalize_theta_set(FiniteSet(0, pi, 3*pi))
    {0, pi}

    """
    from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
    from sympy.functions.elementary.complexes import Abs

    if theta.is_Interval:
        # one complete circle
        if Abs(theta.args[0] - theta.args[1]) >= 2*S.Pi:
            return Interval(0, 2*S.Pi, False, True)

        new_theta = []
        for val in [theta.args[0], theta.args[1]]:
            # coeff(val) is val/pi reduced mod 2; None means val is not a
            # rational multiple of pi.
            k = coeff(val)
            if (not k) and (k != S.Zero):
                raise NotImplementedError('Normalizing theta without pi as'
                                          'coefficient, is not Implemented.')
            elif k == S.Zero:
                if val == S.Zero:
                    new_theta.append(S.Zero)
                else:
                    # when theta is n*pi
                    new_theta.append(2*S.Pi)
            else:
                new_theta.append(k*S.Pi)

        # for negative theta
        if new_theta[0] > new_theta[1]:
            # The normalized interval wraps past 2*pi: split into two pieces.
            return Union(Interval(S(0), new_theta[1]),
                         Interval(new_theta[0], 2*S.Pi, False, True))
        else:
            return Interval(*new_theta)

    elif theta.is_FiniteSet:
        new_theta = []
        for element in theta:
            k = coeff(element)
            if (not k) and (k != S.Zero):
                raise NotImplementedError('Normalizing theta without pi as'
                                          'coefficient, is not Implemented.')
            elif k == S.Zero:
                if element == S.Zero:
                    new_theta.append(S.Zero)
            else:
                new_theta.append(k*S.Pi)
        return FiniteSet(*new_theta)

    elif theta.is_subset(S.Reals):
        raise NotImplementedError("Normalizing theta when, its %s is not"
                                  "Implemented" % type(theta))
    else:
        raise ValueError(" %s is not a real set" % (theta))


class ComplexRegion(Set):
    """
    Represents the Set of all Complex Numbers. It can represent a
    region of Complex Plane in both the standard forms Polar and
    Rectangular coordinates.

    * Polar Form
      Input is in the form of the ProductSet or Union of ProductSets
      of the intervals of r and theta, & use the flag polar=True.

    Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}

    * Rectangular Form
      Input is in the form of the ProductSet or Union of ProductSets
      of interval of x and y the of the Complex numbers in a Plane.
      Default input type is in rectangular form.

    Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}

    Examples
    ========

    >>> from sympy.sets.fancysets import ComplexRegion
    >>> from sympy.sets import Interval
    >>> from sympy import S, I, Union
    >>> a = Interval(2, 3)
    >>> b = Interval(4, 6)
    >>> c = Interval(1, 8)
    >>> c1 = ComplexRegion(a*b)  # Rectangular Form
    >>> c1
    ComplexRegion(Lambda((_x, _y), _x + _y*I), [2, 3] x [4, 6])

    * c1 represents the rectangular region in complex plane
      surrounded by the coordinates (2, 4), (3, 4), (3, 6) and
      (2, 6), of the four vertices.

    >>> c2 = ComplexRegion(Union(a*b, b*c))
    >>> c2
    ComplexRegion(Lambda((_x, _y), _x + _y*I), [2, 3] x [4, 6] U [4, 6] x [1, 8])

    * c2 represents the Union of two rectangular regions in complex
      plane. One of them surrounded by the coordinates of c1 and
      other surrounded by the coordinates (4, 1), (6, 1), (6, 8) and
      (4, 8).

    >>> 2.5 + 4.5*I in c1
    True
    >>> 2.5 + 6.5*I in c1
    False

    >>> r = Interval(0, 1)
    >>> theta = Interval(0, 2*S.Pi)
    >>> c2 = ComplexRegion(r*theta, polar=True)  # Polar Form
    >>> c2  # unit Disk
    ComplexRegion(Lambda((_r, _theta), _r*(I*sin(_theta) + cos(_theta))), [0, 1] x [0, 2*pi))

    * c2 represents the region in complex plane inside the
      Unit Disk centered at the origin.

    >>> 0.5 + 0.5*I in c2
    True
    >>> 1 + 2*I in c2
    False

    >>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
    >>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
    >>> intersection = unit_disk.intersect(upper_half_unit_disk)
    >>> intersection
    ComplexRegion(Lambda((_r, _theta), _r*(I*sin(_theta) + cos(_theta))), [0, 1] x [0, pi])
    >>> intersection == upper_half_unit_disk
    True

    See Also
    ========

    Reals

    """
    is_ComplexRegion = True

    def __new__(cls, sets, polar=False):
        from sympy import symbols, Dummy
        x, y, r, theta = symbols('x, y, r, theta', cls=Dummy)
        I = S.ImaginaryUnit

        # Rectangular Form
        if polar is False:
            if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2):

                # ** ProductSet of FiniteSets in the Complex Plane. **
                # For Cases like ComplexRegion({2, 4}*{3}), It
                # would return {2 + 3*I, 4 + 3*I}
                complex_num = []
                for x in sets.args[0]:
                    for y in sets.args[1]:
                        complex_num.append(x + I*y)
                obj = FiniteSet(*complex_num)
            else:
                obj = ImageSet.__new__(cls, Lambda((x, y), x + I*y), sets)

        # Polar Form
        elif polar is True:
            new_sets = []
            # sets is Union of ProductSets
            if not sets.is_ProductSet:
                for k in sets.args:
                    new_sets.append(k)
            # sets is ProductSets
            else:
                new_sets.append(sets)
            # Normalize input theta
            for k, v in enumerate(new_sets):
                from sympy.sets import ProductSet
                new_sets[k] = ProductSet(v.args[0],
                                         normalize_theta_set(v.args[1]))
            sets = Union(*new_sets)
            from sympy import cos, sin
            obj = ImageSet.__new__(cls, Lambda((r, theta),
                                   r*(cos(theta) + I*sin(theta))),
                                   sets)
        return obj

    @property
    def sets(self):
        """
        Return raw input sets to the self.

        Examples
        ========

        >>> from sympy import Interval, ComplexRegion, Union
        >>> a = Interval(2, 3)
        >>> b = Interval(4, 5)
        >>> c = Interval(1, 7)
        >>> C1 = ComplexRegion(a*b)
        >>> C1.sets
        [2, 3] x [4, 5]
        >>> C2 = ComplexRegion(Union(a*b, b*c))
        >>> C2.sets
        [2, 3] x [4, 5] U [4, 5] x [1, 7]

        """
        return self.args[1]

    @property
    def psets(self):
        """
        Return a tuple of sets (ProductSets) input of the self.

        Examples
        ========

        >>> from sympy import Interval, ComplexRegion, Union
        >>> a = Interval(2, 3)
        >>> b = Interval(4, 5)
        >>> c = Interval(1, 7)
        >>> C1 = ComplexRegion(a*b)
        >>> C1.psets
        ([2, 3] x [4, 5],)
        >>> C2 = ComplexRegion(Union(a*b, b*c))
        >>> C2.psets
        ([2, 3] x [4, 5], [4, 5] x [1, 7])

        """
        # Always a tuple, whether the input was a single ProductSet or a Union.
        if self.args[1].is_ProductSet:
            psets = ()
            psets = psets + (self.args[1], )
        else:
            psets = self.args[1].args
        return psets

    @property
    def a_interval(self):
        """
        Return the union of intervals of `x` when, self is in
        rectangular form, or the union of intervals of `r` when
        self is in polar form.

        Examples
        ========

        >>> from sympy import Interval, ComplexRegion, Union
        >>> a = Interval(2, 3)
        >>> b = Interval(4, 5)
        >>> c = Interval(1, 7)
        >>> C1 = ComplexRegion(a*b)
        >>> C1.a_interval
        [2, 3]
        >>> C2 = ComplexRegion(Union(a*b, b*c))
        >>> C2.a_interval
        [2, 3] U [4, 5]

        """
        a_interval = []
        for element in self.psets:
            a_interval.append(element.args[0])

        a_interval = Union(*a_interval)
        return a_interval

    @property
    def b_interval(self):
        """
        Return the union of intervals of `y` when, self is in
        rectangular form, or the union of intervals of `theta`
        when self is in polar form.

        Examples
        ========

        >>> from sympy import Interval, ComplexRegion, Union
        >>> a = Interval(2, 3)
        >>> b = Interval(4, 5)
        >>> c = Interval(1, 7)
        >>> C1 = ComplexRegion(a*b)
        >>> C1.b_interval
        [4, 5]
        >>> C2 = ComplexRegion(Union(a*b, b*c))
        >>> C2.b_interval
        [1, 7]

        """
        b_interval = []
        for element in self.psets:
            b_interval.append(element.args[1])

        b_interval = Union(*b_interval)
        return b_interval

    @property
    def polar(self):
        """
        Returns True if self is in polar form.

        Examples
        ========

        >>> from sympy import Interval, ComplexRegion, Union, S
        >>> a = Interval(2, 3)
        >>> b = Interval(4, 5)
        >>> theta = Interval(0, 2*S.Pi)
        >>> C1 = ComplexRegion(a*b)
        >>> C1.polar
        False
        >>> C2 = ComplexRegion(a*theta, polar=True)
        >>> C2.polar
        True
        """
        # The polar lambda body is r*(cos + I*sin), a Mul; the rectangular
        # body x + I*y is an Add.
        return self.args[0].args[1].is_Mul

    @property
    def _measure(self):
        """
        The measure of self.sets.

        Examples
        ========

        >>> from sympy import Interval, ComplexRegion, S
        >>> a, b = Interval(2, 5), Interval(4, 8)
        >>> c = Interval(0, 2*S.Pi)
        >>> c1 = ComplexRegion(a*b)
        >>> c1.measure
        12
        >>> c2 = ComplexRegion(a*c, polar=True)
        >>> c2.measure
        6*pi

        """
        return self.sets._measure

    def _contains(self, other):
        from sympy.functions import arg, Abs

        # self in rectangular form
        if not self.polar:
            re, im = other.as_real_imag()
            for element in self.psets:
                if And(element.args[0]._contains(re),
                       element.args[1]._contains(im)):
                    return True
            return False

        # self in polar form
        elif self.polar:
            # arg(0) is undefined, so zero gets (r, theta) = (0, 0) directly.
            if S(other).is_zero:
                r, theta = S(0), S(0)
            else:
                r, theta = Abs(other), arg(other)
            for element in self.psets:
                if And(element.args[0]._contains(r),
                       element.args[1]._contains(theta)):
                    return True
            return False

    def _intersect(self, other):

        if other.is_ComplexRegion:
            # self in rectangular form
            if (not self.polar) and (not other.polar):
                return ComplexRegion(Intersection(self.sets, other.sets))

            # self in polar form
            elif self.polar and other.polar:
                r1, theta1 = self.a_interval, self.b_interval
                r2, theta2 = other.a_interval, other.b_interval
                new_r_interval = Intersection(r1, r2)
                new_theta_interval = Intersection(theta1, theta2)

                # 0 and 2*Pi means the same
                if ((2*S.Pi in theta1 and S(0) in theta2) or
                        (2*S.Pi in theta2 and S(0) in theta1)):
                    new_theta_interval = Union(new_theta_interval,
                                               FiniteSet(0))
                return ComplexRegion(new_r_interval*new_theta_interval,
                                     polar=True)

        if other is S.Reals:
            return other

        if other.is_subset(S.Reals):
            new_interval = []

            # self in rectangular form
            if not self.polar:
                # Real points of the region lie where the imaginary interval
                # contains 0.
                for element in self.psets:
                    if S.Zero in element.args[0]:
                        new_interval.append(element.args[0])
                new_interval = Union(*new_interval)
                return Intersection(new_interval, other)

            # self in polar form
            elif self.polar:
                # Real points occur at theta = 0 (positive axis) or pi
                # (negative axis).
                for element in self.psets:
                    if (0 in element.args[1]) or (S.Pi in element.args[1]):
                        new_interval.append(element.args[0])
                new_interval = Union(*new_interval)
                return Intersection(new_interval, other)

    def _union(self, other):

        if other.is_ComplexRegion:

            # self in rectangular form
            if (not self.polar) and (not other.polar):
                return ComplexRegion(Union(self.sets, other.sets))

            # self in polar form
            elif self.polar and other.polar:
                return ComplexRegion(Union(self.sets, other.sets), polar=True)

        if other.is_subset(S.Reals):
            # The reals are already contained in any ComplexRegion's carrier.
            return self

        return None


class Complexes(with_metaclass(Singleton, ComplexRegion)):
    # Singleton: the whole complex plane as the rectangular region R x R.

    def __new__(cls):
        return ComplexRegion.__new__(cls, S.Reals*S.Reals)

    def __eq__(self, other):
        if other == ComplexRegion(S.Reals*S.Reals):
            return True

    def __hash__(self):
        return hash(ComplexRegion(S.Reals*S.Reals))
Simplify your live video production. Reliable, simple live streaming with Pearl-2's integrated live video production workflow. Manage your streaming, recording, and live switching in one place with this all-in-one video encoder and video production system. Pearl-2 works with video sources from SD to 4K. Connect up to six sources at once to encode and stream one *4K Ultra HD program or up to six 1080p Full HD programs. Use any combination of your video sources connected to the HDMI, 4K HDMI, 12G SDI, or USB video input ports, or from IP cameras over your network. Choose the model that suits your needs. Each model can be purchased with the base feature set, or with the optional 4K feature add-on. Pearl-2 - For portable live production. Includes a hard-shell carrying case. Pearl-2 Rackmount - All the same features of Pearl-2, designed for installation in a rack. Pearl-2 Rackmount Twin - Two completely independent Pearl-2 systems for high-density rack installation. Harness the power of Pearl-2's 6th-generation Intel i7 processor to capture stunning *4K video at 30 fps. Pearl-2's hardware-accelerated encoder provides flawless H.264 encoding for maximum compatibility and performance with any video portal or player. Custom layout designer, now with chroma key! Design your own custom layouts, including picture-in-picture, using the drag-and-drop editor in Pearl-2's web-based Admin panel. Combine, crop and scale up to 6 video sources. Customize it completely with backgrounds, overlay images, text, and timestamping. Chroma key support for green screens and third-party titling software lets you include rich animations and lower thirds. Use the touch screen or Epiphan Live to preview and change the live content in your video programs. See changes immediately in your live video stream and on displays connected to Pearl-2's 4K HDMI video output ports. Control and monitor your live video production with Pearl-2's touch screen.
With a touch of the finger, turn on streaming or recording, or switch the layout in the live stream. Connect your headphones and monitor audio using the front-mounted 3.5 mm audio jack and on-screen VU meter. Show content to the front of house or send it to a larger confidence monitor with Pearl-2's two 4K HDMI video output ports. Configure them to share any input source or switched program output. Use Pearl-2's built-in streaming server to share via unicast or multicast stream with up to 50 viewers using popular streaming protocols such as HTTP, HLS, FLV, ASF, UPnP and SAP. Publish your live video stream to a large audience using an enterprise streaming server or CDN via RTMP or RTSP. Supported CDNs include Facebook Live, YouTube, Wowza, Livestream, Akamai, Ustream and others.
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RAlsace(RPackage): """Alternating Least Squares (or Multivariate Curve Resolution) for analytical chemical data, in particular hyphenated data where the first direction is a retention time axis, and the second a spectral axis. Package builds on the basic als function from the ALS package and adds functionality for high-throughput analysis, including definition of time windows, clustering of profiles, retention time correction, etcetera.""" homepage = "https://www.bioconductor.org/packages/alsace/" git = "https://git.bioconductor.org/packages/alsace.git" version('1.12.0', commit='1364c65bbff05786d05c02799fd44fd57748fae3') depends_on('r-als', type=('build', 'run')) depends_on('r-ptw', type=('build', 'run'))
If you are looking for Nuffield tractor spares in Roosebeck, then come to Tractor Spare Parts Ltd! We are the UK's largest stockists of tractor parts. Our extensive range of parts, merchandise, and booklets ensures anyone who is looking for Marshall tractor parts in Roosebeck can find what they need. We specialise in BMC engine parts and parts for Nuffield, Leyland and Marshall tractors. We offer a full restoration service as well as our parts business; this helps ensure all of our parts fit straight onto the tractors. Where possible, we aim to improve both the design and materials used in our parts. Looking for Nuffield tractor spares in Roosebeck can become arduous if you do not know whom or what to look for. Our reliable service is open to visitors by appointment only, so if you need to contact us, please send an email to info@tractorspareparts.co.uk, fill out the message box with any queries you have, and we will be happy to help. We also offer a fast home delivery service using a fully trackable courier. If you need any help with identifying the engine you are looking for, please get in touch with us. All our parts are re-manufactured where possible to the same standard as OEM suppliers. With our vast range of stock and engines, our team will help you find the parts you need straight away, all from one place and at a competitive price that cannot be matched anywhere else in the UK. Realising early on that the market was changing enabled Tractor Spare Parts Ltd to build a name in a market that is constantly growing. We aim to build strong relationships with our customers, as we do every day, whilst delivering top-quality products throughout the UK and internationally. Our love for all things tractor has stemmed from working around and using tractors on a daily basis; the work and care put into your project will be of the best quality in the UK.
BMC engines in Roosebeck are becoming more and more limited by the day; this can be from tractor owners throwing away parts, or from the neglect of tractors, leaving the parts to rust and become unusable. At Tractor Spare Parts we will pass on our existing expert knowledge to ensure the maintenance and parts are well looked after and will last for years to come. So if you are looking to purchase Nuffield tractor spares in Roosebeck, or would like to find out more about our services for purchasing or collecting Nuffield tractor spares in Roosebeck, contact Tractor Spare Parts Ltd on 01335 310 538, or alternatively email info@tractorspareparts.co.uk. We look forward to hearing from you.
#!/usr/bin/python

# Module coanlib provides utility routines for the coan test harnesses

import sys, os, subprocess, re, shutil, string, shlex, time, atexit, \
    errno

# Severity levels for report messages, in increasing order of gravity.
__progress = 0
__info = 1
__warning = 2
__error = 3
__fatal = 4

# Severity keyword -> numeric level.
__severities_by_word = {
    'progress' : __progress,
    'info' : __info,
    'warning' : __warning,
    'error' : __error,
    'fatal' : __fatal
}

# Numeric level -> severity keyword.
__severities_by_num = {
    __progress : 'progress',
    __info : 'info',
    __warning : 'warning',
    __error : 'error',
    __fatal : 'fatal'
}

# Module state: current program name, verbosity threshold, and the
# lazily computed names of the timing / test-size metrics files.
__prog = '<unknown program>'
__verbosity = __progress
__time_file = None
__test_size_file = None

def get_prog():
    ''' Get the current program name '''
    return __prog

def set_prog(prog):
    ''' Set the current program name '''
    global __prog
    __prog = prog

def get_verbosity():
    ''' Get the current verbosity level by keyword '''
    return __severities_by_num[__verbosity]

def set_verbosity(verbosity_keyword):
    ''' Set the verbosity level by keyword.
        Messages with a lower verbosity level will be suppressed '''
    global __verbosity
    __validate_verbosity(verbosity_keyword)
    __verbosity = __severities_by_word[verbosity_keyword]

def progress(msg):
    ''' Issue a progress message '''
    __report(__progress,msg)

def info(msg):
    ''' Issue an informational message '''
    __report(__info,msg)

def warn(msg):
    ''' Issue a warning message '''
    __report(__warning,msg)

def error(msg):
    ''' Issue an error message '''
    __report(__error,msg)

def fatal(msg):
    ''' Issue a fatal error message '''
    __report(__fatal,msg)

def finis(failures):
    ''' End a test according to the number of failures.
        The metrics files are removed on failure so that a partial
        run does not pollute later reports. '''
    if (failures):
        file_del(__get_time_file())
        file_del(__get_test_size_file())
    sys.exit(failures)

def bail(msg, exitcode = 1):
    ''' Issue a fatal error message and exit with a given system code '''
    fatal(msg)
    file_del(__get_time_file())
    file_del(__get_test_size_file())
    sys.exit(exitcode)

def __get_time_file():
    ''' Compute (once) and return the name of the test timing file,
        or None if no program name has been set. '''
    global __time_file
    if not __time_file:
        prog = get_prog()
        pkgdir = deduce_pkgdir()
        __time_file = os.path.join(pkgdir,'test_coan',prog +'.time.txt') \
            if prog != '<unknown program>' else None
    return __time_file

def __get_test_size_file():
    ''' Compute (once) and return the name of the test size file,
        or None if no program name has been set. '''
    global __test_size_file
    if not __test_size_file:
        prog = get_prog()
        pkgdir = deduce_pkgdir()
        __test_size_file = os.path.join(pkgdir,'test_coan',prog +'.size.txt') \
            if prog != '<unknown program>' else None
    return __test_size_file

def __compute_runtime(time_file):
    ''' Parse the coan timing file and add the entries to compute
        the total runtime of the process(es) recorded there.
        Lines that do not parse as floats are silently skipped. '''
    lines = slurp_lines(time_file)
    seconds = 0.0
    for line in lines:
        try:
            seconds = seconds + float(line)
        except ValueError:
            pass
    return seconds

def __compute_test_size(size_file):
    ''' Parse the coan test size file and add the entries to compute
        the total size of the tests(es) recorded there.
        Lines that do not parse as ints are silently skipped. '''
    lines = slurp_lines(size_file)
    test_files = 0
    for line in lines:
        try:
            test_files = test_files + int(line)
        except ValueError:
            pass
    return test_files

def __report(severity,msg):
    ''' Issue a message with a given severity.
        Sub-warning messages go to stdout; the rest to stderr. '''
    if severity >= __verbosity:
        severity_keyword = __severities_by_num[severity]
        outstr = __prog + ": " + severity_keyword + ": " + \
            msg + '\n'
        if severity < __warning:
            sys.stdout.write(outstr)
        else:
            sys.stderr.write(outstr)

def __validate_verbosity(keyword):
    ''' Validate a verbosity level keyword, bailing if unknown '''
    if keyword not in __severities_by_word:
        bail("Unknown severity keyword: \"" + keyword + "\"")

def windows():
    ''' Say whether the host OS is Windows '''
    return os.name == 'nt'

def fopen(file,mode):
    ''' Open a file for reading, writing or appending,
        bailing on failure '''
    if mode != 'r' and mode != 'w' and mode != 'a':
        bail('*** Unknown file open mode\'' + mode + '\' ***')
    try:
        return open(file,mode)
    except IOError as error:
        modestr = 'reading' if mode == 'r' else 'writing'
        bail('*** Cannot open file \"' + file + '\"' + " for " + \
            modestr + ': ' + error.strerror + ' ***')

def make_path(path):
    ''' Try to create the directory specified by a path,
        bailing on failure '''
    try:
        os.makedirs(path)
    except OSError as error:
        bail('*** Failed to create directory \"' + path + '\": ' + \
            error.strerror)

def del_tree(rootdir):
    ''' Try to delete a directory tree, if it exists,
        bailing on failure '''
    try:
        if os.path.isdir(rootdir):
            shutil.rmtree(rootdir)
    except OSError as error:
        bail('*** Failed to delete directory \"' + rootdir +
            '\": ' + error.strerror)

def file_copy(src,dest):
    ''' Try to copy a file to another, bailing on failure '''
    try:
        shutil.copyfile(src,dest)
    except IOError:
        bail('*** Failed to copy file \"' +\
            src + '\" as \"' + dest + '\"')

def file_copy_to(src,dest):
    ''' Try to copy a file, bailing on failure '''
    try:
        shutil.copy(src,dest)
    except IOError:
        bail('*** Failed to copy file \"' +\
            src + '\" -> \"' + dest + '\"')

def file_del(filename):
    ''' Try to delete a file if it exists, bailing on failure.
        A None/empty filename is a no-op: the metrics files are None
        when no program name has been configured. '''
    if not filename:
        return
    try:
        os.remove(filename)
    except OSError as e:
        if e.errno != errno.ENOENT:
            # errno.ENOENT = no such file or directory
            bail('*** Failed to delete file \"' + filename + '\"')

def slurp_command(cmd, with_stderr = False):
    ''' Return the output of a command as a string '''
    words = cmd.split()
    output = subprocess.check_output(words) if not with_stderr \
        else subprocess.check_output(words, stderr=subprocess.STDOUT)
    return output

def slurp_file(file):
    ''' Return the contents of a file as a string '''
    fh = fopen(file,'r')
    data = fh.read()
    fh.close()
    return data

def slurp_lines(file):
    ''' Return the contents of a file as a list of lines '''
    fh = fopen(file,'r')
    lines = fh.readlines()
    fh.close()
    return lines

def __timing_metrics_enabled():
    ''' Say whether timing metrics have been requested via the
        environment '''
    return os.getenv('TIMING_METRICS') == '1'

def __do_timing_metrics():
    ''' Say whether timing metrics are both requested and possible
        (GNU time must be installed) '''
    if not __timing_metrics_enabled():
        return False
    time_version = slurp_command('/usr/bin/time --version', True)
    return time_version.find('GNU') != -1

DO_TIMING_METRICS = __do_timing_metrics()

def run(cmd, stdout_file = None,stderr_file = None,stdin_file = None,
        timing = DO_TIMING_METRICS):
    ''' Run a command optionally specifying files to capture stdout and
        stderr and whether timing is required.
        Return the exit code of the command.
    '''
    time_file = __get_time_file() if timing else None
    if windows():
        cmd = re.sub(r'\\',r'\\\\',cmd)
    elif time_file:
        # Prefix the command with GNU time so the elapsed seconds are
        # appended to the timing file.
        cmd = "/usr/bin/time -f \"%e\" --quiet -a -o " \
            + time_file + ' ' + cmd
    args = []
    try:
        args = shlex.split(cmd)
    except ValueError:
        args = cmd.split()
    stdout_fh = None
    stderr_fh = None
    stdin_fh = None
    if stdout_file:
        stdout_fh = fopen(stdout_file,'w')
    if stderr_file:
        stderr_fh = fopen(stderr_file,'w')
    if stdin_file:
        # Open once (the original opened this file twice, leaking a
        # file handle).
        stdin_fh = fopen(stdin_file,'r')
    progress('*** Running: ' + cmd)
    syscode = subprocess.call(args,
        stdout=stdout_fh,stderr=stderr_fh,stdin=stdin_fh)
    if stdout_fh:
        stdout_fh.close()
    if stderr_fh:
        stderr_fh.close()
    if stdin_fh:
        stdin_fh.close()
    return syscode

def run_noerr(cmd):
    ''' Run a command, bailing on a non-zero exit code '''
    retcode = run(cmd)
    if retcode:
        bail('*** Command failed: \"' + cmd + '\": ' + \
            os.strerror(retcode))

def is_exe(path):
    ''' Say whether a path is an executable file '''
    return os.path.isfile(path) and os.access(path, os.X_OK)

def deduce_pkgdir(args = {}):
    ''' Deduce the actual coan package directory given the
        commandline args '''
    pkgdir = None
    try:
        pkgdir = args['pkgdir']
    except KeyError:
        pass
    if not pkgdir:
        pkgdir = os.getenv('COAN_PKGDIR')
    if not pkgdir:
        pkgdir = os.pardir
    return os.path.abspath(pkgdir)

def deduce_execdir(args = {}):
    ''' Deduce the actual directory containing the coan executable
        given the commandline args '''
    execdir = None
    try:
        execdir = args['execdir']
    except KeyError:
        pass
    if not execdir:
        execdir = 'src'
        builddir = os.getenv('COAN_BUILDDIR')
        if not builddir:
            builddir = deduce_pkgdir(args)
        execdir = os.path.join(builddir,execdir)
    return os.path.abspath(execdir)

def compute_runtime(time_files = None):
    ''' Parse the coan timing files and add the entries to
        compute the total runtime of the process(es) recorded there.
        If time_files is None it defaults to the program's own timing
        file. The default is computed at call time (not def time) so
        that a program name set via set_prog() is respected.
        If the list of timing files includes any but the program's own
        timing file then the current program's timing file is removed
        from the list.
    '''
    time_file = __get_time_file()
    if time_files is None:
        time_files = [time_file]
    if time_files != [time_file]:
        try:
            posn = time_files.index(time_file)
            del time_files[posn]
        except ValueError:
            pass
    seconds = 0.0
    for time_file in time_files:
        if time_file and os.path.exists(time_file):
            seconds = seconds + __compute_runtime(time_file)
    return seconds

def update_test_size_file(nfiles):
    ''' Append the number of files a test is to process to the
        coan test size file '''
    size_file = __get_test_size_file()
    if size_file:
        fh = fopen(size_file,'a')
        fh.write(str(nfiles) + '\n')
        fh.close()

def compute_test_size(size_files = None):
    ''' Parse the coan test size files and add the entries to
        compute the total size of the test(es) recorded there.
        If size_files is None it defaults to the program's own size
        file, computed at call time.
        If the list of size files includes any but the program's own
        size file then the current program's size file is removed
        from the list.
    '''
    size_file = __get_test_size_file()
    if size_files is None:
        size_files = [size_file]
    if size_files != [size_file]:
        try:
            posn = size_files.index(size_file)
            del size_files[posn]
        except ValueError:
            pass
    test_files = 0
    for size_file in size_files:
        if size_file and os.path.exists(size_file):
            test_files = test_files + __compute_test_size(size_file)
    return test_files

def report_runtime(time_files = None):
    ''' Display the total runtime recorded in the coan timing files.
        If the list of timing files includes any but the current
        program's timing file then the current program's timing file is
        removed from the list and the report is also written to the
        current program's timing file.
        Return the total runtime computed.
    '''
    time_file = __get_time_file()
    if time_files is None:
        time_files = [time_file]
    tot_time = compute_runtime(time_files)
    info('runtime in coan: ' + str(tot_time) + ' secs.')
    if time_file not in time_files:
        fh = fopen(time_file,'w')
        fh.write(str(tot_time) +'\n')
        fh.close()
    return tot_time

def report_test_size(size_files = None):
    ''' Display the total test size recorded in the coan test size
        files.
        If the list of size files includes any but the current
        program's size file then the current program's size file is
        removed from the list and the report is also written to the
        current program's size file.
        Return the total test size computed.
    '''
    size_file = __get_test_size_file()
    if size_files is None:
        size_files = [size_file]
    tot_size = compute_test_size(size_files)
    if tot_size != 0:
        info('Coan processed ' + str(tot_size) + ' input_files.')
    if size_file not in size_files:
        fh = fopen(size_file,'w')
        fh.write(str(tot_size) + '\n')
        fh.close()
    return tot_size

def report_metrics(time_files = None, size_files = None):
    ''' Report total test size, total runtime and the average
        processing time per input file '''
    tot_size = report_test_size(size_files)
    if tot_size == 0:
        return
    tot_time = report_runtime(time_files)
    av_time = float(tot_time) / float(tot_size)
    info('Average processing time per input file: {:2.6f} secs.'\
        .format(av_time))

def measure_runtime():
    ''' Initialize coan runtime measuring '''
    time_file = __get_time_file()
    if time_file:
        file_del(time_file)
        atexit.register(report_runtime,[time_file])

def measure_test_size():
    ''' Initialize coan test_size measuring '''
    size_file = __get_test_size_file()
    if size_file:
        file_del(size_file)
        atexit.register(report_test_size,[size_file])

def do_metrics():
    ''' Initialize coan test metrics '''
    time_file = __get_time_file()
    size_file = __get_test_size_file()
    if time_file and size_file:
        file_del(time_file)
        file_del(size_file)
        atexit.register(report_metrics,
            [time_file],
            [size_file])

if __name__ == "__main__":
    print(slurp_command('/usr/bin/time --version', True))
Simantics originated at VTT Technical Research Centre of Finland. The software has since grown beyond its origins and is becoming a broad community project. Simantics is licensed under the Eclipse Public License (EPL). Read more about the licensing of software applications developed on the Simantics platform. About THTH, the Association of Decentralized Information Management for Industry, and the Simantics Division.
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt

import frappe
from frappe.model.document import Document
import frappe.cache_manager
from frappe.model import log_types

class MilestoneTracker(Document):
	"""Tracks changes to one field of a doctype and records Milestones."""

	def on_update(self):
		self._invalidate_cache()

	def on_trash(self):
		self._invalidate_cache()

	def _invalidate_cache(self):
		# Drop the cached tracker map for the tracked doctype so the
		# next lookup rebuilds it from the database.
		frappe.cache_manager.clear_doctype_map('Milestone Tracker', self.document_type)

	def apply(self, doc):
		"""Insert a Milestone for *doc* if its tracked field changed."""
		previous = doc.get_doc_before_save()
		old_value = (previous and previous.get(self.track_field)) or None
		new_value = doc.get(self.track_field)
		if old_value == new_value:
			return
		milestone = frappe.get_doc({
			'doctype': 'Milestone',
			'reference_type': doc.doctype,
			'reference_name': doc.name,
			'track_field': self.track_field,
			'from_value': old_value,
			'value': new_value,
			'milestone_tracker': self.name,
		})
		milestone.insert(ignore_permissions=True)

def evaluate_milestone(doc, event):
	"""Document-event hook: apply all trackers registered for doc's doctype."""
	skip = (frappe.flags.in_install
		or frappe.flags.in_migrate
		or frappe.flags.in_setup_wizard
		or doc.doctype in log_types)
	if skip:
		return

	# track milestones related to this doctype
	for tracker in get_milestone_trackers(doc.doctype):
		frappe.get_doc('Milestone Tracker', tracker.get('name')).apply(doc)

def get_milestone_trackers(doctype):
	"""Return the (cached) enabled Milestone Trackers for *doctype*."""
	return frappe.cache_manager.get_doctype_map('Milestone Tracker', doctype,
		{'document_type': doctype, 'disabled': 0})
I bought the JY-MCU 3208 lattice clock from DealExtreme. It is a matrix display that features an HT1632C display driver and an ATmega8 microcontroller. To be able to program the processor I also bought a USBasp dongle on eBay (something similar to this one). I hadn't done much with microcontrollers other than the Arduino — blank ATmegas onto which I flashed the Arduino bootloader — and my previous post. So, this had to change. I figured it all out (at least, I think so) and am writing this post mostly for my own reference. A little test program that flashes the LEDs really fast showed that it had worked.
# -*- coding: utf-8 -*-
#
#       grabber.py
#
#       Copyright 2012 David Klasinc <bigwhale@lubica.net>
#
#       This program is free software; you can redistribute it and/or modify
#       it under the terms of the GNU General Public License as published by
#       the Free Software Foundation; either version 3 of the License, or
#       (at your option) any later version.
#
#       This program is distributed in the hope that it will be useful,
#       but WITHOUT ANY WARRANTY; without even the implied warranty of
#       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#       GNU General Public License for more details.
#
#       You should have received a copy of the GNU General Public License
#       along with this program; if not, write to the Free Software
#       Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#       MA 02110-1301, USA.

import os
import subprocess
import logging
logger = logging.getLogger("Grabber")

from gi.repository import GObject, Gtk, Gdk, GdkPixbuf, GdkX11

from kazam.backend.prefs import *
from kazam.frontend.save_dialog import SaveDialog

from gettext import gettext as _


class Grabber(GObject.GObject):
    """Takes screenshots of the root window, a single window or an area.

    Emits "flush-done" when a grab has been rendered into self.pixbuf
    and "save-done" (with the last used path) after a save dialog run.
    """

    __gsignals__ = {
        "save-done" : (GObject.SIGNAL_RUN_LAST,
                       None,
                       [GObject.TYPE_PYOBJECT],),
        "flush-done" : (GObject.SIGNAL_RUN_LAST,
                        None,
                        (),),
    }

    def __init__(self):
        GObject.GObject.__init__(self)
        logger.debug("Starting Grabber.")

    def setup_sources(self, video_source, area, xid, active = False, god = False):
        """Configure what to capture.

        video_source -- dict with 'x', 'y', 'width', 'height' keys
                        describing the screen region to grab
        area         -- optional crop rectangle (indexed tuple) or None
        xid          -- X11 window id to capture, or falsy for full screen
        active       -- if True, override xid with the currently active window
        god          -- if True, suppress the shutter sound in grab()
        """
        self.video_source = video_source
        self.area = area
        self.xid = xid
        self.god = god
        if active:
            from gi.repository import GdkX11
            active_win = HW.default_screen.get_active_window()
            # Replace the requested xid with the active window's xid.
            self.xid = GdkX11.X11Window.get_xid(active_win)

        logger.debug("Grabber source: {0}, {1}, {2}, {3}".format(self.video_source['x'],
                                                                 self.video_source['y'],
                                                                 self.video_source['width'],
                                                                 self.video_source['height']))

    def grab(self):
        """Capture the configured source into self.pixbuf.

        Optionally plays a shutter sound, composites the mouse cursor
        on top, crops to self.area, then emits "flush-done".
        """
        self.pixbuf = None
        disp = GdkX11.X11Display.get_default()
        dm = Gdk.Display.get_device_manager(disp)
        pntr_device = dm.get_client_pointer()

        #
        # Rewrite this, because it sucks
        #
        if prefs.shutter_sound and (not self.god):
            soundfile = os.path.join(prefs.datadir, 'sounds', prefs.sound_files[prefs.shutter_type])
            subprocess.call(['/usr/bin/canberra-gtk-play', '-f', soundfile])

        if self.xid:
            if prefs.capture_borders_pic:
                # Capture from the root window using the target window's
                # frame extents, so the window decorations are included.
                app_win = GdkX11.X11Window.foreign_new_for_display(disp, self.xid)
                (rx, ry, rw, rh) = app_win.get_geometry()
                area = app_win.get_frame_extents()
                (fx, fy, fw, fh) = (area.x, area.y, area.width, area.height)
                win = Gdk.get_default_root_window()
                logger.debug("Coordinates w: RX {0} RY {1} RW {2} RH {3}".format(rx, ry, rw, rh))
                logger.debug("Coordinates f: FX {0} FY {1} FW {2} FH {3}".format(fx, fy, fw, fh))
                # dx/dy: size difference between frame and client area,
                # used later to offset the cursor position.
                dx = fw - rw
                dy = fh - rh
                (x, y, w, h) = (fx, fy, fw, fh)
                logger.debug("Coordinates delta: DX {0} DY {1}".format(dx, dy))
            else:
                # Borderless capture: grab directly from the window.
                win = GdkX11.X11Window.foreign_new_for_display(disp, self.xid)
                (x, y, w, h) = win.get_geometry()
        else:
            # No window id: grab the configured region of the root window.
            win = Gdk.get_default_root_window()
            (x, y, w, h) = (self.video_source['x'],
                            self.video_source['y'],
                            self.video_source['width'],
                            self.video_source['height'])

        self.pixbuf = Gdk.pixbuf_get_from_window(win, x, y, w, h)
        logger.debug("Coordinates X {0} Y {1} W {2} H {3}".format(x, y, w, h))

        # Code below partially solves problem with overlapping windows.
        # Partially only because if something is overlapping window frame
        # it will be captured where the frame should be and also
        # because it doesn't work as it should. Offset trouble.
        #
        #if self.xid and prefs.capture_borders_pic:
        #    cw_pixbuf = Gdk.pixbuf_get_from_window(app_win, rx, ry, rw, rh)
        #    cw_pixbuf.composite(self.pixbuf, rx, ry, rw, rh,
        #                        dx,
        #                        dy,
        #                        1.0,
        #                        1.0,
        #                        GdkPixbuf.InterpType.BILINEAR,
        #                        255)

        if prefs.capture_cursor_pic:
            logger.debug("Adding cursor.")
            cursor = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.LEFT_PTR)
            c_picbuf = Gdk.Cursor.get_image(cursor)

            if self.xid and prefs.capture_borders_pic:
                pointer = app_win.get_device_position(pntr_device)
                (px, py) = (pointer[1], pointer[2])
                logger.debug("XID cursor: {0} {1}".format(px, py))
                # Offset by the frame delta plus the empirical 6/2 pixel
                # cursor hotspot correction used below.
                c_picbuf.composite(self.pixbuf, rx, ry, rw, rh,
                                   px + dx - 6,
                                   py + dy - 2,
                                   1.0,
                                   1.0,
                                   GdkPixbuf.InterpType.BILINEAR,
                                   255)
            else:
                (scr, px, py) = pntr_device.get_position()
                cur = scr.get_monitor_at_point(x, y)
                # Convert absolute pointer position to monitor-relative.
                px = px - HW.screens[cur]['x']
                py = py - HW.screens[cur]['y']
                #
                # Cursor is offset by 6 pixels to the right and 2 down
                #
                c_picbuf.composite(self.pixbuf, 0, 0, w - 1, h - 1,
                                   px - 6,
                                   py - 2,
                                   1.0,
                                   1.0,
                                   GdkPixbuf.InterpType.BILINEAR,
                                   255)
                logger.debug("Cursor coords: {0} {1}".format(px, py))

        if self.area is not None:
            # Crop the grabbed pixbuf to the requested area.
            # NOTE(review): area indices 0,1 appear to be x,y and 4,5
            # width,height -- confirm against the caller that builds it.
            logger.debug("Cropping image.")
            self.area_buf = GdkPixbuf.Pixbuf.new(GdkPixbuf.Colorspace.RGB, True, 8,
                                                 self.area[4], self.area[5])
            self.pixbuf.copy_area(self.area[0], self.area[1],
                                  self.area[4], self.area[5],
                                  self.area_buf, 0, 0)
            self.pixbuf = None
            self.pixbuf = self.area_buf

        self.emit("flush-done")

    def save(self, filename):
        """Write the last grabbed pixbuf to *filename* as PNG (no-op if
        nothing has been grabbed)."""
        if self.pixbuf is not None:
            self.pixbuf.savev(filename, "png", "", "")

    def save_capture(self, old_path):
        """Show a save dialog (starting at *old_path*), save on OK and
        emit "save-done" with the chosen directory."""
        logger.debug("Saving screenshot.")
        self.old_path = old_path
        (dialog, result, self.old_path) = SaveDialog(_("Save capture"),
                                                     self.old_path, None, main_mode=MODE_SCREENSHOT)

        if result == Gtk.ResponseType.OK:
            uri = os.path.join(dialog.get_current_folder(), dialog.get_filename())
            self.save(uri)
        dialog.destroy()
        self.emit("save-done", self.old_path)

    def autosave(self, filename):
        """Save without user interaction and emit "save-done"."""
        logger.debug("Autosaving to: {0}".format(filename))
        self.save(filename)
        self.emit("save-done", filename)
Step-by-step directions for making fourteen different rib baskets are accompanied by suggestions for variations to challenge the imagination of both novice and experienced basketmakers. With 231 images and 107 diagrams, this how-to guide offers general techniques and tips for successful weaving. Chapters on basketry tools, materials, and the components of a rib basket are included, as are answers to frequently asked questions. Directions for decorative weaving and composites of both contemporary and antique baskets provide further ideas for designing rib baskets that are uniquely one's own. By Jean Turner Finley.
"""
Perform http checks and generate reports
"""
import urllib
import urllib2

from client.cli import parser


class Check(object):
    """A single HTTP check against a URL.

    Attributes mirror the constructor arguments; only checkType 'http'
    is currently implemented.
    """

    def __init__(self, url, checkType='http', method='GET', data=None,
                 headers=None, *args, **kwargs):
        self.checkType = checkType
        self.url = url
        self.method = method
        # Body to send with the request, or None for a plain GET.
        self.data = data
        # Start from a fresh dict so instances never share header state.
        self.headers = {}
        if headers:
            self.headers = headers

    def run(self):
        """Perform the HTTP request.

        Returns the urllib2 response object on success.  On failure the
        raised exception object is returned rather than re-raised.
        NOTE(review): callers such as run_check() assume a response-like
        object and will fail on the exception path -- consider re-raising.
        """
        req = urllib2.Request(
            self.url,
            self.data,
            self.headers,
        )
        try:
            response = urllib2.urlopen(req)
        except Exception as e:
            return e
        return response


def run_check(url):
    """Run one HTTP check against *url* and pretty-print the result."""
    check = Check(url)
    result = check.run()

    from pprint import pprint
    pprint({
        'code': result.code,
        'headers': result.headers.items(),
        'msg': result.msg,
        'url': result.url,
        'content': result.read(),
    })


def cli_handler(**args):
    """CLI entry point: extract the '<url>' argument and run the check."""
    url = args.get('<url>')
    run_check(url)
"The store is very organized and has a lot of examples to look at. There are so many unique finds there. They even have supplies for local schools. ..." "Lots of neat little things. Cute little knick-knacks, candles, doll clothing, hair bows, jewelry, & you can even stuff a bear there!! It's really a one-stop ..." "This Disney store got rid of all of the expensive adult collector items in favor of things geared to the younger consumer. They have clothes, toys and stuffed ..." "If you are looking for the newest games, this store always seems to have them in stock. While they may not be a discount store, you do not ..."
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A mock implementation of a key manager. This module should NOT be used for
anything but integration testing.
"""

import array

from cinder import exception
from cinder.keymgr import key
from cinder.keymgr import key_mgr
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
from cinder import utils

LOG = logging.getLogger(__name__)


class MockKeyManager(key_mgr.KeyManager):
    """
    This mock key manager implementation supports all the methods specified
    by the key manager interface. This implementation stores keys within a
    dictionary, and as a result, it is not acceptable for use across different
    services. Side effects (e.g., raising exceptions) for each method are
    handled as specified by the key manager interface.

    This class should NOT be used for anything but integration testing because
    keys are not stored persistently.
    """

    def __init__(self):
        # In-memory key store: maps UUID string -> SymmetricKey.
        # Never persisted, so keys are lost when the process exits.
        self.keys = {}

    def create_key(self, ctxt, **kwargs):
        """Creates a key.

        This implementation returns a UUID for the created key. A
        NotAuthorized exception is raised if the specified context is None.

        kwargs may supply 'key_length' in bits (default 256).
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        # generate the key
        key_length = kwargs.get('key_length', 256)
        # hex digit => 4 bits, so key_length / 4 hex digits are requested
        hex_string = utils.generate_password(length=key_length / 4,
                                             symbolgroups='0123456789ABCDEF')

        # NOTE(review): str.decode('hex') is Python 2 only; the key is
        # stored as a list of byte values.
        _bytes = array.array('B', hex_string.decode('hex')).tolist()
        _key = key.SymmetricKey('AES', _bytes)

        return self.store_key(ctxt, _key)

    def store_key(self, ctxt, key, **kwargs):
        """Stores (i.e., registers) a key with the key manager.

        Returns the generated UUID under which the key was stored. A
        NotAuthorized exception is raised if the context is None.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        # generate UUID and ensure that it isn't in use
        key_id = uuidutils.generate_uuid()
        while key_id in self.keys:
            key_id = uuidutils.generate_uuid()

        self.keys[key_id] = key

        return key_id

    def get_key(self, ctxt, key_id, **kwargs):
        """Retrieves the key identified by the specified id.

        This implementation returns the key that is associated with the
        specified UUID. A NotAuthorized exception is raised if the specified
        context is None; a KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        return self.keys[key_id]

    def delete_key(self, ctxt, key_id, **kwargs):
        """Deletes the key identified by the specified id.

        A NotAuthorized exception is raised if the context is None and a
        KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        del self.keys[key_id]
This September the Last Language Textbook is coming to the Teunis Bergen PS 9 School in Brooklyn. Our Wikiotics Fellows have spent the summer building resources to help parents of the school’s bilingual education students learn along with their children. On September 8th we will be introducing these new materials to the community and showing how parents can get involved by building materials for each other, their children, and by participating in school sponsored language exchanges. Whether you are a parent, teacher, or just a community member interested in English and Spanish education, everyone is welcome at this free event. Join us September 8th from 1:00-4:00pm in the Info Commons room of the Brooklyn Public Library’s historic main branch location at Grand Army Plaza. This week we have looked at the three main elements of the NSA’s surveillance system: Bulk data collection and the construction of an index for all communications in the country, use of private companies to store and process the content of our domestic data, and partnerships with other government agencies at home and abroad. We have examined all of these elements to so that we can try and judge the NSA’s surveillance system based on how it is constructed rather than by the motives and ideals of those currently using it. Now that we have examined the components, it is time to look at the bigger picture. Wholesale collection of data, use of private companies as data refineries, and partnerships of mutual convenience with other government surveillance agencies. Those are the functional components of the NSA system, the bits of code out of which it is built. What does that tell us about the system as a whole? We know that tapping into fiber optic lines naturally leads to wholesale data collection. We know that during wholesale data collection it is difficult or impossible to tell just whose data is being collected. 
We know that possessing all of the data turns what were once external checks and balances, like the prohibition on the NSA collecting US citizen data, into matters of self-policing and internal procedure design. We also know that, given all of this for a decade, the NSA has sought to increase how much data on US citizens they can search, radically increase how long they can keep data, and expand partnerships with groups that can volunteer information for the system that is free of any regulations. Now that we know that, we can ask the real question: is this going to be the kind of system we use to police democratic societies for the rest of our lives? Before you decide, take a minute and watch this talk. The speaker, Malte Spitz, is a member of the German Parliament and used the German freedom of information laws to get a copy of all the “metadata” that his phone company stored about him. You can watch six months of his life reconstructed on that video. Everywhere he went, everyone he talked to, and all the groups he spoke with are captured in that metadata. There is power in being able to reconstruct someone’s life like that. Being able to reconstruct everyone’s lives at once is not just powerful, it is the kind of technology that could keep a government in power. Whether the NSA system was built to chase down terrorists or to disrupt political dissent does not matter. The power of the system matters and how much power we are comfortable giving to the secret operators of such a system matters. In our names the US government is building a new kind of surveillance system, one that upends all the laws meant to regulate such activity and that is tied directly into the internet connections that will be the primary communication infrastructure for the rest of our lives. We have perhaps the best opportunity we will ever get to examine the actions taken in our names and set new rules for how a democratic society governs itself in this area. 
Our deliberations and decisions will have wide ranging repercussions. As the price of technology continues to fall there will be many others capable of building similar systems and the choices we make now will set the standard of behavior when that happens. If we push back and we decide that this kind of monitoring is incompatible with a democratic society, our position as the central hub of the global internet means that we can hold that line for the next generation. If we move in the other direction and commit the center of the network to constant monitoring and recording, what will we say when those same tools are used to prop up the next “Axis of Evil” or suppress the next Arab Spring? In the technology community “code is law” is said as a reminder that our technologies are governed not by our intentions but by the way they are put together. It is also sometimes spoken in a hopeful note because, while code may be law, we write the code. We determine how our technology is built. It can be hard and it can be complicated, but we need to do it because, if we don’t do it right, someone else will do it wrong. So far this week we have looked at two of the three main components of the NSA’s surveillance system: how the NSA collects raw data from fiber optic cables and uses that to build an index of “metadata” that maps nearly all communications in the country going back to 2001 and how they enlist private companies as data distilleries holding and processing the contents of our domestic data. Today we will finish looking at the functional elements of the NSA system with a look at how government agencies at home and abroad partner with the NSA, skirting all effective data protection regulations as a result. The NSA is a single government agency. It may be the "largest, most covert, and potentially most intrusive intelligence agency ever" and it may sit at the center of the global communications network, but it is still just one agency and it has limits. 
They are still somewhat prohibited from directly targeting US citizens, which is the only factor limiting which domestic fiber optic cables they can tap into with splitter prisms. They also lack domestic access to the 7.25% of global internet traffic that does not pass through the US during transmission. The essential allies for overcoming these obstacles are other government agencies, both those at home and abroad. At home the NSA cooperates directly with numerous government agencies, most importantly the CIA, FBI, and the little known National Counter Terrorism Center (NCTC). In addition to sharing expertise, connections, and personnel resources, when these agencies work together they also benefit by skirting around laws designed to control just where they can operate. The NSA’s intelligence gathering is limited by law to foreign communications. In order to collect and store the phone records of purely domestic phone calls, as we can now confirm they are doing, someone other than the NSA must do the collection. In the case of phone records, the FBI is the one actually requesting records from the phone companies. The same is true of PRISM requests for internet communications. In all cases the NSA is the one who stores and analyzes the data; the intermediary agencies are used as legal cover. The reason for this game of digital hot potato is that data that is lawfully obtained by the government becomes fair game for other parts of the government to search. So, once the FBI has obtained everyone’s phone records the NSA no longer feels that the legal prohibitions on collecting data about US citizens apply. Making it easier for different government agencies to exchange information was one of the main reasons for creating the NCTC in 2003. Initially this information was limited. Information about US citizens who were not suspected of any crime could be included but could not be kept for longer than 180 days. 
Then in press release last march the Attorney General changed that from 180 days to five full years. Perhaps unsurprisingly this is the same length of time the NSA keeps such data on citizens. This one government partnership alone is a significant expansion of the NSA’s surveillance system. The NCTC brings access to all Federal databases including flight records, financial forms submitted by people seeking federally backed mortgages, the health records of people who sought treatment at Veterans Administration hospitals and many others. The only restriction on what databases the NCTC may keep is that they must be “reasonably believed” to contain “terrorism information.” With databases this large it seems reasonable to believe they contain everything. When foreign governments cooperate in surveillance even these trivial restrictions fade away. Just as we place no restrictions on what the NSA may do with information about non-US citizens, other governments place no restrictions on what their spy agencies can do with information about US citizens. Theoretically then it would be possible for two nations to spy on each other and then exchange information, much like strangers on a train. By accident or by design, this is much what happens with the British intelligence agency GCHQ, who we help access more than 200 fiber optic cables. In return we gain access to the processed metadata they collect. Any data we wish to share with them can be done through the NCTC. The only difference between our two programs is how long we each keep data. While we keep information on our citizens for up to five years the UK government only stores information on their subjects for a maximum of 30 days. Tomorrow we will put all these pieces into context and draw some conclusions about what these components mean for the surveillance system as a whole: Part 4 – The End. 
Update July 8: We learned over the weekend more details about the GCHQ cable tapping and have now have information about how the Australian and other close international partners operate their own social monitoring stations. The geographical diversity of these partner nations means that nearly all of the undersea fiber optic cables that tie the internet together are open to unregulated monitoring by one of our partners. As other nations build their communication storage capacities to match our own this means it will be legally and architecturally possible for this small group of democratic governments to access complete records of all internet communications. As long as nations only store information about each other’s citizens, no domestic surveillance laws will be triggered. As long as the records are complete, each nation will know that any information about their own citizens they wish to access at a later date can be simply requested from a partner. Yesterday we looked at how the NSA collects raw data from fiber optic cables and uses that to build an index of “metadata” that maps nearly all communications in the country going back to 2001. Today we take a look at the second component of that system: using private companies to store and process the contents of our data. By tapping into our nation’s fiber optic cables the NSA has built what is likely the largest data collection tool in the world. It is enough to make the Stasi jealous. Processing through all this data is an immense task and no doubt one reason they are building the world’s largest computer. Until that comes online, the NSA relies on an older method that they call “contact chaining” to search through all the data they collect. Contact chaining is when you start with a single person and look through the NSA index of communications to identify every person they have phoned or emailed. 
From there you can begin searching each of those newly identified contacts to see who they have phoned or emailed, proceeding out however many degrees of separation you wish until, we can assume, you invariably end up searching through Kevin Bacon’s address book. If this contact chain includes someone the NSA is interested in, one of the FISA judges instructs that person’s email, social network, and other online account providers to turn over all information they have about the individual. This collaboration with our largest technology companies is the PRISM program. Architecturally, using private companies to store data is a powerful strength of the NSA’s system. Data stored by private companies has almost no legal protection against government search, costs the NSA nothing to store, and is kept essentially forever. Perhaps most importantly, because all these tech companies make their money by studying our activities for advertisers, the data they produce to the NSA has been tagged, cross-referenced, and refined into useful formats. While this form of “share everything” plan might be objectionable to consumers, and no doubt this accounts for some of the current upset over the NSA’s activities, in the normal course of events the technology companies are not even allowed to disclose whether they have received demands from the FISA court, let alone what data may have been turned over. Access to the data warehouses of Google, Facebook, Microsoft, and others fills a vital role in the NSA surveillance system by turning the organizations we trust with our data into informants against us. While many of these companies may participate in PRISM unwillingly, Yahoo for example sued the government in secret court to avoid participation, part of the PRISM program is no doubt designed to improve relations with these companies and accustom them to providing information. Such positive relationships with private companies can be quite productive for the NSA. 
In 2001 it was voluntary cooperation from network operators that enabled the NSA to install all those fiber optic splitters, which operated for four months before the panel of judges charged with overseeing NSA surveillance were informed of the program. Good relationships also encourage some companies to go beyond merely complying with demands for data and actively make it easier to access such data about customers, as Sprint did when building a web portal for police that made it so easy to search for the location of individual phones that it was used 8 million times in 2008 alone. We now know that there are more than 80 companies voluntarily cooperating with the NSA, including one major US network operator that is steering data from around the US past the NSA splitters. It is unclear whether the NSA is gathering credit card information from one of these voluntary relationships or through PRISM demands. Maintaining positive relationships with the companies participating in PRISM also goes a long way toward preventing those technology giants from making changes that would reduce the amount of information the NSA can access. These technology companies are as close as we currently have to a civil society infrastructure for digital communications. If they were significantly against the NSA’s activities, they could do significant damage to the NSA’s capabilities simply by changing their own business practices. When faced with a similarly broad government monitoring program in Sweden, internet providers in the country decided to stop keeping records of user activity so that there would be no information to turn over. Similarly, our own tech companies could decide to keep less information about us, to encrypt more of it by default, or make other architectural changes that would reduce the volume of information they are required to transmit to the NSA. 
The $100 million the NSA spent collecting data from private companies between 2001 and 2006 likely helps prevent those kinds of changes. Yet, no matter how cozy the relationship or how extensive a private company’s resources, to build a truly global surveillance system you need the cooperation of governments: Part 3. If you have heard anything about the NSA this month, you have heard grand statements and sweeping generalizations. More than likely you have heard a whole gallery of commentators try and relate the news to ideals like “liberty”, “security”, and “privacy”, as if we could all agree about what those ideals mean. In the technology world we have a saying, “code is law”, to remind everyone that the systems we build are not governed by our ideals, they are governed by the practical way we put them together. What the NSA has built is a tool: a system of technology, personnel, and regulations. To judge this tool based on the ideals of those involved or the reasons for its creation is a job for pundits. Us? We know to look at the code. Prisms, internet giants, and James Bond. So, what exactly is the “code” of a national surveillance system? Unpacking the avalanche of NSA information this month we can see three major components of the system: collection of wholesale raw data, use of private companies as data refineries, and collaboration with other spy agencies, including the British NSA equivalent, the GCHQ. These three components determine how the system works, what its limitations are, and what it is capable of; they are its “code” and they each have important ramifications for the system as a whole so we will look at them each in turn. Of all the NSA programs revealed recently, PRISM has gotten perhaps the most press. We will be focusing on the specifics of this program in the next section but it is worth mentioning here for its name alone. Have you ever wondered why they would name a data collection program “Prism”? 
While the actual reasons are still classified, my guess is that the name is an homage to the NSA’s practice of using actual glass prism-like devices for data collection. Glass is useful for data collection because most internet traffic that travels any distance is converted into patterns of light and sent over fiber optic cables. If you can tap into the fiber optic cable you can install a prism-like device: you can split that light, sending part of it further down the line as intended while sending a duplicate copy somewhere else. We learned back in 2006 that the NSA began installing prism-like “splitter” devices in all the major fiber optic cables in the country, installing secret rooms at the nation’s leading phone and internet companies to capture copies of everything flowing over the network. Notice that this approach is only useful when you want to copy everything going over a cable; you cannot, for instance, have the splitter recognize what information is bound for overseas and what is just moving over to the next town. Once you get down to the actual cables all our communications run through, all our data looks the same. This is fundamentally important because the NSA is legally prohibited from monitoring US citizens but, once you tap into the cables, the only way to make sure that you will end up with the particular data you want is to take all of it and look through it later. While the NSA has varied what portions of this information it keeps, and under what legal authority it claims the right to keep them, those changes are governed by internal decisions at the agency, not by the technology of the system itself. It is impossible to say just how much of this raw data the NSA has kept since 2001. Because there are no legal restrictions on storing information about non-US citizens, the recently disclosed documents pay little attention to the issue. We have learned that in Germany alone the NSA collects half a billion records a month. 
One possible indication of the scale of the data being stored is the new $2 billion data center the NSA is opening this September: estimates are that it will be able to store all the traffic that moves over the internet for years to come. For US citizens we know that the NSA collected a nearly complete index for all emails sent between 2001 and 2011, when they halted the program for “operational and resource reasons”. This index includes a record of each email sent, who sent it, and what computer network they were on when sending it. They appear to have collected some form of credit card transaction history, likely a list of purchase times, amounts, and merchants. Similarly, the NSA has been collecting records of all phone calls made on US carriers, what numbers they call, how long they talk, and, potentially, where they call from if they are using mobile phones. This sort of communications history for an individual has historically been called a “pen register” and government agencies normally need a court order to create one. The NSA argues that they are not governed by these rules because they collect data in bulk and only search through it later while the older laws were designed for devices that did both at once. This recording of phone activity is still going on today. In the press this index of everyone’s activity is referred to as “metadata” because it is information about our communications but not the contents of those communications. Storing the contents of our communications would run afoul of wiretapping laws and would require many times more storage than keeping an index does. Until that new data center goes online, such activity might be operationally difficult for the NSA as well as legally treacherous. Instead, the NSA keeps an index of our communications and, whenever they want to see the contents, they request them from the tech companies that run our email and social networks. 
Tomorrow we will look at the role that private companies play in distilling our data: Part 2.
# You have a queue of integers, you need to retrieve the first unique integer in the queue. # Implement the FirstUnique class: # FirstUnique(int[] nums) Initializes the object with the numbers in the queue. # int showFirstUnique() returns the value of the first unique integer of the queue, and returns -1 if there is no such integer. # void add(int value) insert value to the queue. # Example 1: # Input: # ["FirstUnique","showFirstUnique","add","showFirstUnique","add","showFirstUnique","add","showFirstUnique"] # [[[2,3,5]],[],[5],[],[2],[],[3],[]] # Output: # [null,2,null,2,null,3,null,-1] # Explanation: # FirstUnique firstUnique = new FirstUnique([2,3,5]); # firstUnique.showFirstUnique(); // return 2 # firstUnique.add(5); // the queue is now [2,3,5,5] # firstUnique.showFirstUnique(); // return 2 # firstUnique.add(2); // the queue is now [2,3,5,5,2] # firstUnique.showFirstUnique(); // return 3 # firstUnique.add(3); // the queue is now [2,3,5,5,2,3] # firstUnique.showFirstUnique(); // return -1 # Example 2: # Input: # ["FirstUnique","showFirstUnique","add","add","add","add","add","showFirstUnique"] # [[[7,7,7,7,7,7]],[],[7],[3],[3],[7],[17],[]] # Output: # [null,-1,null,null,null,null,null,17] # Explanation: # FirstUnique firstUnique = new FirstUnique([7,7,7,7,7,7]); # firstUnique.showFirstUnique(); // return -1 # firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7] # firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3] # firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3,3] # firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7,3,3,7] # firstUnique.add(17); // the queue is now [7,7,7,7,7,7,7,3,3,7,17] # firstUnique.showFirstUnique(); // return 17 # Example 3: # Input: # ["FirstUnique","showFirstUnique","add","showFirstUnique"] # [[[809]],[],[809],[]] # Output: # [null,809,null,-1] # Explanation: # FirstUnique firstUnique = new FirstUnique([809]); # firstUnique.showFirstUnique(); // return 809 # firstUnique.add(809); // the queue is now [809,809] # 
firstUnique.showFirstUnique(); // return -1 # Constraints: # 1 <= nums.length <= 10^5 # 1 <= nums[i] <= 10^8 # 1 <= value <= 10^8 # At most 50000 calls will be made to showFirstUnique and add. class Node: def __init__(self,val): self.val = val self.prev = None self.next = None class DLL: def __init__(self): self.head = Node(-1) self.tail = Node(-1) self.head.next, self.tail.prev = self.tail, self.head self.count = 0 def insert(self, val): newNode = Node(val) newNode.prev = self.tail.prev newNode.next = self.tail self.tail.prev.next = newNode self.tail.prev = newNode self.count += 1 return newNode def remove(self, node): prev, nxt = node.prev, node.next node.prev.next = nxt node.next.prev = prev self.count -= 1 def isEmpty(self): return (self.count == 0) # LRU cache logic. # We can use a doubly linked list(which will only store unique numbers and the first one in the front, pointed by the head) # and a hashmap (which will have the object reference of the number). # Upon seeing a duplicate, go and delete the object from the DLL. class FirstUnique: def __init__(self, nums: List[int]): self.dll = DLL() self.numDict = {} for num in nums: self.add(num) def showFirstUnique(self) -> int: if self.dll.isEmpty(): return -1 return self.dll.head.next.val def add(self, value: int) -> None: if value in self.numDict and self.numDict[value] != -1: self.dll.remove(self.numDict[value]) self.numDict[value] = -1 else: self.numDict[value] = self.dll.insert(value)
Emma Stone recently did an interview titled “Great Minds Think Unalike” with Dr. Harold S. Koplewicz for the Child Mind Institute. During the interview, Stone discussed her life-long experience managing her anxiety. While speaking to Dr. Koplewicz, she recounted her very first panic attack that hit when she was only 7 years old. She also described how her anxiety affected her ability to spend time with friends and participate in school. Stone continued to tell Dr. Koplewicz that her therapist had diagnosed her with generalized anxiety and panic disorder, but did not inform Emma of the diagnosis. Emma was glad she was kept out of the loop. Luckily for the aspiring actors and actresses out there, Emma is changing that fact. Stone went on to describe how she managed her condition. And, as expected, Stone also found comfort in the empathetic element of acting. Emma still struggles with her anxiety despite being a successful, Oscar-winning actress and the internet loves her for being open about it. I wish more people would speak more openly about mental health. Academy Award winning actress Emma Stone telling her story of the severe anxiety and panic attacks she experienced growing up, to the point where she couldn’t leave her home. It’s a ‘monster on her shoulder’ she still battles with today. Thank you for your honesty, Ms. Stone! To see the full interview between Emma and Dr. Koplewicz, click here.
import sys sys.path.append('C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_base') #sys.path.append('C:/Users/dmccloskey/Google Drive/SBaaS_base') from SBaaS_base.postgresql_settings import postgresql_settings from SBaaS_base.postgresql_orm import postgresql_orm # read in the settings file filename = 'C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_base/settings_1.ini'; #filename = 'C:/Users/dmccloskey/Google Drive/SBaaS_base/settings_2.ini'; pg_settings = postgresql_settings(filename); # connect to the database from the settings file pg_orm = postgresql_orm(); pg_orm.set_sessionFromSettings(pg_settings.database_settings); session = pg_orm.get_session(); engine = pg_orm.get_engine(); # your app... sys.path.append(pg_settings.datadir_settings['drive']+'/SBaaS_LIMS') sys.path.append(pg_settings.datadir_settings['drive']+'/SBaaS_isotopomer') sys.path.append(pg_settings.datadir_settings['github']+'/io_utilities') sys.path.append(pg_settings.datadir_settings['github']+'/calculate_utilities') sys.path.append(pg_settings.datadir_settings['github']+'/MDV_utilities') sys.path.append(pg_settings.datadir_settings['github']+'/molmass') sys.path.append(pg_settings.datadir_settings['github']+'/matplotlib_utilities') sys.path.append(pg_settings.datadir_settings['github']+'/quantification_analysis') ##Analyze the MRM data #make the results table to analyze the MRM data from SBaaS_isotopomer.stage01_isotopomer_MQResultsTable_execute import stage01_isotopomer_MQResultsTable_execute exmqrt01 = stage01_isotopomer_MQResultsTable_execute(session,engine,pg_settings.datadir_settings); exmqrt01.drop_dataStage01_isotopomer_MQResultsTable(); exmqrt01.initialize_dataStage01_isotopomer_MQResultsTable(); exmqrt01.execute_deleteExperimentFromMQResultsTable('ALEsKOs01',sample_types_I = ['Quality Control','Unknown','Standard','Blank']) exmqrt01.import_dataStage01IsotopomerMQResultsTable_add('data/tests/analysis_isotopomer/150911_Isotopomer_ALEsKOs01_tpiAEvo01-04_samples01.csv'); 
# Export a QC metric plot (retention time of the fdp transition) to .js.
exmqrt01.export_dataStage01MQResultsTable_metricPlot_js('chemoCLim01',component_names_I = ['fdp.fdp_1.Light'],measurement_I='RT');

#make the normalized methods tables
from SBaaS_isotopomer.stage01_isotopomer_normalized_execute import stage01_isotopomer_normalized_execute
normalized01 = stage01_isotopomer_normalized_execute(session,engine,pg_settings.datadir_settings);
# WARNING: drop + initialize recreates the normalized table from scratch.
normalized01.drop_dataStage01_isotopomer_normalized();
normalized01.initialize_dataStage01_isotopomer_normalized();
normalized01.reset_dataStage01_isotopomer_normalized('ALEsKOs01');
# build the spectrums from MRM for the four tpiA evolution end-point lineages;
# an empty met_ids_I list appears to mean "all metabolites" -- confirm in the
# execute implementation.
normalized01.execute_buildSpectrumFromMRMs('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc',
        ],
    met_ids_I=[
        ]
    );
# export the data to .csv
# NOTE(review): '%' looks like a match-all SQL LIKE wildcard -- verify.
normalized01.export_dataStage01IsotopomerNormalized_csv('ALEsKOs01',
    filename_O = 'data/tests/analysis_isotopomer/normalized_MRM.csv',
    sample_name_abbreviation_I='%',
    time_point_I='%',
    scan_type_I='%',
    met_id_I='%')
#export spectrums to js
normalized01.export_dataStage01IsotopomerNormalized_js('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'
        ],
    met_ids_I=[],
    scan_types_I=['MRM'],
    single_plot_I = False,
    );
#export spectrums to matplotlib
normalized01.plot_normalizedSpectrum('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'
        ],
    met_ids_I=[],
    scan_types_I=['MRM'],
    );
# update the DB from .csv
# NOTE: by_id = True should be used for most cases, but for this example the
# row ids in the .csv do not match what is in the DB.
normalized01.import_dataStage01IsotopomerNormalized_update('data/tests/analysis_isotopomer/150911_Isotopomer_ALEsKOs01_tpiAEvo01-04_normalizedUpdate01.csv',
    by_id = False)
# update specific samples
normalized01.execute_updateNormalizedSpectrum('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'
        ],
    met_ids_I=[],
    scan_types_I=['MRM']
    );

#make the averages methods tables
from SBaaS_isotopomer.stage01_isotopomer_averages_execute import stage01_isotopomer_averages_execute
ave01 = stage01_isotopomer_averages_execute(session,engine,pg_settings.datadir_settings);
# WARNING: drop + initialize recreates the averages table from scratch.
ave01.drop_dataStage01_isotopomer_averages();
ave01.initialize_dataStage01_isotopomer_averages();
ave01.reset_dataStage01_isotopomer_averages('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'],
    scan_types_I = ['MRM']);
# calculate the spectrum averages for specific met_ids and/or scan_types
ave01.execute_analyzeAverages('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'],
    met_ids_I=[],
    scan_types_I = ['MRM']);
# calculate averages by normalizing the spectrum to 1.0 for specific met_ids and/or scan_types
ave01.execute_analyzeAveragesNormSum('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'
        ],
    met_ids_I=[],
    scan_types_I=['MRM']
    );
# review the spectrums in excel
ave01.export_dataStage01IsotopomerAveragesNormSum_csv('ALEsKOs01',
    filename_O='data/tests/analysis_isotopomer/averagesNormSum.csv',
    sample_name_abbreviation_I='%',
    time_point_I='%',
    sample_type_I='%',
    scan_type_I='%',
    met_id_I='%')
# export the spectrums to matplotlib
ave01.plot_averageSpectrumNormSum('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'
        ],
    met_ids_I=[],
    scan_types_I=['MRM']
    );
# export the spectrums to .js
ave01.export_dataStage01IsotopomerAveragesNormSum_js('ALEsKOs01',
    sample_name_abbreviations_I=[
        'OxicEvo04tpiAEvo01EPEcoli13CGlc',
        'OxicEvo04tpiAEvo02EPEcoli13CGlc',
        'OxicEvo04tpiAEvo03EPEcoli13CGlc',
        'OxicEvo04tpiAEvo04EPEcoli13CGlc'
        ],
    met_ids_I=[],
    scan_types_I=['MRM'],
    single_plot_I = False,
    );
St. Anne and St. Louis Elementary Schools in Webster are to combine in the fall of 2016 to form All Saints Academy, Bishop McManus announced in Oct. 20 letters to the pastors. Letters were sent home with the students last Friday and the information was shared with parishioners on the weekend. Responses have been primarily positive, according to leaders from St. Anne's. Father Adam Reid, pastor of Sacred Heart of Jesus Parish, which runs St. Anne's School, included the bishop's letter in the parish bulletin. He said he made himself available after Masses and at an event at the school for comments or questions. ""There was some feedback from parishioners exiting the church,"" he said. ""Almost exclusively, it was positive."" He said people realized the new school could offer additional programs and they said they were very comfortable with these two communities working more closely together. ""In the minds of people I spoke with, it seemed almost a natural fit,"" he said. At the school, parents' response was positive, Father Reid said, and he sensed relief that the search for a viable vision was completed. ""I was very pleased with the feedback people gave, and it seemed to be a further affirmation that the direction we are moving in is in line with God's wishes,"" he said. Undoubtably people have questions about how this vision will crystalize, he said, ""but it appears there is an openness to seeing what I believe is the great potential embedded in this new model."" He said he is convinced that when people start to understand what this could offer, it will generate excitement. Father Reid credited Father William F. Sanders, St. Louis' pastor, with the idea for the name All Saints Academy. ""He envisioned that that name could hint at the rich heritage of the two schools, but also denote that something altogether new is growing"" from their long histories, he said. 
He said combining the present names might have inhibited thoughts that the new school can offer more than what the two offer now. ""In my opinion the name suggests how advantageous it will be to fervently seek the intercession of the two saints that have stood so long as heavenly advocates for these two schools,"" he said. Delma L. Josephson, superintendent of schools, said it is anticipated that All Saints Academy will serve at least pre-kindergarten 3 through grade 8, as both schools do now. Both campuses will be used, she said. She said she and William J. Mulford, associate superintendent, met with the pastors Oct. 23 to begin discussing a governance model for use of the four buildings (each school has two) and who will be responsible for them, among other things. The pastors will be part of this, and the bishop wants strong parish connections, she said. But it will not be a parish school; it will be separately incorporated. She said there will be a search for someone to head it, and that she couldn't say what the current principals will choose to do. When The Catholic Free Press invited Father Sanders to comment about the change Saturday, he said he and his people needed time to assimilate the news and he would refer questions to the bishop's office and the school department. Tuesday St. Louis' principal, Anthony Luzzetti, referred questions to Father Sanders, who he said is their spokesman. St. Joseph Elementary is the other parish school in Webster. It is at 47 Whitcomb St., between St. Anne's on Day St. and St. Louis at 48 Negus St. St. Joseph's is not taking part in this change now, as was originally proposed, Superintendent Josephson said. ""There were a number of St. Joseph's parishioners and parents who really wanted to keep St. Joseph's St. Joseph's,"" she said. Msgr. Anthony S. Czarnecki, pastor of St. Joseph Basilica, which runs St. 
Joseph School, credited Bishop McManus with making a decision that showed concern for Catholic education and also respect for local circumstances. ""We pray that the new configuration will help to promote Catholic education in Webster,"" he said. ""It will be beneficial for all of us."" The process of getting to this point started last year, when Bishop McManus commissioned Meitler Consultants, of Milwaukee, to conduct a diocesan-wide data study of Catholic education. Work began in February 2013 and ended last fall, Superintendent Josephson said. It looked at enrollment, finances, capacity of buildings, demographics, and other issues. Leaders of parish, regional and central Catholic schools in the diocese received the study for internal use in strategic planning, she said. She said Webster was one of the first areas in the diocese to use it for a study leading to a localized plan. Last November Webster pastors announced that Alan Meitler would facilitate a joint study committee to shape the new vision for Catholic education locally. The committee included the three pastors and people they recommended. The three principals, who were not on the committee, met with Mr. Meitler and a Catholic Schools Office representative on a ""parallel track"" to keep them updated and get their input, Father Reid said. There were also meetings to inform parish and school representatives and get their input. In May a ""vision"" to combine the three schools, beginning in the 2015-2016 school year, was released. Kindergarten through grade 4 would be at St. Louis, grades 5-8 at St. Joseph's and preschool at St. Anne's. St. Louis was to be expanded eventually, and the preschool was to be moved there, leaving St. Anne's school building unused, Father Reid said. St. Anne's gym/hall would continue to be used. 
Father Reid said feedback from the parish and school communities varied, but that feedback moved the study committee to ask Bishop McManus for time to consider this vision further and look at an alternative. He said Bishop McManus granted them six months. A subcommittee worked on an alternative, and a few weeks ago presented it to the study committee, which recommended it to the bishop, Superintendent Josephson and Father Reid said. Bishop McManus accepted this proposal - to combine St. Anne's and St. Louis into All Saints Academy. The study committee was disbanded, having completed its charge, Superintendent Josephson said. But conversations it started with the Boston Archdiocese's school department will continue, in order to learn from experiences with combining schools there. During the deliberation process in Webster it became evident that ""there is a need for some healing to take place,"" she said. Some emotions stemmed from not knowing what would happen, ""so we want to work quickly, but carefully,"" she said. ""Maybe this is kind of like an engagement period, where you work to prepare for coming together."" Each party brings strengths and weaknesses. ""You emphasize those strengths,"" she said. ""You're not losing; you're bringing them together. I think students and families, as this evolves, are going to be so excited."" She said she thinks other students - Catholics and others - will say, ""I want to be part of that."" Ellen Tagg, St. Anne's development director, and Marilyn Berthiaume, a teacher there, made similar points. They spoke to The Catholic Free Press this week since the principal, Sister Constance Bayeur, a Sister of St. Anne, was away. Mrs. Tagg said some families have been involved with St. Anne's for several generations. ""Both schools - somehow we're going to keep parts of their legacies,"" she said. Mrs. Berthiaume said legacies are important in Webster. 
In May, when the proposal for combining the three schools and eventually abandoning St. Anne's building was released, there was an ""uproar,"" the women said. ""A lot of people were upset they had no input,"" Mrs. Tagg said. She said parents felt decisions were made too fast and they didn't understand the reasons behind choice of buildings. The pastors and school department representatives listened to concerns, and she and Mrs. Berthiaume became part of the subcommittee which sought an alternative vision, she said. Now parents still want more information, but seem pretty positive and hopeful, Mrs. Tagg said; she thinks they realize their children could get much more from the schools combining. A St. Louis teacher came to buy tickets for a St. Anne's fundraiser and said St. Louis' teachers are excited about the change, Mrs. Berthiaume said. She said St. Anne's student council members asked if they will be doing things with St. Louis students, with whom they play sports on town teams. ""They are already ready to join efforts,"" she said. The women said St. Anne's invited St. Louis and St. Joseph schools to send all their students to a magic show St. Anne's is hosting Jan. 23, 2015, for Catholic Schools Week, and both schools accepted. They said this is the first effort to bring all the students together since discussions about merging began.
import ed25519
from python_sha3.python_sha3 import *
import base64
import hashlib
from binascii import hexlify, unhexlify


class Account:
    """A key-pair holder built from a hex-encoded ed25519 private key.

    On construction the public key and the base32 network address are
    derived immediately and cached on the instance.

    Attributes set here:
        hexPrivKey   -- the hex private key exactly as supplied
        network      -- 'mainnet' (default) or 'testnet'; selects the
                        address version prefix byte
        sk, pk       -- raw private / public key bytes
        hexPublicKey -- hex encoding of pk
        address      -- base32-encoded address string
    """

    def __init__(self, hexPrivKey, network='mainnet'):
        self.hexPrivKey = hexPrivKey
        self.network = network
        self._calculateKeyPair()
        self._calculateAddress()

    def _calculateKeyPair(self):
        """Derive the raw key pair from the stored hex private key."""
        # The hex private key is byte-reversed before use as the seed.
        self.sk = unhexlify(self.hexPrivKey)[::-1]
        # Public key derivation uses sha3_512 as the internal hash
        # (per the project-local ed25519 helper).
        self.pk = ed25519.publickey_hash_unsafe(self.sk, sha3_512)
        self.hexPublicKey = hexlify(self.pk)

    def _calculateAddress(self):
        """Compute the base32 address: version || ripemd160(sha3(pk)) || checksum."""
        pub_digest = sha3_256()
        pub_digest.update(self.pk)

        ripemd = hashlib.new('ripemd160')
        ripemd.update(pub_digest.digest())

        # One version byte selects the network, then the hashed public key.
        prefix = "\x98" if self.network == 'testnet' else "\x68"
        versioned = prefix + ripemd.digest()

        # Checksum is the first 4 bytes of sha3-256 over the versioned payload.
        check = sha3_256()
        check.update(versioned)
        self.address = base64.b32encode(versioned + check.digest()[0:4])

    def getHexPublicKey(self):
        """Return the hex-encoded public key."""
        return self.hexPublicKey

    def getHexPrivateKey(self):
        """Return the hex-encoded private key as originally supplied."""
        return self.hexPrivKey

    def getAddress(self):
        """Return the derived base32 address."""
        return self.address

    def sign(self, binMessage):
        """Sign raw message bytes with this account's key pair."""
        return ed25519.signature_hash_unsafe(binMessage, self.sk, self.pk, sha3_512)

    def verify(self, hexedMessage):
        """Not implemented; kept as a stub matching the original interface."""
        pass
In Rome at 8:30am on April 24th 1908, a baby girl was born to an unnamed woman at 152 Viale Giulio Cesare. She was delivered by the midwife Carolina Zacoma, age 42, and at 1:30pm she was presented to Cavalier Vincenzo Stefanelli at the City hall, where her birth was registered and she was given the name Ofelia Cairi. Her mother was not named on the record. I think we can assume she was not married and that the address given was a kind of ‘home for unwed mothers’. Given Ofelia’s healthy birth weight I believe she probably came to Rome from the surrounding countryside to give birth to her child and was probably from a farming family. If she had been from a wealthier family she would probably have given birth in the hospital of San Giovanni Laterano, which handled many births to unwed mothers. The midwife was ordered by the clerk to take the child to the Brefotrofio of Rome with a copy of the birth record and her belongings, which consisted of a little cap, a shirt, a vest and two blankets. She was registered at the Brefotrofio with a weight of 2700 grams (almost 6 lbs) and then taken to the church of San Francesco a Ripa to be baptized. On the 29th of April she was fostered to a family from Castelliri who took her without asking for any payment. Essentially they took her for ‘love’ not money, in an ‘affidamento affettuoso’. The family was a couple named Mattia and Loreta. They already had two sons, Antonio born 1902 and Pasquale born 1904, and a daughter Vincenza, born 1906. Another son, Alfredo, born 12 March 1908, had died April 26, just two days after Ofelia was born. Despite already having 3 small children, Mattia and Loreta made the trip to Rome to choose a baby to take Alfredo’s place just days after his funeral, while Loreta still had a supply of milk to feed a new baby. They could have chosen to be paid for feeding and caring for Ofelia but they chose instead to take her in a ‘loving foster care’. 
Tragedy struck the family again just three months later with the death of their oldest son Antonio on July 23rd of that year, and again in 1915 with the death of Vincenza. In 1910 Angelo was born and in 1912 Agnese, but by this time Ofelia had established her place in the family. In fact, when she married in 1928, consent was given by her foster parents as well as the Brefotrofio, since she was still a minor (under 21). Her marriage took place in Isola del Liri, where she was living (and, I assume, working). She married Francesco Sbardella and had several children before emigrating to America. This story is posted with the permission of her grandson Carlo. ← Brick wall in Santa Maria Oliveto – torn down!