repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
bionikspoon/conferences
docs/github_docs.py
1
6026
#!/usr/bin/env python3 # coding=utf-8 """ Used by ``make readme`` to generate github readme from rst docs. Pipeline: routine(`include`, `out_file`) * `include` - include files and inline 'heredocs' * concatenate - flatten documents * sanitize - modify content * rule__code_blocks - - re implement rst code-blocks for github highlighting * rule__everything_else - - various one liners, replace non-standard directives * write_file(`out_file`) - * notify(`out_file`) - print a progress bar """ from __future__ import print_function from datetime import datetime from functools import partial, reduce from os.path import dirname, realpath, join, relpath # CONFIG UTILS # ---------------------------------------------------------------------------- def path(*args): return realpath(join(*args)) # CONFIG # ---------------------------------------------------------------------------- # PATH FUNCTIONS DOCS = partial(path, dirname(__file__)) SOURCE = partial(DOCS, 'source') PROJECT = partial(DOCS, '..') # CONSTANTS README = PROJECT('README.rst') CONTRIBUTING = PROJECT('CONTRIBUTING.rst') TODAY = datetime.now().strftime('%A, %B %d, %Y') # %A, %B %d, %Y -> Friday, December 11, 2015 FILE_HEADER = '.. START Source defined in %s\n\n' FILE_FOOTER = '\n\n.. END Source defined in %s' # SOURCE DEFINITIONS # ---------------------------------------------------------------------------- # comment to redirect contributors comment_line = """ .. This document was procedurally generated by %s on %s """ % (__file__, TODAY) # built in rst mechanic to deal with nonstandard roles role_overrides = """ .. role:: mod(literal) .. role:: func(literal) .. role:: data(literal) .. role:: const(literal) .. role:: class(literal) .. role:: meth(literal) .. role:: attr(literal) .. role:: exc(literal) .. role:: obj(literal) .. 
role:: envvar(literal) """ def include_readme_docs(_=None): yield read_text(comment_line) yield read_text(role_overrides) yield read_source('readme_title.rst') yield read_source('readme_features.rst') yield read_source('installation.rst') yield read_source('usage.rst') yield read_source('readme_credits.rst') def include_contributing_docs(_=None): yield read_text(comment_line) yield read_text(role_overrides) yield read_source('contributing.rst') # PRE COMPOSED PARTIALS # ---------------------------------------------------------------------------- def read_source(file_name): return read_file(SOURCE(file_name)) # PROCESS PIPELINE # ---------------------------------------------------------------------------- def read_file(file_name): yield FILE_HEADER % relpath(file_name, PROJECT()) with open(file_name) as f: for line in f: yield line yield FILE_FOOTER % relpath(file_name, PROJECT()) def read_text(text): yield FILE_HEADER % relpath(__file__, PROJECT()) for line in text.splitlines(True): yield line yield FILE_FOOTER % relpath(__file__, PROJECT()) def concatenate(sources): for source in sources: for line in source: yield line yield '\n' def sanitize(lines): rules = rule__code_blocks, rule__everything_else return pipeline(rules, lines) def write_file(file_name): def write_lines(lines): with open(file_name, 'w') as f: for line in lines: yield f.write(line) return write_lines def notify(file_name): # Print messages for start and finish; draw a simple progress bar def print_notify(lines): print('Writing', relpath(file_name, PROJECT()), end='') for i, line in enumerate(lines): if i % 10 is 0: print('.', end='') yield line print('Done!') return print_notify # SANITIZE RULES # ---------------------------------------------------------------------------- def rule__code_blocks(lines): # Replace highlight directive with code blocks code_block_language = 'python' for line in lines: # named conditions is_new_file = line.startswith(FILE_HEADER.replace('%s', '').rstrip()) 
is_code_block_shorthand = line.endswith('::\n') and not line.strip().startswith('..') # set highlight language and remove directive if line.startswith('.. highlight:: '): _, code_block_language = line.rstrip().rsplit(' ', 1) # parse and set language continue # remove highlight directive # reset highlight language to default if is_new_file: code_block_language = 'python' # write code block directive if is_code_block_shorthand: yield line.replace('::\n', '\n') # remove the shorthand yield '\n.. code-block:: %s\n' % code_block_language # space out new directive continue yield line def rule__everything_else(lines): # add small rules here, or create a named rule. for line in lines: # remove orphan directive. if line.startswith(':orphan:'): continue if line.startswith('.. currentmodule::'): continue yield line # SCRIPT UTILS # ---------------------------------------------------------------------------- def pipeline(steps, initial=None): """ Chain results from a list of functions. Inverted reduce. :param (function) steps: List of function callbacks :param initial: Starting value for pipeline. """ def apply(result, step): return step(result) return reduce(apply, steps, initial) # RUN SCRIPT # ---------------------------------------------------------------------------- if __name__ == '__main__': def routine(include, out_file): steps = include, concatenate, sanitize, write_file(out_file), notify(out_file) list(pipeline(steps)) routine(include_readme_docs, README) routine(include_contributing_docs, CONTRIBUTING)
mit
pam-bot/SMSBeds
lib/mechanize/_urllib2.py
134
1299
# urllib2 work-alike interface # ...from urllib2... from urllib2 import \ URLError, \ HTTPError # ...and from mechanize from _auth import \ HTTPProxyPasswordMgr, \ HTTPSClientCertMgr from _debug import \ HTTPResponseDebugProcessor, \ HTTPRedirectDebugProcessor # crap ATM ## from _gzip import \ ## HTTPGzipProcessor from _urllib2_fork import \ AbstractBasicAuthHandler, \ AbstractDigestAuthHandler, \ BaseHandler, \ CacheFTPHandler, \ FileHandler, \ FTPHandler, \ HTTPBasicAuthHandler, \ HTTPCookieProcessor, \ HTTPDefaultErrorHandler, \ HTTPDigestAuthHandler, \ HTTPErrorProcessor, \ HTTPHandler, \ HTTPPasswordMgr, \ HTTPPasswordMgrWithDefaultRealm, \ HTTPRedirectHandler, \ ProxyBasicAuthHandler, \ ProxyDigestAuthHandler, \ ProxyHandler, \ UnknownHandler from _http import \ HTTPEquivProcessor, \ HTTPRefererProcessor, \ HTTPRefreshProcessor, \ HTTPRobotRulesProcessor, \ RobotExclusionError import httplib if hasattr(httplib, 'HTTPS'): from _urllib2_fork import HTTPSHandler del httplib from _opener import OpenerDirector, \ SeekableResponseOpener, \ build_opener, install_opener, urlopen from _request import \ Request
gpl-2.0
idoerg/BOA
src/genome/genome.py
1
3607
import Bio from Bio import SeqIO, SeqFeature from Bio.SeqRecord import SeqRecord from Bio.Blast import NCBIXML from Bio.Blast import NCBIStandalone import sys import os import site import argparse import string import numpy import re import subprocess import genbank import blast import intergene loc_reg = re.compile("(\d+):(\d+)\S\((\S)\)") class GenomeHandler: def __init__(self, genome, intermediate, evalue, num_threads, verbose, keep_tmp): self.genome_file = genome self.evalue = evalue self.num_threads = num_threads self.verbose = verbose self.keep_tmp = keep_tmp # SeqIO.write(input_genes,self.target_genes,"fasta") self.noHits = True self.intermediate = intermediate def cleanup(self): #os.remove(self.genomic_query) #os.remove(self.genome_file) pass """Gets the filtered intergenic regions""" def getGenomicQuery(self): return self.genomic_query def getGenomeFile(self): return self.genome_file """Runs blast to find the gene locations in all of the bacterial genomes""" def getAlignedGenes(self,genes,gene_evalue,num_threads,formatdb): geneBlast = blast.BLAST(self.genome_file,genes,self.intermediate,gene_evalue) if formatdb: geneBlast.buildDatabase("nucleotide") print geneBlast.formatDBCommand() geneBlast.run(blast_cmd="tblastn",mode="xml",num_threads=num_threads) hits = geneBlast.parseBLAST("xml") print hits return hits def main(genome_files,bacteriocins,genes,outHandle,intermediate,gene_evalue,bac_evalue,num_threads,verbose,keep_tmp): for gbk in genome_files: ghr = GenomeHandler(gbk,intermediate,gene_evalue,num_threads,verbose,keep_tmp) hits = ghr.getAlignedGenes(genes,gene_evalue,num_threads) outHandle.write("\n".join( map( str, hits))+"\n") if __name__=="__main__": parser = argparse.ArgumentParser(description=\ 'Finds target genes in a FASTA file') parser.add_argument(\ '--genome-files', type=str, nargs="+", required=False, help='The FASTA containg the bacterial genome') parser.add_argument(\ '--genes', type=str,required=True,default="", help='A FASTA file 
containing all of the target genes of interest') parser.add_argument(\ '--bacteriocins', type=str, required=True, help='The bacteriocin proteins that are to be blasted') parser.add_argument(\ '--gene-evalue', type=float, required=False, default=0.00001, help='The evalue for gene hits') parser.add_argument(\ '--bac-evalue', type=float, required=False, default=0.000000001, help='The evalue for bacteriocin hits') parser.add_argument(\ '--intermediate', type=str, required=True, help='Directory for storing intermediate files') parser.add_argument(\ '--verbose', action='store_const', const=True, default=False, help='Messages for debugging') parser.add_argument(\ '--test', action='store_const', const=True, default=False, help='Run unittests') blast.addArgs(parser) args = parser.parse_args() outHandle = open(args.output_file,'w') main(args.genome_files, args.bacteriocins, args.genes, outHandle, args.intermediate, args.gene_evalue, args.bac_evalue, args.num_threads, args.verbose, args.keep_tmp)
gpl-3.0
compston/TAP-Workshop
utilities/Gnip-Python-Search-API-Utilities/gnip_time_series.py
1
28752
#!/usr/bin/env python # -*- coding: UTF-8 -*- ####################################################### # This script wraps simple timeseries analysis tools # and access to the Gnip Search API into a simple tool # to help the analysis quickly iterate on filters # a and understand time series trend and events. # # If you find this useful or find a bug you don't want # to fix for yourself, please let me know at @drskippy ####################################################### __author__="Scott Hendrickson" import ConfigParser import argparse import calendar import codecs import csv import datetime import json import logging import matplotlib import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import re import statsmodels.api as sm import string import sys import time from functools import partial from operator import itemgetter from scipy import signal from search.results import * reload(sys) sys.stdout = codecs.getwriter('utf-8')(sys.stdout) sys.stdin = codecs.getreader('utf-8')(sys.stdin) # basic defaults FROM_PICKLE = False DEFAULT_CONFIG_FILENAME = os.path.join(".",".gnip") DATE_FMT = "%Y%m%d%H%M" DATE_FMT2 = "%Y-%m-%dT%H:%M:%S" LOG_FILE_PATH = os.path.join(".","time_series.log") # set up simple logging logging.basicConfig(filename=LOG_FILE_PATH,level=logging.DEBUG) logging.info("#"*70) logging.info("################# started {} #################".format(datetime.datetime.now())) # tunable defaults CHAR_UPPER_CUTOFF = 20 # don't include tokens longer than CHAR_UPPER_CUTOFF TWEET_SAMPLE = 4000 # tweets to collect for peak topics MIN_SNR = 2.0 # signal to noise threshold for peak detection MAX_N_PEAKS = 7 # maximum number of peaks to output MAX_PEAK_WIDTH = 20 # max peak width in periods MIN_PEAK_WIDTH = 1 # min peak width in periods SEARCH_PEAK_WIDTH = 3 # min peak width in periods N_MOVING = 4 # average over buckets OUTLIER_FRAC = 0.8 # cut off values over 80% above or below the average PLOTS_PREFIX = os.path.join(".","plots") 
PLOT_DELTA_Y = 1.2 # spacing of y values in dotplot logging.debug("CHAR_UPPER_CUTOFF={},TWEET_SAMPLE={},MIN_SNR={},MAX_N_PEAKS={},MAX_PEAK_WIDTH={},MIN_PEAK_WIDTH={},SEARCH_PEAK_WIDTH={},N_MOVING={},OUTLIER_FRAC={},PLOTS_PREFIX={},PLOT_DELTA_Y={}".format( CHAR_UPPER_CUTOFF , TWEET_SAMPLE , MIN_SNR , MAX_N_PEAKS , MAX_PEAK_WIDTH , MIN_PEAK_WIDTH , SEARCH_PEAK_WIDTH , N_MOVING , OUTLIER_FRAC , PLOTS_PREFIX , PLOT_DELTA_Y )) class TimeSeries(): """Containter class for data collected from the API and associated analysis outputs""" pass class GnipSearchTimeseries(): def __init__(self, token_list_size=40): """Retrieve and analysis timesseries and associated interesting trends, spikes and tweet content.""" # default tokenizer and character limit char_upper_cutoff = CHAR_UPPER_CUTOFF self.token_list_size = int(token_list_size) ############################################# # CONFIG FILE/COMMAND LINE OPTIONS PATTERN # parse config file config_from_file = self.config_file() # set required fields to None. 
Sequence of setting is: # (1) config file # (2) command line # if still none, then fail self.user = None self.password = None self.stream_url = None if config_from_file is not None: try: # command line options take presidence if they exist self.user = config_from_file.get('creds', 'un') self.password = config_from_file.get('creds', 'pwd') self.stream_url = config_from_file.get('endpoint', 'url') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e: logging.warn("Error reading configuration file ({}), ignoring configuration file.".format(e)) # parse the command line options self.options = self.args().parse_args() self.options.filter = self.options.filter.decode("utf-8") self.options.second_filter = self.options.second_filter.decode("utf-8") # set up the job # over ride config file with command line args if present if self.options.user is not None: self.user = self.options.user if self.options.password is not None: self.password = self.options.password if self.options.stream_url is not None: self.stream_url = self.options.stream_url # search v2 uses a different url if "data-api.twitter.com" not in self.stream_url: logging.error("gnipSearch timeline tools require Search V2. Exiting.") sys.stderr.write("gnipSearch timeline tools require Search V2. Exiting.\n") sys.exit(-1) # set some options that should not be changed for this anaysis self.options.paged = True self.options.search_v2 = True self.options.max = 500 self.options.query = False # check paths if self.options.output_file_path is not None: if not os.path.exists(self.options.output_file_path): logging.error("Path {} doesn't exist. Please create it and try again. Exiting.".format( self.options.output_file_path)) sys.stderr.write("Path {} doesn't exist. Please create it and try again. Exiting.\n".format( self.options.output_file_path)) sys.exit(-1) if not os.path.exists(PLOTS_PREFIX): logging.error("Path {} doesn't exist. Please create it and try again. 
Exiting.".format( PLOTS_PREFIX)) sys.stderr.write("Path {} doesn't exist. Please create it and try again. Exiting.\n".format( PLOTS_PREFIX)) sys.exit(-1) # log the attributes of this class including all of the options for v in dir(self): # except don't log the password! if not v.startswith('__') and not callable(getattr(self,v)) and not v.lower().startswith('password'): tmp = str(getattr(self,v)) tmp = re.sub("password=.*,", "password=XXXXXXX,", tmp) logging.debug(" {}={}".format(v, tmp)) def config_file(self): """Search for a valid config file in the standard locations.""" config = ConfigParser.ConfigParser() # (1) default file name precidence config.read(DEFAULT_CONFIG_FILENAME) logging.info("attempting to read config file {}".format(DEFAULT_CONFIG_FILENAME)) if not config.has_section("creds"): # (2) environment variable file name second if 'GNIP_CONFIG_FILE' in os.environ: config_filename = os.environ['GNIP_CONFIG_FILE'] logging.info("attempting to read config file {}".format(config_filename)) config.read(config_filename) if config.has_section("creds") and config.has_section("endpoint"): return config else: logging.warn("no creds or endpoint section found in config file, attempting to proceed without config info from file") return None def args(self): "Set up the command line argments and the associated help strings.""" twitter_parser = argparse.ArgumentParser( description="GnipSearch timeline tools") twitter_parser.add_argument("-b", "--bucket", dest="count_bucket", default="day", help="Bucket size for counts query. 
Options are day, hour, minute (default is 'day').") twitter_parser.add_argument("-e", "--end-date", dest="end", default=None, help="End of datetime window, format 'YYYY-mm-DDTHH:MM' (default: most recent activities)") twitter_parser.add_argument("-f", "--filter", dest="filter", default="from:jrmontag OR from:gnip", help="PowerTrack filter rule (See: http://support.gnip.com/customer/portal/articles/901152-powertrack-operators)") twitter_parser.add_argument("-g", "--second_filter", dest="second_filter", default=None, help="Use a second filter to show correlation plots of -f timeline vs -g timeline.") twitter_parser.add_argument("-l", "--stream-url", dest="stream_url", default=None, help="Url of search endpoint. (See your Gnip console.)") twitter_parser.add_argument("-p", "--password", dest="password", default=None, help="Password") twitter_parser.add_argument("-s", "--start-date", dest="start", default=None, help="Start of datetime window, format 'YYYY-mm-DDTHH:MM' (default: 30 days ago)") twitter_parser.add_argument("-u", "--user-name", dest="user", default=None, help="User name") twitter_parser.add_argument("-t", "--get-topics", dest="get_topics", action="store_true", default=False, help="Set flag to evaluate peak topics (this may take a few minutes)") twitter_parser.add_argument("-w", "--output-file-path", dest="output_file_path", default=None, help="Create files in ./OUTPUT-FILE-PATH. This path must exists and will not be created. This options is available only with -a option. Default is no output files.") return twitter_parser def get_results(self): """Execute API calls to the timeseries data and tweet data we need for analysis. 
Perform analysis as we go because we often need results for next steps.""" ###################### # (1) Get the timeline ###################### logging.info("retrieving timeline counts") results_timeseries = Results( self.user , self.password , self.stream_url , self.options.paged , self.options.output_file_path , pt_filter=self.options.filter , max_results=int(self.options.max) , start=self.options.start , end=self.options.end , count_bucket=self.options.count_bucket , show_query=self.options.query , search_v2=self.options.search_v2 ) # sort by date res_timeseries = sorted(results_timeseries.get_time_series(), key = itemgetter(0)) # calculate total time interval span time_min_date = min(res_timeseries, key = itemgetter(2))[2] time_max_date = max(res_timeseries, key = itemgetter(2))[2] time_min = float(calendar.timegm(time_min_date.timetuple())) time_max = float(calendar.timegm(time_max_date.timetuple())) time_span = time_max - time_min logging.debug("time_min = {}, time_max = {}, time_span = {}".format(time_min, time_max, time_span)) # create a simple object to hold our data ts = TimeSeries() ts.dates = [] ts.x = [] ts.counts = [] # load and format data for i in res_timeseries: ts.dates.append(i[2]) ts.counts.append(float(i[1])) # create a independent variable in interval [0.0,1.0] ts.x.append((calendar.timegm(datetime.datetime.strptime(i[0], DATE_FMT).timetuple()) - time_min)/time_span) logging.info("read {} time items from search API".format(len(ts.dates))) if len(ts.dates) < 35: logging.warn("peak detection with with fewer than ~35 points is unreliable!") logging.debug('dates: ' + ','.join(map(str, ts.dates[:10])) + "...") logging.debug('counts: ' + ','.join(map(str, ts.counts[:10])) + "...") logging.debug('indep var: ' + ','.join(map(str, ts.x[:10])) + "...") ###################### # (1.1) Get a second timeline? 
###################### if self.options.second_filter is not None: logging.info("retrieving second timeline counts") results_timeseries = Results( self.user , self.password , self.stream_url , self.options.paged , self.options.output_file_path , pt_filter=self.options.second_filter , max_results=int(self.options.max) , start=self.options.start , end=self.options.end , count_bucket=self.options.count_bucket , show_query=self.options.query , search_v2=self.options.search_v2 ) # sort by date second_res_timeseries = sorted(results_timeseries.get_time_series(), key = itemgetter(0)) if len(second_res_timeseries) != len(res_timeseries): logging.error("time series of different sizes not allowed") else: ts.second_counts = [] # load and format data for i in second_res_timeseries: ts.second_counts.append(float(i[1])) logging.info("read {} time items from search API".format(len(ts.second_counts))) logging.debug('second counts: ' + ','.join(map(str, ts.second_counts[:10])) + "...") ###################### # (2) Detrend and remove prominent period ###################### logging.info("detrending timeline counts") no_trend = signal.detrend(np.array(ts.counts)) # determine period of data df = (ts.dates[1] - ts.dates[0]).total_seconds() if df == 86400: # day counts, average over week n_buckets = 7 n_avgs = {i:[] for i in range(n_buckets)} for t,c in zip(ts.dates, no_trend): n_avgs[t.weekday()].append(c) elif df == 3600: # hour counts, average over day n_buckets = 24 n_avgs = {i:[] for i in range(n_buckets)} for t,c in zip(ts.dates, no_trend): n_avgs[t.hour].append(c) elif df == 60: # minute counts; average over day n_buckets = 24*60 n_avgs = {i:[] for i in range(n_buckets)} for t,c in zip(ts.dates, no_trend): n_avgs[t.minute].append(c) else: sys.stderr.write("Weird interval problem! Exiting.\n") logging.error("Weird interval problem! 
Exiting.\n") sys.exit() logging.info("averaging over periods of {} buckets".format(n_buckets)) # remove upper outliers from averages df_avg_all = {i:np.average(n_avgs[i]) for i in range(n_buckets)} logging.debug("bucket averages: {}".format(','.join(map(str, [df_avg_all[i] for i in df_avg_all])))) n_avgs_remove_outliers = {i: [j for j in n_avgs[i] if abs(j - df_avg_all[i])/df_avg_all[i] < (1. + OUTLIER_FRAC) ] for i in range(n_buckets)} df_avg = {i:np.average(n_avgs_remove_outliers[i]) for i in range(n_buckets)} logging.debug("bucket averages w/o outliers: {}".format(','.join(map(str, [df_avg[i] for i in df_avg])))) # flatten cycle ts.counts_no_cycle_trend = np.array([no_trend[i] - df_avg[ts.dates[i].hour] for i in range(len(ts.counts))]) logging.debug('no trend: ' + ','.join(map(str, ts.counts_no_cycle_trend[:10])) + "...") ###################### # (3) Moving average ###################### ts.moving = np.convolve(ts.counts, np.ones((N_MOVING,))/N_MOVING, mode='valid') logging.debug('moving ({}): '.format(N_MOVING) + ','.join(map(str, ts.moving[:10])) + "...") ###################### # (4) Peak detection ###################### peakind = signal.find_peaks_cwt(ts.counts_no_cycle_trend, np.arange(MIN_PEAK_WIDTH, MAX_PEAK_WIDTH), min_snr = MIN_SNR) n_peaks = min(MAX_N_PEAKS, len(peakind)) logging.debug('peaks ({}): '.format(n_peaks) + ','.join(map(str, peakind))) logging.debug('peaks ({}): '.format(n_peaks) + ','.join(map(str, [ts.dates[i] for i in peakind]))) # top peaks determined by peak volume, better way? 
# peak detector algorithm: # * middle of peak (of unknown width) # * finds peaks up to MAX_PEAK_WIDTH wide # # algorithm for geting peak start, peak and end parameters: # find max, find fwhm, # find start, step past peak, keep track of volume and peak height, # stop at end of period or when timeseries turns upward peaks = [] for i in peakind: # find the first max in the possible window i_start = max(0, i - SEARCH_PEAK_WIDTH) i_finish = min(len(ts.counts) - 1, i + SEARCH_PEAK_WIDTH) p_max = max(ts.counts[i_start:i_finish]) h_max = p_max/2. # i_max not center i_max = i_start + ts.counts[i_start:i_finish].index(p_max) i_start, i_finish = i_max, i_max # start at peak, and go back and forward to find start and end while i_start >= 1: if (ts.counts[i_start - 1] <= h_max or ts.counts[i_start - 1] >= ts.counts[i_start] or i_start - 1 <= 0): break i_start -= 1 while i_finish < len(ts.counts) - 1: if (ts.counts[i_finish + 1] <= h_max or ts.counts[i_finish + 1] >= ts.counts[i_finish] or i_finish + 1 >= len(ts.counts)): break i_finish += 1 # i is center of peak so balance window delta_i = max(1, i - i_start) if i_finish - i > delta_i: delta_i = i_finish - i # final est of start and finish i_finish = min(len(ts.counts) - 1, i + delta_i) i_start = max(0, i - delta_i) p_volume = sum(ts.counts[i_start:i_finish]) peaks.append([ i , p_volume , (i, i_start, i_max, i_finish , h_max , p_max, p_volume , ts.dates[i_start], ts.dates[i_max], ts.dates[i_finish])]) # top n_peaks by volume top_peaks = sorted(peaks, key = itemgetter(1))[-n_peaks:] # re-sort peaks by date ts.top_peaks = sorted(top_peaks, key = itemgetter(0)) logging.debug('top peaks ({}): '.format(len(ts.top_peaks)) + ','.join(map(str, ts.top_peaks[:4])) + "...") ###################### # (5) high/low frequency ###################### ts.cycle, ts.trend = sm.tsa.filters.hpfilter(np.array(ts.counts)) logging.debug('cycle: ' + ','.join(map(str, ts.cycle[:10])) + "...") logging.debug('trend: ' + ','.join(map(str, ts.trend[:10])) + 
"...") ###################### # (6) n-grams for top peaks ###################### ts.topics = [] if self.options.get_topics: logging.info("retrieving tweets for peak topics") for a in ts.top_peaks: # start at peak ds = datetime.datetime.strftime(a[2][8], DATE_FMT2) # estimate how long to get TWEET_SAMPLE tweets # a[1][5] is max tweets per period if a[2][5] > 0: est_periods = float(TWEET_SAMPLE)/a[2][5] else: logging.warn("peak with zero max tweets ({}), setting est_periods to 1".format(a)) est_periods = 1 # df comes from above, in seconds # time resolution is hours est_time = max(int(est_periods * df), 60) logging.debug("est_periods={}, est_time={}".format(est_periods, est_time)) # if a[2][8] + datetime.timedelta(seconds=est_time) < a[2][9]: de = datetime.datetime.strftime(a[2][8] + datetime.timedelta(seconds=est_time), DATE_FMT2) elif a[2][8] < a[2][9]: de = datetime.datetime.strftime(a[2][9], DATE_FMT2) else: de = datetime.datetime.strftime(a[2][8] + datetime.timedelta(seconds=60), DATE_FMT2) logging.info("retreive data for peak index={} in date range [{},{}]".format(a[0], ds, de)) res = Results( self.user , self.password , self.stream_url , self.options.paged , self.options.output_file_path , pt_filter=self.options.filter , max_results=int(self.options.max) , start=ds , end=de , count_bucket=None , show_query=self.options.query , search_v2=self.options.search_v2 , hard_max = TWEET_SAMPLE ) logging.info("retrieved {} records".format(len(res))) n_grams_counts = list(res.get_top_grams(n=self.token_list_size)) ts.topics.append(n_grams_counts) logging.debug('n_grams for peak index={}: '.format(a[0]) + ','.join( map(str, [i[4].encode("utf-8","ignore") for i in n_grams_counts][:10])) + "...") return ts def dotplot(self, x, labels, path = "dotplot.png"): """Makeshift dotplots in matplotlib. 
This is not completely general and encodes labels and parameter selections that are particular to n-gram dotplots.""" logging.info("dotplot called, writing image to path={}".format(path)) if len(x) <= 1 or len(labels) <= 1: raise ValueError("cannot make a dot plot with only 1 point") # split n_gram_counts into 2 data sets n = len(labels)/2 x1, x2 = x[:n], x[n:] labels1, labels2 = labels[:n], labels[n:] # create enough equally spaced y values for the horizontal lines ys = [r*PLOT_DELTA_Y for r in range(1,len(labels2)+1)] # give ourselves a little extra room on the plot maxx = max(x)*1.05 maxy = max(ys)*1.05 # set up plots to be a factor taller than the default size # make factor proportional to the number of n-grams plotted size = plt.gcf().get_size_inches() # factor of n/10 is empirical scale_denom = 10 fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1,figsize=(size[0], size[1]*n/scale_denom)) logging.debug("plotting top {} terms".format(n)) logging.debug("plot size=({},{})".format(size[0], size[1]*n/scale_denom)) # first plot 1-grams ax1.set_xlim(0,maxx) ax1.set_ylim(0,maxy) ticks = ax1.yaxis.set_ticks(ys) text = ax1.yaxis.set_ticklabels(labels1) for ct, item in enumerate(labels1): ax1.hlines(ys[ct], 0, maxx, linestyle='dashed', color='0.9') ax1.plot(x1, ys, 'ko') ax1.set_title("1-grams") # second plot 2-grams ax2.set_xlim(0,maxx) ax2.set_ylim(0,maxy) ticks = ax2.yaxis.set_ticks(ys) text = ax2.yaxis.set_ticklabels(labels2) for ct, item in enumerate(labels2): ax2.hlines(ys[ct], 0, maxx, linestyle='dashed', color='0.9') ax2.plot(x2, ys, 'ko') ax2.set_title("2-grams") ax2.set_xlabel("Fraction of Mentions") # plt.tight_layout() plt.savefig(path) plt.close("all") def plots(self, ts, out_type="png"): """Basic choice for plotting analysis. 
If you wish to extend this class, over- write this method.""" # creat a valid file name, in this case and additional requirement is no spaces valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) filter_prefix_name = ''.join(c for c in self.options.filter if c in valid_chars) filter_prefix_name = filter_prefix_name.replace(" ", "_") if len(filter_prefix_name) > 16: filter_prefix_name = filter_prefix_name[:16] if self.options.second_filter is not None: second_filter_prefix_name = ''.join(c for c in self.options.second_filter if c in valid_chars) second_filter_prefix_name = second_filter_prefix_name.replace(" ", "_") if len(second_filter_prefix_name) > 16: second_filter_prefix_name = second_filter_prefix_name[:16] ###################### # timeline ###################### df0 = pd.Series(ts.counts, index=ts.dates) df0.plot() plt.ylabel("Counts") plt.title(filter_prefix_name) plt.tight_layout() plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "time_line", out_type))) plt.close("all") ###################### # cycle and trend ###################### df1 = pd.DataFrame({"cycle":ts.cycle, "trend":ts.trend}, index=ts.dates) df1.plot() plt.ylabel("Counts") plt.title(filter_prefix_name) plt.tight_layout() plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "cycle_trend_line", out_type))) plt.close("all") ###################### # moving avg ###################### df2 = pd.DataFrame({"moving":ts.moving}, index=ts.dates[:len(ts.moving)]) df2.plot() plt.ylabel("Counts") plt.title(filter_prefix_name) plt.tight_layout() plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "mov_avg_line", out_type))) plt.close("all") ###################### # timeline with peaks marked by vertical bands ###################### df3 = pd.Series(ts.counts, index=ts.dates) df3.plot() # peaks for a in ts.top_peaks: xs = a[2][7] xp = a[2][8] xe = a[2][9] y = a[2][5] # need to get x and y locs plt.axvspan(xs, xe, ymin=0, 
ymax = y, linewidth=1, color='g', alpha=0.2) plt.axvline(xp, ymin=0, ymax = y, linewidth=1, color='y') plt.ylabel("Counts") plt.title(filter_prefix_name) plt.tight_layout() plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "time_peaks_line", out_type))) plt.close("all") ###################### # n-grams to help determine topics of peaks ###################### for n, p in enumerate(ts.topics): x = [] labels = [] for i in p: x.append(i[1]) labels.append(i[4]) try: logging.info("creating n-grams dotplot for peak {}".format(n)) path = os.path.join(PLOTS_PREFIX, "{}_{}_{}.{}".format(filter_prefix_name, "peak", n, out_type)) self.dotplot(x, labels, path) except ValueError, e: logging.error("{} - plot path={} skipped".format(e, path)) ###################### # x vs y scatter plot for correlations ###################### if self.options.second_filter is not None: logging.info("creating scatter for queries {} and {}".format(self.options.filter, self.options.second_filter)) df4 = pd.DataFrame({filter_prefix_name: ts.counts, second_filter_prefix_name:ts.second_counts}) df4.plot(kind='scatter', x=filter_prefix_name, y=second_filter_prefix_name) plt.ylabel(second_filter_prefix_name) plt.xlabel(filter_prefix_name) plt.xlim([0, 1.05 * max(ts.counts)]) plt.ylim([0, 1.05 * max(ts.second_counts)]) plt.title("{} vs. {}".format(second_filter_prefix_name, filter_prefix_name)) plt.tight_layout() plt.savefig(os.path.join(PLOTS_PREFIX, '{}_v_{}_{}.{}'.format(filter_prefix_name, second_filter_prefix_name, "scatter", out_type))) plt.close("all") if __name__ == "__main__": """ Simple command line utility.""" import pickle g = GnipSearchTimeseries() if FROM_PICKLE: ts = pickle.load(open("./time_series.pickle", "rb")) else: ts = g.get_results() pickle.dump(ts,open("./time_series.pickle", "wb")) g.plots(ts)
mit
monarc99/otr-verwaltung
data/plugins/Mkv.py
3
16281
# -*- coding: utf-8 -*-
### BEGIN LICENSE
# Copyright (C) 2010 Benjamin Elbers <elbersb@gmail.com>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE.  See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program.  If not, see <http://www.gnu.org/licenses/>.
### END LICENSE

"""otr-verwaltung plugin: convert media files to MKV via mkvmerge,
optionally re-encoding the audio tracks to AAC with ffmpeg first."""

import gtk
import subprocess
import time
import re
import os

from otrverwaltung.GeneratorTask import GeneratorTask
from otrverwaltung.pluginsystem import Plugin
from otrverwaltung import fileoperations
from otrverwaltung import path
from otrverwaltung.constants import Section
from otrverwaltung.actions.cut import Cut


class Mkv(Plugin):
    """Toolbar plugin that remuxes the selected files into MKV containers.

    The conversion runs asynchronously via GeneratorTask; the generator
    yields (state, value) tuples that drive the task-progress UI.
    """
    Name = "MKV"
    Desc = "Wandelt Media-Dateien mit Hilfe von mkvmerge in mkv-Dateien um."
    Author = "Benjamin Elbers, monarc99"
    Configurable = True
    # Persisted plugin options (labels for them are built in configurate()).
    Config = {'DumpAVIs': True,
              'DumpAVIs_delete': False,
              'EncodeAudioToAAC': False,
              'EncodeOnlyFirstAudioToAAC': True,
              'NormalizeAudio': False,
              'DownMixStereo': True,
              'RemoveOtherAudioStreamsThanAC3': False}

    def enable(self):
        """Add the plugin's toolbutton to the main window."""
        self.toolbutton = self.gui.main_window.add_toolbutton(gtk.image_new_from_file(self.get_path('mkv.png')), 'In Mkv umwandeln', [Section.VIDEO_CUT, Section.ARCHIVE])
        self.toolbutton.connect('clicked', self.on_mkv_clicked)

    def disable(self):
        """Remove the plugin's toolbutton from the main window."""
        self.gui.main_window.remove_toolbutton(self.toolbutton)

    def configurate(self, dialog):
        """Populate the plugin configuration dialog with one checkbutton
        per Config option; toggling writes straight back into Config."""
        dialog.vbox.set_spacing(4)

        # checkbutton callback: store the new state under the Config key
        # passed as user data
        def on_checkbutton_toggled(widget, data=None):
            self.Config[data] = widget.get_active()

        # checkbutton for dumping media files
        checkbutton_dump_avis = gtk.CheckButton("Originaldatei automatisch in Mülleimer verschieben?")
        dialog.vbox.pack_start(checkbutton_dump_avis, expand=False)
        checkbutton_dump_avis.connect('toggled', on_checkbutton_toggled, 'DumpAVIs')

        # checkbutton for erasing the media file permanently
        checkbutton_dump_avis_delete = gtk.CheckButton("Originaldatei im Mülleimer gleich für immer löschen?")
        dialog.vbox.pack_start(checkbutton_dump_avis_delete, expand=False)
        checkbutton_dump_avis_delete.connect('toggled', on_checkbutton_toggled, 'DumpAVIs_delete')

        # checkbutton encode audio aac
        checkbutton_encode_audio = gtk.CheckButton("Audiospuren zu AAC umwandeln?")
        dialog.vbox.pack_start(checkbutton_encode_audio, expand=False)
        checkbutton_encode_audio.connect('toggled', on_checkbutton_toggled, 'EncodeAudioToAAC')

        # checkbutton encode first audio only
        checkbutton_encode_only_first_audio = gtk.CheckButton("    AAC: nur erste Audiospur kodieren?")
        dialog.vbox.pack_start(checkbutton_encode_only_first_audio, expand=False)
        checkbutton_encode_only_first_audio.connect('toggled', on_checkbutton_toggled, 'EncodeOnlyFirstAudioToAAC')

        # checkbutton down mix first audio stream
        checkbutton_downmix_stereo = gtk.CheckButton("    AAC: erste Audiospur automatisch auf Stereo downmixen?")
        dialog.vbox.pack_start(checkbutton_downmix_stereo, expand=False)
        checkbutton_downmix_stereo.connect('toggled', on_checkbutton_toggled, 'DownMixStereo')

        # checkbutton normalize audio during AAC conversion
        checkbutton_normalize_audio = gtk.CheckButton("    AAC: Audio bei Konvertierung normalisieren?")
        dialog.vbox.pack_start(checkbutton_normalize_audio, expand=False)
        checkbutton_normalize_audio.connect('toggled', on_checkbutton_toggled, 'NormalizeAudio')

        # checkbutton remove other audio streams than ac3_stream
        checkbutton_remove_other_audio_streams_than_ac3 = gtk.CheckButton("Falls AC3 gefunden wurde, alle Audiospuren außer AC3 entfernen? \nDie AC3 wird somit erste Spur und wird gegebenenfalls nach AAC konvertiert und downgemixt, wenn oben angewählt.")
        dialog.vbox.pack_start(checkbutton_remove_other_audio_streams_than_ac3, expand=False)
        checkbutton_remove_other_audio_streams_than_ac3.connect('toggled', on_checkbutton_toggled, 'RemoveOtherAudioStreamsThanAC3')

        # reflect the current config in the widgets
        checkbutton_dump_avis.set_active(self.Config['DumpAVIs'])
        checkbutton_dump_avis_delete.set_active(self.Config['DumpAVIs_delete'])
        checkbutton_encode_audio.set_active(self.Config['EncodeAudioToAAC'])
        checkbutton_encode_only_first_audio.set_active(self.Config['EncodeOnlyFirstAudioToAAC'])
        checkbutton_normalize_audio.set_active(self.Config['NormalizeAudio'])
        checkbutton_downmix_stereo.set_active(self.Config['DownMixStereo'])
        checkbutton_remove_other_audio_streams_than_ac3.set_active(self.Config['RemoveOtherAudioStreamsThanAC3'])

        return dialog

    def on_mkv_clicked(self, widget, data=None):
        """Convert every selected file to MKV in a background task.

        States yielded by the generator: 0=analysing, 1=AAC encode,
        2=mkvmerge, 3=mkvmerge progress %, 4=ffmpeg progress %,
        5=computing normalisation volume.
        """
        filenames = self.gui.main_window.get_selected_filenames()

        if len(filenames) == 0:
            self.gui.message_error_box("Es muss eine Datei markiert sein.")
            return

        self.toolbutton.set_sensitive(False)
        self.gui.main_window.set_tasks_visible(True)
        self.success = 0
        self.errors = {}

        def mkvmerge():
            # force a C locale so mkvmerge's stdout progress format is stable
            my_env = os.environ.copy()
            my_env["LANG"] = "C"

            for count, filename in enumerate(filenames):
                yield 0, count
                yield 3, 0
                self.progress = 0

                # analyse file
                cutter = Cut(self.app, self.gui)
                fps, dar, sar, max_frames, ac3_stream, error = cutter.analyse_mediafile(filename)
                if fps == None:
                    self.errors[filename] = error
                    continue

                # encode aac with ffmpeg
                if self.Config['EncodeAudioToAAC']:
                    # compute the normalisation volume (1.0 = unchanged)
                    yield 5, count
                    if self.Config['NormalizeAudio'] and self.Config['EncodeAudioToAAC']:
                        vol, error = self.get_norm_volume(filename)
                    else:
                        vol = 1.0

                    # ffmpeg pass
                    yield 1, count
                    self.progress = 0
                    ffmpegpass_file = fileoperations.make_unique_filename(os.path.splitext(filename)[0] + "_remux.mkv")

                    # encode only the first audio stream, or all of them
                    if self.Config['EncodeOnlyFirstAudioToAAC']:
                        aacaudiostreams = '-c:a:0'
                    else:
                        aacaudiostreams = '-c:a'

                    # pick the AAC encoder depending on the ffmpeg build
                    ffmpeg = self.app.config.get_program('ffmpeg')
                    if 'nonfree' in ffmpeg:
                        # nonfree ffmpeg version with fdk support available
                        audiocodec = ['-c:a', 'copy', aacaudiostreams, 'libfdk_aac', '-flags', '+qscale', '-profile:a:0', 'aac_low', '-global_quality', '5', '-afterburner', '1']
                    else:
                        # only gpl version of ffmpeg available -> use standard aac codec
                        audiocodec = ['-c:a', 'copy', aacaudiostreams, 'aac', '-strict', '-2', '-profile:a:0', 'aac_low', '-ab', '192k', '-cutoff', '18000']

                    if self.Config['DownMixStereo'] and self.Config['EncodeAudioToAAC']:
                        audiocodec.extend(['-ac:0', '2'])

                    # NOTE(review): "map" shadows the builtin; kept unchanged here.
                    if ac3_stream == None:
                        # no ac3 stream found - all streams are muxed
                        map = ['-map', '0']
                    else:
                        if self.Config['RemoveOtherAudioStreamsThanAC3']:
                            # mux only video and ac3 stream
                            map = ['-map', '0:v', '-map', ac3_stream]
                        else:
                            map = ['-map', '0']

                    args = [ffmpeg, "-loglevel", "info", "-y", "-drc_scale", "1.0", "-i", filename, "-vn", '-af', 'volume=volume=' + str(vol), "-vsync", "1", '-async', '1000', "-dts_delta_threshold", "100", "-vf", "fps=" + str(fps), '-threads', '0', ffmpegpass_file]
                    map.extend(audiocodec)
                    # splice mapping + codec options in right after "-i <file>"
                    args[8:8] = map

                    try:
                        p = subprocess.Popen(args, stderr=subprocess.PIPE, universal_newlines=True)
                    except OSError:
                        self.errors[filename] = "FFMPEG (intern) wurde nicht gefunden!"
                        continue

                    yield 4, 0
                    line = ""
                    # ffmpeg reports progress as "time=HH:MM:SS.ss" on stderr
                    infos_match = re.compile(r"time=(\d{2,}):(\d{2,}):(\d{2,}.\d{2,})")

                    while p.poll() == None:
                        line = p.stderr.readline()
                        m = re.search(infos_match, line)
                        if m and max_frames != 0:
                            # elapsed time -> frame number -> percentage
                            frame = (float(m.group(1)) * 3600 + float(m.group(2)) * 60 + float(m.group(3))) * fps
                            next = float(frame / float(max_frames)) * 100
                            if next > self.progress:
                                self.progress = next
                                yield 4, self.progress
                        else:
                            pass

                    exit_code = p.poll()

                    if exit_code == 0:
                        pass
                    else:
                        self.errors[filename] = "Fehler beim Erzeugen der MP4 Datei durch FFMPEG"
                        if os.path.exists(ffmpegpass_file):
                            fileoperations.remove_file(ffmpegpass_file)
                        continue

                # mkvmerge pass
                yield 2, count
                self.progress = 0
                mkvpass_file = fileoperations.make_unique_filename(os.path.splitext(filename)[0] + ".mkv")
                if self.Config['EncodeAudioToAAC']:
                    # video from the source, audio from the ffmpeg remux file
                    args = [self.app.config.get_program('mkvmerge'), '--engage', 'no_cue_duration', '--engage', 'no_cue_relative_position', '--ui-language', 'en_US', "-o", mkvpass_file, '-A', filename, '-D', ffmpegpass_file]
                else:
                    if self.Config['RemoveOtherAudioStreamsThanAC3'] and ac3_stream:
                        args = [self.app.config.get_program('mkvmerge'), '--engage', 'no_cue_duration', '--engage', 'no_cue_relative_position', '--ui-language', 'en_US', "-o", mkvpass_file, '-a', ac3_stream[2], filename]
                    else:
                        args = [self.app.config.get_program('mkvmerge'), '--engage', 'no_cue_duration', '--engage', 'no_cue_relative_position', '--ui-language', 'en_US', "-o", mkvpass_file, filename]

                p = subprocess.Popen(args, stdout=subprocess.PIPE, env=my_env)
                p.stdout.readline()
                line = ""
                while p.poll() == None:
                    # read progress from stdout ("Progress: NN%")
                    char = p.stdout.read(1)
                    line += char
                    progress = ''
                    if char == ':':
                        if "Error" in line or "Warning" in line:
                            break

                        while char != '%':
                            char = p.stdout.read(1)
                            progress += char

                        try:
                            self.progress = int(progress.strip(' %'))
                            yield 3, self.progress
                        except ValueError:
                            pass

                exit_code = p.poll()

                # mkvmerge exit code 1 means "completed with warnings"
                if exit_code == 0 or exit_code == 1:
                    self.success += 1
                    if self.Config['EncodeAudioToAAC']:
                        # the intermediate remux file is no longer needed
                        fileoperations.remove_file(ffmpegpass_file)
                    if self.Config['DumpAVIs']:
                        if self.Config['DumpAVIs_delete']:
                            fileoperations.remove_file(filename)
                        else:
                            # move the original to the trash folder, replacing
                            # any stale file of the same name
                            new_filename = os.path.join(self.app.config.get('general', 'folder_trash_avis'), os.path.basename(filename))
                            if os.path.exists(new_filename):
                                fileoperations.remove_file(new_filename)
                            fileoperations.move_file(filename, self.app.config.get('general', 'folder_trash_avis'))
                else:
                    error = p.stdout.readline()
                    try:
                        error = error.split(":")[1]
                    except IndexError:
                        pass
                    if "unknown type" in error:
                        error = "Datei konnte nicht gelesen werden."
                    self.errors[filename] = error

        def loop(state, argument):
            # progress callback: map generator states to status texts
            if state == 0:
                self.gui.main_window.set_tasks_text("Analysiere Datei ... %s/%s" % (str(argument + 1), str(len(filenames))))
            elif state == 1:
                self.gui.main_window.set_tasks_text("Audiospur in AAC wandeln ... %s/%s" % (str(argument + 1), str(len(filenames))))
            elif state == 2:
                self.gui.main_window.set_tasks_text("MKV erstellen ... %s/%s" % (str(argument + 1), str(len(filenames))))
            elif state == 5:
                self.gui.main_window.set_tasks_text("Normalisierungswert berechnen ... %s/%s" % (str(argument + 1), str(len(filenames))))
            else:
                self.gui.main_window.set_tasks_progress(argument)

        def complete():
            # summarise results and restore the UI
            if len(self.errors) == 0:
                self.gui.main_window.change_status(0, "Erfolgreich %s/%s Dateien umgewandelt." % (str(self.success), str(len(filenames))))
            else:
                self.gui.main_window.change_status(0, "Erfolgreich %s/%s Dateien umgewandelt. (Fehler: %s)" % (str(self.success), str(len(filenames)), " ".join(self.errors.values())))

            self.gui.main_window.set_tasks_visible(False)
            if self.success > 0:
                self.app.show_section(self.app.section)
            self.toolbutton.set_sensitive(True)

        GeneratorTask(mkvmerge, loop, complete).start()

    def get_norm_volume(self, filename):
        """ Gets the volume correction of a movie using ffmpeg and sox.
            Returns without error:              norm_vol, None
                    with error:                 1.0, error_message """
        try:
            process1 = subprocess.Popen([path.get_tools_path('intern-ffmpeg'), '-loglevel', 'quiet', '-i', filename, '-f', 'sox', '-'], stdout=subprocess.PIPE)
        except OSError:
            return "1.0", "FFMPEG wurde nicht gefunden!"

        try:
            process2 = subprocess.Popen([path.get_tools_path('intern-sox'), '-p', '--null', 'stat', '-v'], stdin=process1.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except OSError:
            return "1.0", "SOX wurde nicht gefunden!"

        log = process2.communicate()[0]

        # NOTE(review): this loop returns the first line of sox's output
        # unconditionally (a bare "return" cannot raise, so the except arm
        # is unreachable) — presumably the line holds the -v factor; verify
        # against sox's "stat -v" output format.
        for line in log.split('\n'):
            try:
                return line, None
            except:
                return "1.0", "Volume konnte nicht bestimmt werden " + line

        return None, "Volume konnte nicht bestimmt werden."
gpl-3.0
yanbin-ha/picasso-graphic
tools/gyp/build/lib/gyp/sun_tool.py
314
1569
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""These functions are executed via gyp-sun-tool when using the Makefile
generator."""

import fcntl
import os
import struct
import subprocess
import sys


def main(args):
  """Entry point: dispatch *args* (argv minus the program name) to SunTool."""
  executor = SunTool()
  executor.Dispatch(args)


class SunTool(object):
  """This class performs all the SunOS tooling steps. The methods can either
  be executed directly, or dispatched from an argument list."""

  def Dispatch(self, args):
    """Dispatches a string command to a method.

    Raises:
      Exception: if no command was given.
    """
    if len(args) < 1:
      raise Exception("Not enough arguments")
    method = "Exec%s" % self._CommandifyName(args[0])
    getattr(self, method)(*args[1:])

  def _CommandifyName(self, name_string):
    """Transforms a tool name like copy-info-plist to CopyInfoPlist."""
    return name_string.title().replace('-', '')

  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1).

    Takes an exclusive POSIX record lock on *lockfile*, then runs
    *cmd_list* and returns its exit status.
    """
    # Rely on exception handling to report errors.
    # Note that the stock python on SunOS has a bug
    # where fcntl.flock(fd, LOCK_EX) always fails
    # with EBADF, that's why we use this F_SETLK
    # hack instead.
    # Mode 0o666 (rw-rw-rw-): the 0o prefix keeps the same value but is
    # valid on Python 2.6+ AND Python 3, where the legacy literal 0666
    # is a syntax error.
    fd = os.open(lockfile, os.O_WRONLY | os.O_NOCTTY | os.O_CREAT, 0o666)
    op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    fcntl.fcntl(fd, fcntl.F_SETLK, op)
    return subprocess.call(cmd_list)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
bsd-3-clause
krieger-od/nwjs_chromium.src
tools/usb_gadget/echo_gadget.py
48
7023
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""USB echo gadget module.

This gadget has pairs of IN/OUT endpoints that echo packets back to the host.
"""

import math
import struct
import uuid

import gadget
import usb_constants
import usb_descriptors


class EchoGadget(gadget.Gadget):
  """Echo gadget.

  Exposes three vendor-class interfaces (interrupt, bulk and isochronous)
  whose OUT endpoints mirror every received packet back on the matching
  IN endpoint.
  """

  def __init__(self):
    """Create an echo gadget."""
    device_desc = usb_descriptors.DeviceDescriptor(
        idVendor=usb_constants.VendorID.GOOGLE,
        idProduct=usb_constants.ProductID.GOOGLE_ECHO_GADGET,
        bcdUSB=0x0200,
        iManufacturer=1,
        iProduct=2,
        iSerialNumber=3,
        bcdDevice=0x0100)

    def build_config(intr_interval, bulk_packet_size,
                     isoc_packet_size, isoc_interval):
      """Build one configuration; full- and high-speed differ only in
      packet sizes and polling intervals."""
      config = usb_descriptors.ConfigurationDescriptor(
          bmAttributes=0x80,
          MaxPower=50)

      # Interface 0: interrupt OUT (0x01) / IN (0x81) pair.
      intr_iface = usb_descriptors.InterfaceDescriptor(
          bInterfaceNumber=0,
          bInterfaceClass=usb_constants.DeviceClass.VENDOR,
          bInterfaceSubClass=0,
          bInterfaceProtocol=0,
          iInterface=4)
      for address in (0x01, 0x81):
        intr_iface.AddEndpoint(usb_descriptors.EndpointDescriptor(
            bEndpointAddress=address,
            bmAttributes=usb_constants.TransferType.INTERRUPT,
            wMaxPacketSize=64,
            bInterval=intr_interval))
      config.AddInterface(intr_iface)

      # Interface 1: bulk OUT (0x02) / IN (0x82) pair.
      bulk_iface = usb_descriptors.InterfaceDescriptor(
          bInterfaceNumber=1,
          bInterfaceClass=usb_constants.DeviceClass.VENDOR,
          bInterfaceSubClass=0,
          bInterfaceProtocol=0,
          iInterface=5)
      for address in (0x02, 0x82):
        bulk_iface.AddEndpoint(usb_descriptors.EndpointDescriptor(
            bEndpointAddress=address,
            bmAttributes=usb_constants.TransferType.BULK,
            wMaxPacketSize=bulk_packet_size,
            bInterval=0))
      config.AddInterface(bulk_iface)

      # Interface 2, alternate setting 0: no endpoints.
      config.AddInterface(usb_descriptors.InterfaceDescriptor(
          bInterfaceNumber=2,
          bInterfaceClass=usb_constants.DeviceClass.VENDOR,
          bInterfaceSubClass=0,
          bInterfaceProtocol=0,
          iInterface=6))

      # Interface 2, alternate setting 1: isochronous OUT (0x03) / IN (0x83).
      isoc_iface = usb_descriptors.InterfaceDescriptor(
          bInterfaceNumber=2,
          bAlternateSetting=1,
          bInterfaceClass=usb_constants.DeviceClass.VENDOR,
          bInterfaceSubClass=0,
          bInterfaceProtocol=0,
          iInterface=6)
      for address in (0x03, 0x83):
        isoc_iface.AddEndpoint(usb_descriptors.EndpointDescriptor(
            bEndpointAddress=address,
            bmAttributes=usb_constants.TransferType.ISOCHRONOUS,
            wMaxPacketSize=isoc_packet_size,
            bInterval=isoc_interval))
      config.AddInterface(isoc_iface)

      return config

    # Full speed: 1ms frames (bInterval=1); high speed: bInterval=4 -> 1ms.
    fs_config_desc = build_config(1, 64, 1023, 1)
    hs_config_desc = build_config(4, 512, 1024, 4)

    super(EchoGadget, self).__init__(
        device_desc, fs_config_desc, hs_config_desc)
    self.AddStringDescriptor(1, 'Google Inc.')
    self.AddStringDescriptor(2, 'Echo Gadget')
    self.AddStringDescriptor(3, '{:06X}'.format(uuid.getnode()))
    self.AddStringDescriptor(4, 'Interrupt Echo')
    self.AddStringDescriptor(5, 'Bulk Echo')
    self.AddStringDescriptor(6, 'Isochronous Echo')

  def ReceivePacket(self, endpoint, data):
    """Echo a packet back to the host.

    Args:
      endpoint: Incoming endpoint (must be an OUT pipe).
      data: Packet data.
    """
    assert endpoint & usb_constants.Dir.IN == 0
    self.SendPacket(endpoint | usb_constants.Dir.IN, data)


def RegisterHandlers():
  """Registers web request handlers with the application server."""
  import server
  from tornado import web

  class WebConfigureHandler(web.RequestHandler):

    def post(self):
      # Switch the running server over to a fresh echo gadget.
      server.SwitchGadget(EchoGadget())

  server.app.add_handlers('.*$', [
      (r'/echo/configure', WebConfigureHandler),
  ])
bsd-3-clause
joone/chromium-crosswalk
tools/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/A.py
248
2055
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import dns.exception
import dns.ipv4
import dns.rdata
import dns.tokenizer


class A(dns.rdata.Rdata):
    """A record.

    @ivar address: an IPv4 address
    @type address: string (in the standard "dotted quad" format)"""

    __slots__ = ['address']

    def __init__(self, rdclass, rdtype, address):
        super(A, self).__init__(rdclass, rdtype)
        # Validate the dotted-quad form up front: inet_aton raises on
        # malformed input.  (The packed result itself is not needed, so
        # the previous unused "junk" binding was dropped.)
        dns.ipv4.inet_aton(address)
        self.address = address

    def to_text(self, origin=None, relativize=True, **kw):
        """Return the presentation (dotted quad) form of the address."""
        return self.address

    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Build an A rdata from its zone-file text representation."""
        address = tok.get_identifier()
        tok.get_eol()
        return cls(rdclass, rdtype, address)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress=None, origin=None):
        """Write the address to *file* as 4 network-order bytes."""
        file.write(dns.ipv4.inet_aton(self.address))

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Build an A rdata from *rdlen* bytes of wire data at *current*."""
        address = dns.ipv4.inet_ntoa(wire[current : current + rdlen])
        return cls(rdclass, rdtype, address)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare by the packed 4-byte form so ordering is numeric,
        # not lexicographic on the text.  Uses the Python 2 cmp()
        # builtin, consistent with the rest of this code base.
        sa = dns.ipv4.inet_aton(self.address)
        oa = dns.ipv4.inet_aton(other.address)
        return cmp(sa, oa)
bsd-3-clause
lancezlin/ml_template_py
lib/python2.7/site-packages/nbconvert/filters/tests/test_ansi.py
8
3061
# coding: utf-8
"""
Module with tests for ansi filters
"""

# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

from __future__ import unicode_literals

from ...tests.base import TestsBase
from ..ansi import strip_ansi, ansi2html, ansi2latex


class TestAnsi(TestsBase):
    """Contains test functions for ansi.py"""

    def _check_pairs(self, converter, cases):
        """Assert converter(input) == expected for every fixture pair."""
        for source, expected in cases.items():
            self.assertEqual(expected, converter(source))

    def test_strip_ansi(self):
        """strip_ansi test"""
        self._check_pairs(strip_ansi, {
            '\x1b[32m\x1b[1m\x1b[0;44m\x1b[38;2;255;0;255m\x1b[;m\x1b[m': '',
            'hello\x1b[000;34m': 'hello',
            'he\x1b[1;33m\x1b[;36mllo': 'hello',
            '\x1b[;34mhello': 'hello',
            '\x1b[31mh\x1b[31me\x1b[31ml\x1b[31ml\x1b[31mo\x1b[31m': 'hello',
            'hel\x1b[;00;;032;;;32mlo': 'hello',
            'hello': 'hello',
        })

    def test_ansi2html(self):
        """ansi2html test"""
        self._check_pairs(ansi2html, {
            '\x1b[31m': '',
            'hello\x1b[34m': 'hello',
            'he\x1b[32m\x1b[36mllo': 'he<span class="ansi-cyan-fg">llo</span>',
            '\x1b[1;33mhello': '<span class="ansi-yellow-intense-fg ansi-bold">hello</span>',
            '\x1b[37mh\x1b[0;037me\x1b[;0037ml\x1b[00;37ml\x1b[;;37mo': '<span class="ansi-white-fg">h</span><span class="ansi-white-fg">e</span><span class="ansi-white-fg">l</span><span class="ansi-white-fg">l</span><span class="ansi-white-fg">o</span>',
            'hel\x1b[0;32mlo': 'hel<span class="ansi-green-fg">lo</span>',
            'hellø': 'hellø',
            '\x1b[1mhello\x1b[33mworld\x1b[0m': '<span class="ansi-bold">hello</span><span class="ansi-yellow-intense-fg ansi-bold">world</span>',
        })

    def test_ansi2latex(self):
        """ansi2latex test"""
        self._check_pairs(ansi2latex, {
            '\x1b[31m': '',
            'hello\x1b[34m': 'hello',
            'he\x1b[32m\x1b[36mllo': r'he\textcolor{ansi-cyan}{llo}',
            '\x1b[1;33mhello': r'\textcolor{ansi-yellow-intense}{\textbf{hello}}',
            '\x1b[37mh\x1b[0;037me\x1b[;0037ml\x1b[00;37ml\x1b[;;37mo': r'\textcolor{ansi-white}{h}\textcolor{ansi-white}{e}\textcolor{ansi-white}{l}\textcolor{ansi-white}{l}\textcolor{ansi-white}{o}',
            'hel\x1b[0;32mlo': r'hel\textcolor{ansi-green}{lo}',
            'hello': 'hello',
            'hello\x1b[34mthere\x1b[mworld': r'hello\textcolor{ansi-blue}{there}world',
            'hello\x1b[mthere': 'hellothere',
            'hello\x1b[01;34mthere': r'hello\textcolor{ansi-blue-intense}{\textbf{there}}',
            'hello\x1b[001;34mthere': r'hello\textcolor{ansi-blue-intense}{\textbf{there}}',
            '\x1b[1mhello\x1b[33mworld\x1b[0m': r'\textbf{hello}\textcolor{ansi-yellow-intense}{\textbf{world}}',
        })
mit
wenhulove333/ScutServer
Sample/Doudizhu/Server/src/ZyGames.Doudizhu.HostServer/PyScript/Action/Action9203.py
4
3297
import clr, sys
from action import *
from System import *
from mathUtils import MathUtils
clr.AddReference('ZyGames.Framework');
clr.AddReference('ZyGames.Framework.Common');
clr.AddReference('ZyGames.Framework.Game');
clr.AddReference('ZyGames.Doudizhu.Bll');
clr.AddReference('ZyGames.Doudizhu.Model');
clr.AddReference('ZyGames.Doudizhu.Lang');
from System.Collections.Generic import *
from ZyGames.Framework.SyncThreading import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Com.Rank import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Game.Model import *
from ZyGames.Doudizhu.Bll import *
from ZyGames.Doudizhu.Bll.Logic import *
from ZyGames.Doudizhu.Bll.Com.Chat import *
from ZyGames.Doudizhu.Lang import *
from ZyGames.Doudizhu.Model import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Game.Runtime import *
from ZyGames.Framework.Cache import *

# Action 9203 - game notice (announcement) list interface


class UrlParam(HttpParam):
    """Parsed request parameters for action 9203 (paged notice list)."""
    def __init__(self):
        HttpParam.__init__(self)
        self.PageIndex = 0
        self.PageSize = 0


class ActionResult(DataResult):
    """Response payload: one page of game notices plus the page count."""
    def __init__(self):
        DataResult.__init__(self)
        self.ChatMaxNum = 0
        # BUG FIX: the original code assigned the generic *type*
        # List[GameNotice] instead of instantiating it (missing "()"),
        # so the attribute did not start out as an empty .NET list.
        # The attribute name keeps its historical misspelling
        # ("gameNoticelsit") for compatibility with existing consumers.
        self.gameNoticelsit = List[GameNotice]()
        self.PageCount = 0


def getUrlElement(httpGet, parent):
    """Extract PageIndex/PageSize from the request; flag failure on Result."""
    urlParam = UrlParam()
    if httpGet.Contains("PageIndex")\
       and httpGet.Contains("PageSize"):
        urlParam.PageIndex = httpGet.GetIntValue("PageIndex")
        urlParam.PageSize = httpGet.GetIntValue("PageSize")
    else:
        urlParam.Result = False
    return urlParam


def takeAction(urlParam, parent):
    """Load all currently visible notices, sort them (pinned first, then
    newest first) and slice out the requested page."""
    def sortNotice(x, y):
        # None entries sort lowest; otherwise IsTop descending, then
        # CreateDate descending (comparator is called with (x, y)).
        if x == None and y == None:
            return 0
        if x != None and y == None:
            return 1
        if x == None:
            return -1
        result = y.IsTop.CompareTo(x.IsTop)
        if result == 0:
            result = y.CreateDate.CompareTo(x.CreateDate)
        return result

    actionResult = ActionResult()
    actionResult.gameNoticelsit = ShareCacheStruct[GameNotice]().FindAll(match=lambda m: filterNotice(m))
    MathUtils.QuickSort[GameNotice](actionResult.gameNoticelsit, lambda x, y: sortNotice(x, y))
    result = MathUtils.GetPaging[GameNotice](actionResult.gameNoticelsit, urlParam.PageIndex, urlParam.PageSize)
    if result:
        actionResult.gameNoticelsit = result[0]
        actionResult.PageCount = result[1]
    return actionResult


def filterNotice(s):
    """A notice is visible when it has no real expiry date (ExpiryDate at
    or below the SQL minimum date) or when its expiry lies in the future."""
    minData = MathUtils.SqlMinDate
    isExpiry = s.ExpiryDate <= minData
    return isExpiry or (not isExpiry and s.ExpiryDate >= DateTime.Now)


def buildPacket(writer, urlParam, actionResult):
    """Serialise the page count, the notice count, and per notice its
    title, content and creation date."""
    writer.PushIntoStack(actionResult.PageCount)
    writer.PushIntoStack(len(actionResult.gameNoticelsit))
    for info in actionResult.gameNoticelsit:
        DsItem = DataStruct()
        DsItem.PushIntoStack(MathUtils.ToNotNullString(info.Title))
        DsItem.PushIntoStack(MathUtils.ToNotNullString(info.Content))
        DsItem.PushIntoStack(MathUtils.ToNotNullString(info.CreateDate))
        writer.PushIntoStack(DsItem)
    return True
mit
jinnykoo/wuyisj.com
src/oscar/apps/dashboard/promotions/app.py
34
2762
from django.conf.urls import url

from oscar.core.application import Application
from oscar.core.loading import get_class
from oscar.apps.promotions.conf import PROMOTION_CLASSES


class PromotionsDashboardApplication(Application):
    """Dashboard app wiring up list/page views plus per-promotion-class
    CRUD views, which are attached dynamically at class-creation time."""
    name = None
    default_permissions = ['is_staff', ]

    list_view = get_class('dashboard.promotions.views', 'ListView')
    page_list = get_class('dashboard.promotions.views', 'PageListView')
    page_detail = get_class('dashboard.promotions.views', 'PageDetailView')
    create_redirect_view = get_class('dashboard.promotions.views',
                                     'CreateRedirectView')
    delete_page_promotion_view = get_class('dashboard.promotions.views',
                                           'DeletePagePromotionView')

    # Dynamically set the CRUD views for all promotion classes.
    # This loop runs in the class body, so assigning into locals() adds
    # attributes like "create_rawhtml_view" to the class namespace while
    # it is being built (this only works during class creation, not
    # inside functions).
    view_names = (
        ('create_%s_view', 'Create%sView'),
        ('update_%s_view', 'Update%sView'),
        ('delete_%s_view', 'Delete%sView')
    )
    for klass in PROMOTION_CLASSES:
        for attr_name, view_name in view_names:
            full_attr_name = attr_name % klass.classname()
            full_view_name = view_name % klass.__name__
            view = get_class('dashboard.promotions.views', full_view_name)
            locals()[full_attr_name] = view

    def get_urls(self):
        """Return the static URL patterns plus one create/update/delete
        pattern triple per promotion class."""
        urls = [
            url(r'^$', self.list_view.as_view(), name='promotion-list'),
            url(r'^pages/$', self.page_list.as_view(),
                name='promotion-list-by-page'),
            url(r'^page/(?P<path>/([\w-]+(/[\w-]+)*/)?)$',
                self.page_detail.as_view(), name='promotion-list-by-url'),
            url(r'^create/$',
                self.create_redirect_view.as_view(),
                name='promotion-create-redirect'),
            url(r'^page-promotion/(?P<pk>\d+)/$',
                self.delete_page_promotion_view.as_view(),
                name='pagepromotion-delete')]

        for klass in PROMOTION_CLASSES:
            code = klass.classname()
            urls += [
                # NOTE(review): unlike its siblings, this pattern has no
                # '^' anchor / '$' terminator -- confirm that is intended.
                url(r'create/%s/' % code,
                    getattr(self, 'create_%s_view' % code).as_view(),
                    name='promotion-create-%s' % code),
                url(r'^update/(?P<ptype>%s)/(?P<pk>\d+)/$' % code,
                    getattr(self, 'update_%s_view' % code).as_view(),
                    name='promotion-update'),
                url(r'^delete/(?P<ptype>%s)/(?P<pk>\d+)/$' % code,
                    getattr(self, 'delete_%s_view' % code).as_view(),
                    name='promotion-delete')]
        return self.post_process_urls(urls)


application = PromotionsDashboardApplication()
bsd-3-clause
xdevelsistemas/taiga-back-community
taiga/hooks/github/services.py
2
1518
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import uuid

from django.core.urlresolvers import reverse

from taiga.base.utils.urls import get_absolute_url


# Set this in settings.PROJECT_MODULES_CONFIGURATORS["github"]
def get_or_generate_config(project):
    """Return the project's GitHub hook configuration.

    Reuses the stored config when the project already has a "github"
    section; otherwise starts a fresh one with a random shared secret.
    Either way, the returned dict carries an up-to-date ``webhooks_url``
    pointing at this project's hook endpoint.
    """
    stored = project.modules_config.config
    if stored and "github" in stored:
        g_config = stored["github"]
    else:
        g_config = {"secret": uuid.uuid4().hex}

    hook_url = get_absolute_url(reverse("github-hook-list"))
    g_config["webhooks_url"] = "%s?project=%s" % (hook_url, project.id)
    return g_config
agpl-3.0
harlowja/fasteners
doc/source/conf.py
6
9425
# -*- coding: utf-8 -*- # # Fasteners documentation build configuration file, created by # sphinx-quickstart on Fri Jun 5 14:47:29 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex from fasteners import version as fasteners_version # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.doctest', 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Fasteners' copyright = u'2015, OpenStack Foundation, Yahoo!' author = u'Joshua Harlow, OpenStack Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. 
version = fasteners_version.version_string() # The full version, including alpha/beta/rc tags. release = fasteners_version.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "img/safety-pin-small.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Fastenersdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Fasteners.tex', u'Fasteners Documentation', u'Joshua Harlow', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'fasteners', u'Fasteners Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Fasteners', u'Fasteners Documentation', author, 'Fasteners', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
apache-2.0
kuke/models
legacy/generate_sequence_by_rnn_lm/config.py
5
1392
import os

################## for building word dictionary ##################

# Vocabulary capacity.  The "- 2" presumably reserves two slots for
# special tokens (e.g. <unk>/<e>) -- TODO confirm against the
# dictionary-building code.
max_word_num = 51200 - 2
cutoff_word_fre = 0  # frequency cutoff when building the dictionary; 0 keeps all words

################## for training task #########################
# path of training data
train_file = "data/train_data_examples.txt"

# path of testing data, if testing file does not exist,
# testing will not be performed at the end of each training pass
test_file = ""

# path of word dictionary, if this file does not exist,
# word dictionary will be built from training data.
vocab_file = "data/word_vocab.txt"

# directory to save the trained model
# create a new directory if the directory does not exist
model_save_dir = "models"

batch_size = 32  # the number of training examples in one forward/backward pass
num_passes = 20  # how many passes to train the model

log_period = 50  # log training progress every this many batches
save_period_by_batches = 50  # checkpoint the model every this many batches

use_gpu = False  # to use gpu or not
trainer_count = 1  # number of trainer

##################  for model configuration  ##################
rnn_type = "lstm"  # "gru" or "lstm"
emb_dim = 256  # dimension of the word embeddings
hidden_size = 256  # width of each recurrent layer
stacked_rnn_num = 2  # number of stacked recurrent layers

##################  for text generation  ##################
gen_file = "data/train_data_examples.txt"  # prefixes used to seed generation
gen_result = "data/gen_result.txt"  # where generated text is written
max_gen_len = 25  # the max number of words to generate
beam_size = 5  # beam width used during generation
model_path = "models/rnn_lm_pass_00000.tar.gz"  # checkpoint loaded for generation

# Create the checkpoint directory on import.  Attempt the mkdir and then
# tolerate an already-existing directory, instead of the former
# exists()-then-mkdir sequence, which raced with concurrent processes
# (both could observe "missing" and the loser's mkdir would then raise).
try:
    os.mkdir(model_save_dir)
except OSError:
    if not os.path.isdir(model_save_dir):
        raise
apache-2.0
tlangerak/Multi-Agent-Systems
build/lib.win-amd64-2.7/xmpp/__init__.py
212
1795
# $Id: __init__.py,v 1.9 2005/03/07 09:34:51 snakeru Exp $ """ All features of xmpppy library contained within separate modules. At present there are modules: simplexml - XML handling routines protocol - jabber-objects (I.e. JID and different stanzas and sub-stanzas) handling routines. debug - Jacob Lundquist's debugging module. Very handy if you like colored debug. auth - Non-SASL and SASL stuff. You will need it to auth as a client or transport. transports - low level connection handling. TCP and TLS currently. HTTP support planned. roster - simple roster for use in clients. dispatcher - decision-making logic. Handles all hooks. The first who takes control over fresh stanzas. features - different stuff that didn't worths separating into modules browser - DISCO server framework. Allows to build dynamic disco tree. filetransfer - Currently contains only IBB stuff. Can be used for bot-to-bot transfers. Most of the classes that is defined in all these modules is an ancestors of class PlugIn so they share a single set of methods allowing you to compile a featured XMPP client. For every instance of PlugIn class the 'owner' is the class in what the plug was plugged. While plugging in such instance usually sets some methods of owner to it's own ones for easy access. All session specific info stored either in instance of PlugIn or in owner's instance. This is considered unhandy and there are plans to port 'Session' class from xmppd.py project for storing all session-related info. Though if you are not accessing instances variables directly and use only methods for access all values you should not have any problems. """ import simplexml,protocol,debug,auth,transports,roster,dispatcher,features,browser,filetransfer,commands from client import * from protocol import *
lgpl-2.1
Guneet-Dhillon/mxnet
example/gluon/word_language_model/model.py
34
2871
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn, rnn

class RNNModel(gluon.Block):
    """A model with an encoder, recurrent layer, and a decoder.

    Pipeline: embedding lookup -> dropout -> recurrent layer(s) ->
    dropout -> dense decoder that projects hidden states back onto the
    vocabulary.

    Parameters
    ----------
    mode : str
        One of 'rnn_relu', 'rnn_tanh', 'lstm' or 'gru'; selects the
        recurrent cell type.
    vocab_size : int
        Vocabulary size (embedding input dim and decoder output dim).
    num_embed : int
        Embedding dimensionality.
    num_hidden : int
        Hidden units per recurrent layer.
    num_layers : int
        Number of stacked recurrent layers.
    dropout : float
        Dropout rate applied to the embeddings and to the RNN output.
    tie_weights : bool
        If True, the decoder reuses the embedding parameters.
    """
    def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer=mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, 'relu', num_layers, dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'rnn_tanh':
                # NOTE(review): unlike the 'rnn_relu' branch, no activation
                # string is passed here, so num_layers occupies the second
                # positional slot -- confirm this matches rnn.RNN's
                # positional signature in the pinned mxnet version.
                self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru"%mode)

            if tie_weights:
                # Share the embedding weights with the decoder.
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                        params=self.encoder.params)
            else:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden)
            self.num_hidden = num_hidden

    def forward(self, inputs, hidden):
        """Run one forward pass; returns (decoded_logits, new_hidden)."""
        emb = self.drop(self.encoder(inputs))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        # Collapse the time/batch axes before the dense projection.
        decoded = self.decoder(output.reshape((-1, self.num_hidden)))
        return decoded, hidden

    def begin_state(self, *args, **kwargs):
        # Delegate initial-state construction to the underlying RNN layer.
        return self.rnn.begin_state(*args, **kwargs)
apache-2.0
EnTeQuAk/django-orm
django_orm/cache/query.py
1
10576
# -*- coding: utf-8 -*- from django.db.models.query import QuerySet, ValuesQuerySet, ValuesListQuerySet from django.db.models.query import ITER_CHUNK_SIZE from django.db import backend, connection from django.conf import settings from django_orm.cache.utils import get_cache_key_for_pk, get_cache from django_orm.cache.exceptions import CacheMissingWarning from django_orm.postgresql import server_side_cursors CACHE_KEY_PREFIX = getattr(settings, 'ORM_CACHE_KEY_PREFIX', 'orm.cache') CACHE_FETCH_BY_ID = getattr(settings, 'ORM_CACHE_FETCH_BY_ID', False) import copy, hashlib import logging; log = logging.getLogger('orm.cache') cache = get_cache() class CachedMixIn(object): from_cache = False cache_object_enable = False cache_queryset_enable = False cache_fetch_by_id = False cache_fetch_by_id_queryset = False def __init__(self, *args, **kwargs): self.cache_key_prefix = CACHE_KEY_PREFIX super(CachedMixIn, self).__init__(*args, **kwargs) orm_meta = getattr(self.model, '_orm_meta') options = getattr(orm_meta, 'options') self.cache_object_enable = options['cache_object'] self.cache_queryset_enable = options['cache_queryset'] self.cache_timeout = options['default_timeout'] def query_key(self): sql, params = self.query.get_compiler(using=self.db).as_sql() return "%s:qs:default:table:%s:%s" % ( CACHE_KEY_PREFIX, self.model._meta.db_table, hashlib.sha1(sql % params).hexdigest() ) def _clone(self, klass=None, **kwargs): """ Clone queryset. 
""" qs = super(CachedMixIn,self)._clone(klass, **kwargs) qs.cache_object_enable = self.cache_object_enable qs.cache_queryset_enable = self.cache_queryset_enable qs.cache_timeout = self.cache_timeout qs.cache_fetch_by_id = self.cache_fetch_by_id qs.cache_fetch_by_id_queryset = self.cache_fetch_by_id_queryset return qs def cache(self, timeout=None): if not timeout: timeout = self.cache_timeout qs = self._clone() qs.cache_object_enable = True qs.cache_queryset_enable = True qs.cache_timeout = timeout return qs def no_cache(self): qs = self._clone() qs.cache_object_enable = False qs.cache_queryset_enable = False qs.cache_timeout = self.cache_timeout return qs def byid(self, cache_qs=False): qs = self._clone() qs.cache_fetch_by_id = True qs.cache_fetch_by_id_queryset = cache_qs return qs def get(self, *args, **kwargs): if not self.cache_object_enable: return super(CachedMixIn, self).get(*args, **kwargs) if len(args) > 0: return super(CachedMixIn, self).get(*args, **kwargs) pk, params, obj = None, copy.deepcopy(kwargs), None if "pk" in params: pk = params.pop('pk') elif "id" in kwargs: pk = params.pop('id') if pk: ckey = get_cache_key_for_pk(self.model, pk, **params) obj = cache.get(ckey) if not obj: obj = super(CachedMixIn, self).get(*args, **kwargs) cache.set(ckey, obj, self.cache_timeout) log.info("Orm cache missing: %s(%s)", self.model.__name__, obj.id) else: log.info("Orm cache hit: %s(%s)", self.model.__name__, obj.id) else: obj = super(CachedMixIn, self).get(*args, **kwargs) return obj def _prepare_queryset_for_cache(self, queryset): keys = tuple(obj.pk for obj in queryset) fields = () return (self.model, keys, fields, 1) def _get_queryset_from_cache(self, cache_object): model, keys, fields, length = cache_object results = self._get_objects_for_keys(model, keys) #if fields: # # TODO: optimize this so it's only one get_many call instead of one per select_related field # # XXX: this probably isn't handling depth beyond 1, didn't test even depth of 1 yet # for f in 
fields: # field = model._meta.get_field(f) # field_results = dict((r.id, r) for r in self._get_objects_for_keys(f.rel.to, [getattr(r, field.db_column) for r in results])) # for r in results: # setattr(r, f.name, field_results[getattr(r, field.db_column)]) return results def _get_objects_for_keys(self, model, keys): # First we fetch any keys that we can from the cache results = cache.get_many([get_cache_key_for_pk(model, k) for k in keys]).values() # Now we need to compute which keys weren't present in the cache result_ids = [obj.id for obj in results] missing = [key for key in keys if key not in result_ids] log.info("Orm cache queryset missing objects: %s(%s)", self.model.__name__, missing) # We no longer need to know what the keys were so turn it into a list results = list(results) objects = model._orm_manager.no_cache().filter(pk__in=missing) if objects: cache.set_many(dict([(obj.cache_key, obj) \ for obj in objects]), self.cache_timeout) results.extend(objects) # Do a simple len() lookup (maybe we shouldn't rely on it returning the right # number of objects cnt = len(missing) - len(objects) if cnt: raise CacheMissingWarning("%d objects missing in the database" % (cnt,)) return results def _result_iter(self): if not self.cache_queryset_enable: return super(CachedMixIn, self)._result_iter() if self.cache_fetch_by_id and not self.cache_fetch_by_id_queryset: return super(CachedMixIn, self)._result_iter() from django.db.models.sql import query try: cached_qs = cache.get(self.query_key()) if cached_qs: results = self._get_queryset_from_cache(cached_qs) self._result_cache = results self.from_cache = True self._iter = None log.info("Orm cache queryset hit for %s", self.model.__name__) else: log.info("Orm cache queryset missing for %s", self.model.__name__) except query.EmptyResultSet: pass return super(CachedMixIn, self)._result_iter() class CachedQuerySet(CachedMixIn, QuerySet): """ Main subclass of QuerySet that implements cache subsystem. 
""" def _fill_cache(self, num=None): super(CachedQuerySet, self)._fill_cache(num=num) if not self._iter and not self.from_cache and self.cache_queryset_enable: qs_prepared_for_cache = self._prepare_queryset_for_cache(self._result_cache) cache.set(self.query_key(), qs_prepared_for_cache, self.cache_timeout) cache.set_many(dict([(obj.cache_key, obj) \ for obj in self._result_cache]), self.cache_timeout) def values(self, *fields): return self._clone(klass=CachedValuesQuerySet, setup=True, _fields=fields) def values_list(self, *fields, **kwargs): flat = kwargs.pop('flat', False) if kwargs: raise TypeError('Unexpected keyword arguments to values_list: %s' % (kwargs.keys(),)) if flat and len(fields) > 1: raise TypeError("'flat' is not valid when values_list is called with more than one field.") return self._clone(klass=CachedValuesListQuerySet, setup=True, flat=flat, _fields=fields) def iterator(self): if self.cache_fetch_by_id: return self.fetch_by_id() return super(CachedMixIn, self).iterator() def fetch_by_id(self): if self.cache_fetch_by_id_queryset and self.cache_queryset_enable: vals = self.values_list('pk', *self.query.extra.keys()) else: vals = self.no_cache().values_list('pk', *self.query.extra.keys()) ids = [val[0] for val in vals] if self.cache_object_enable: keys = dict((get_cache_key_for_pk(self.model, i), i) for i in ids) cached = dict((k, v) for k, v in cache.get_many(keys).items() if v is not None) missed = [pk for key, pk in keys.iteritems() if key not in cached] new = {} if missed: objects = self.model._default_manager.filter(pk__in=missed) new = dict((get_cache_key_for_pk(self.model, o.pk), o) \ for o in objects) cache.set_many(new) objects = dict((o.pk, o) for o in cached.values() + new.values()) for pk in ids: yield objects[pk] else: qs = self.model._orm_manager.no_cache().filter(pk__in=ids) if connection.vendor == 'postgresql': with server_side_cursors(qs, itersize=10): for obj in qs.iterator(): yield obj else: for obj in qs.iterator(): yield obj 
class CachedValuesMixIn(object):
    """Cache behavior shared by the values()/values_list() querysets.

    Unlike the object queryset, these cache the evaluated rows directly
    rather than a (model, pk-list) tuple that is re-hydrated later, and
    they namespace their cache keys with ``cache_modifier``.
    """
    cache_modifier = 'values'

    def _prepare_queryset_for_cache(self, queryset):
        # Rows are stored as-is; there are no per-object keys or fields.
        return (self.model, queryset, (), 1)

    def _get_queryset_from_cache(self, cache_object):
        # The second slot of the cached tuple holds the rows themselves.
        _model, rows, _fields, _length = cache_object
        return rows

    def _fill_cache(self, num=None):
        super(CachedValuesMixIn, self)._fill_cache(num=num)
        fully_evaluated = not self._iter
        if fully_evaluated and not self.from_cache and self.cache_queryset_enable:
            payload = self._prepare_queryset_for_cache(self._result_cache)
            cache.set(self.query_key(), payload, self.cache_timeout)

    def query_key(self):
        compiler = self.query.get_compiler(using=self.db)
        sql, params = compiler.as_sql()
        digest = hashlib.sha1(sql % params).hexdigest()
        return "%s:qs:%s:table:%s:%s:flat=%s" % (
            CACHE_KEY_PREFIX,
            self.cache_modifier,
            self.model._meta.db_table,
            digest,
            getattr(self, 'flat', False),
        )


class CachedValuesQuerySet(CachedValuesMixIn, CachedMixIn, ValuesQuerySet):
    cache_modifier = 'values'


class CachedValuesListQuerySet(CachedValuesMixIn, CachedMixIn, ValuesListQuerySet):
    cache_modifier = 'valueslist'
bsd-3-clause
dtaht/ceroubnt-3.3
scripts/flashing/jungo-image.py
758
7069
#!/usr/bin/env python # # Copyright 2008, 2009 (C) Jose Vasconcellos <jvasco@verizon.net> # # A script that can communicate with jungo-based routers # (such as MI424-WR, USR8200 and WRV54G) to backup the installed # firmware and replace the boot loader. # # Tested with Python 2.5 on Linux and Windows # """Usage: %s [options] <IP_address> [image.bin | url] Valid options: \t-h | --help: usage statement \t-d | --dump: create a flash dump \t-f | --file: use <filename> to store dump contents \t-u | --user: provide username (default admin) \t-p | --pass: provide password (default password1) \t --port: set port for http (default 8080) \t-q | --quiet: don't display unnecessary information \t-r | --reboot: reboot target on successful transfer \t-V | --version: display version information If no image (or url) is given, a flash dump is created. A built-in http server is used when an image file is provided. """ import os import sys import getopt import getpass import telnetlib import string import binascii import socket import thread import SocketServer import SimpleHTTPServer reboot = 0 HOST = "192.168.1.1" PORT = 8080 user = "admin" #password = getpass.getpass() password = "password1" proto = "http" url = "" imagefile = "" dumpfile = "" verbose = 1 do_dump = 0 dumplen = 0x10000 flashsize=4*1024*1024 #device="br0" device="ixp0" #################### def start_server(server): httpd = SocketServer.TCPServer((server,PORT),SimpleHTTPServer.SimpleHTTPRequestHandler) thread.start_new_thread(httpd.serve_forever,()) #################### def get_flash_size(): # make sure we don't have an A0 stepping tn.write("cat /proc/cpuinfo\n") buf = tn.read_until("Returned 0", 3) if not buf: print "Unable to obtain CPU information; make sure to not use A0 stepping!" elif buf.find('rev 0') > 0: print "Warning: IXP42x stepping A0 detected!" if imagefile or url: print "Error: No linux support for A0 stepping!" 
sys.exit(2) # now get flash size tn.write("cat /proc/mtd\n") buf = tn.read_until("Returned 0", 3) if buf: i = buf.find('mtd0:') if i > 0: return int(buf[i+6:].split()[0],16) # use different command tn.write("flash_layout\n") buf = tn.read_until("Returned 0", 3) i = buf.rfind('Range ') if i > 0: return int(buf[i+17:].split()[0],16) print "Can't determine flash size!" else: print "Unable to obtain flash size!" sys.exit(2) def image_dump(tn, dumpfile): if not dumpfile: tn.write("ver\n"); buf = tn.read_until("Returned 0",2) i = buf.find("Platform:") if i < 0: platform="jungo" else: line=buf[i+9:] i=line.find('\n') platform=line[:i].split()[-1] tn.write("rg_conf_print /dev/%s/mac\n" % device); buf = tn.read_until("Returned 0",3) i = buf.find("mac(") if i > 0: i += 4 else: print "No MAC address found! (use -f option)" sys.exit(1) dumpfile = "%s-%s.bin" % (platform, buf[i:i+17].replace(':','')) else: tn.write("\n") print "Dumping flash contents (%dMB) to %s" % (flashsize/1048576, dumpfile) f = open(dumpfile, "wb") t=flashsize/dumplen for addr in range(t): if verbose: sys.stdout.write('\r%d%%'%(100*addr/t)) sys.stdout.flush() tn.write("flash_dump -r 0x%x -l %d -4\n" % (addr*dumplen, dumplen)) tn.read_until("\n") count = addr*dumplen while 1: buf = tn.read_until("\n") if buf.strip() == "Returned 0": break s = buf.split() if s and s[0][-1] == ':': a=int(s[0][:-1],16) if a != count: print "Format error: %x != %x"%(a,count) sys.exit(2) count += 16 f.write(binascii.a2b_hex(string.join(s[1:],''))) tn.read_until(">",1) f.close() if verbose: print "" def telnet_option(sock,cmd,option): #print "Option: %d %d" % (ord(cmd), ord(option)) if cmd == telnetlib.DO: c=telnetlib.WILL elif cmd == telnetlib.WILL: c=telnetlib.DO sock.sendall(telnetlib.IAC + c + option) def telnet_timeout(): print "Fatal error: telnet timeout!" 
sys.exit(1) def usage(): print __doc__ % os.path.basename(sys.argv[0]) #################### try: opts, args = getopt.getopt(sys.argv[1:], "hdf:qp:P:rvV", \ ["help", "dump", "file=", "user=", "pass=", "port=", "quiet=", "reboot", "verbose", "version"]) except getopt.GetoptError: # print help information and exit: usage() sys.exit(1) for o, a in opts: if o in ("-h", "--help"): usage() sys.exit(1) elif o in ("-V", "--version"): print "%s: 0.11" % sys.argv[0] sys.exit(1) elif o in ("-d", "--no-dump"): do_dump = 1 elif o in ("-f", "--file"): dumpfile = a elif o in ("-u", "--user"): user = a elif o in ("-p", "--pass"): password = a elif o == "--port": PORT = int(a) elif o in ("-q", "--quiet"): verbose = 0 elif o in ("-r", "--reboot"): reboot = 1 elif o in ("-v", "--verbose"): verbose = 1 # make sure we have enough arguments if len(args) > 0: HOST = args[0] if len(args) == 2: if args[1].split(':')[0] in ("tftp", "http", "ftp"): url = args[1] else: imagefile = args[1] else: do_dump = 1; #################### # create a telnet session to the router try: tn = telnetlib.Telnet(HOST) except socket.error, msg: print "Unable to establish telnet session to %s: %s" % (HOST, msg) sys.exit(1) tn.set_option_negotiation_callback(telnet_option) buf = tn.read_until("Username: ", 3) if not buf: telnet_timeout() tn.write(user+"\n") if password: buf = tn.read_until("Password: ", 3) if not buf: telnet_timeout() tn.write(password+"\n") # wait for prompt buf = tn.read_until("> ", 3) if not buf: telnet_timeout() flashsize = get_flash_size() if do_dump: image_dump(tn, dumpfile) if imagefile or url: splitpath = os.path.split(imagefile) # create load command if url: cmd = "load -u %s -r 0\n" % (url) else: server = tn.get_socket().getsockname()[0] cmd = "load -u http://%s:%d/%s -r 0\n" % (server, PORT, splitpath[1]) if not os.access(imagefile, os.R_OK): print "File access error: %s" % (imagefile) sys.exit(3) # make sure we're in the directory where the image is located if splitpath[0]: 
os.chdir(splitpath[0]) start_server(server) if verbose: print "Unlocking flash..." tn.write("unlock 0 0x%x\n" % flashsize) buf = tn.read_until("Returned 0",5) if verbose: print "Writing new image..." print cmd, tn.write(cmd) buf = tn.read_until("Returned 0",10) # wait till the transfer completed buf = tn.read_until("Download completed successfully",20) if buf: print "Flash update complete!" if reboot: tn.write("reboot\n") print "Rebooting..." tn.write("exit\n") tn.close()
gpl-2.0
stefan-jonasson/home-assistant
homeassistant/components/sensor/upnp.py
15
2406
""" Support for UPnP Sensors (IGD). For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.upnp/ """ import logging from homeassistant.components.upnp import DATA_UPNP, UNITS from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) # sensor_type: [friendly_name, convert_unit, icon] SENSOR_TYPES = { 'byte_received': ['received bytes', True, 'mdi:server-network'], 'byte_sent': ['sent bytes', True, 'mdi:server-network'], 'packets_in': ['packets received', False, 'mdi:server-network'], 'packets_out': ['packets sent', False, 'mdi:server-network'], } def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the IGD sensors.""" upnp = hass.data[DATA_UPNP] unit = discovery_info['unit'] add_devices([ IGDSensor(upnp, t, unit if SENSOR_TYPES[t][1] else None) for t in SENSOR_TYPES], True) class IGDSensor(Entity): """Representation of a UPnP IGD sensor.""" def __init__(self, upnp, sensor_type, unit=""): """Initialize the IGD sensor.""" self._upnp = upnp self.type = sensor_type self.unit = unit self.unit_factor = UNITS[unit] if unit is not None else 1 self._name = 'IGD {}'.format(SENSOR_TYPES[sensor_type][0]) self._state = None @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the device.""" if self._state is None: return None return format(self._state / self.unit_factor, '.1f') @property def icon(self): """Icon to use in the frontend, if any.""" return SENSOR_TYPES[self.type][2] @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self.unit def update(self): """Get the latest information from the IGD.""" if self.type == "byte_received": self._state = self._upnp.totalbytereceived() elif self.type == "byte_sent": self._state = self._upnp.totalbytesent() elif self.type == "packets_in": self._state = self._upnp.totalpacketreceived() 
elif self.type == "packets_out": self._state = self._upnp.totalpacketsent()
mit
GoodERPJeff/gooderp_addons
core/models/clean_data.py
6
2022
# -*- coding: utf-8 -*- from odoo import api, fields, models from odoo.exceptions import UserError class BusinessDataTable(models.Model): _name = 'business.data.table' _description = u'业务数据表' model = fields.Many2one('ir.model', u'需要清理的表') name = fields.Char(u'业务数据表名', required=True) clean_business_id = fields.Many2one( 'clean.business.data', string=u'清理数据对象') company_id = fields.Many2one( 'res.company', string=u'公司', change_default=True, default=lambda self: self.env['res.company']._company_default_get()) @api.onchange('model') def onchange_model(self): self.name = self.model and self.model.model class CleanBusinessData(models.Model): _name = 'clean.business.data' _description = u'清理记录' @api.model def _get_business_table_name(self): return self._get_business_table_name_impl() @api.model def _get_business_table_name_impl(self): ''' 默认取business.data.table 里的所有业务数据表清理 ''' return self.env['business.data.table'].search([]) need_clean_table = fields.One2many('business.data.table', 'clean_business_id', default=_get_business_table_name, string='要清理的业务数据表') company_id = fields.Many2one( 'res.company', string=u'公司', change_default=True, default=lambda self: self.env['res.company']._company_default_get()) @api.multi def remove_data(self): try: for line in self.need_clean_table: obj_name = line.name obj = self.env[obj_name] if obj._table_exist: sql = "TRUNCATE TABLE %s CASCADE " % obj._table self.env.cr.execute(sql) except Exception, e: raise UserError(e) return True
agpl-3.0
qPCR4vir/orange
Orange/testing/unit/tests/test_measures.py
6
1916
from Orange.testing import testing
from Orange.testing.testing import datasets_driven, test_on_data
from Orange.feature import scoring

# Prefer the unittest2 backport when it is installed (needed on
# Python < 2.7).  Catch only ImportError: the previous bare ``except:``
# would also have hidden genuine import-time failures inside unittest2.
try:
    import unittest2 as unittest
except ImportError:
    import unittest


@datasets_driven(datasets=testing.CLASSIFICATION_DATASETS,
                 preprocess=testing.DISCRETIZE_DOMAIN)
class TestMeasureAttr_GainRatio(testing.MeasureAttributeTestCase):
    """Exercise scoring.GainRatio on discretized classification data."""
    MEASURE = scoring.GainRatio()


@datasets_driven(datasets=testing.CLASSIFICATION_DATASETS,
                 preprocess=testing.DISCRETIZE_DOMAIN)
class TestMeasureAttr_InfoGain(testing.MeasureAttributeTestCase):
    """Exercise scoring.InfoGain on discretized classification data."""
    MEASURE = scoring.InfoGain()


# TODO: Relevance, Cost


@datasets_driven(datasets=testing.CLASSIFICATION_DATASETS,
                 preprocess=testing.DISCRETIZE_DOMAIN)
class TestMeasureAttr_Distance(testing.MeasureAttributeTestCase):
    """Exercise scoring.Distance on discretized classification data."""
    MEASURE = scoring.Distance()


@datasets_driven(datasets=testing.CLASSIFICATION_DATASETS,
                 preprocess=testing.DISCRETIZE_DOMAIN)
class TestMeasureAttr_MDL(testing.MeasureAttributeTestCase):
    """Exercise scoring.MDL on discretized classification data."""
    MEASURE = scoring.MDL()


@datasets_driven(datasets=testing.CLASSIFICATION_DATASETS + \
                          testing.REGRESSION_DATASETS)
class TestMeasureAttr_Relief(testing.MeasureAttributeTestCase):
    """Relief runs on raw (non-discretized) data for both task types."""
    MEASURE = scoring.Relief()


@datasets_driven(datasets=testing.REGRESSION_DATASETS,
                 preprocess=testing.DISCRETIZE_DOMAIN)
class TestMeasureAttr_MSE(testing.MeasureAttributeTestCase):
    """Exercise scoring.MSE on discretized regression data."""
    MEASURE = scoring.MSE()


@datasets_driven(datasets=testing.CLASSIFICATION_DATASETS)
class TestScoringUtils(testing.DataTestCase):
    """Smoke tests for the scoring helper utilities."""

    @test_on_data
    def test_order_attrs(self, dataset):
        # Only verifies that attribute ordering completes without raising;
        # the resulting order itself is not asserted here.
        order = scoring.OrderAttributes(scoring.Relief())
        order(dataset, 0)

    @test_on_data
    def test_score_all(self, dataset):
        scoring.score_all(dataset, score=scoring.Relief())


if __name__ == "__main__":
    unittest.main()
gpl-3.0
ChonchoFronto/sarah
lambda/numpy/distutils/fcompiler/sun.py
167
1690
from __future__ import division, absolute_import, print_function from numpy.distutils.ccompiler import simple_version_match from numpy.distutils.fcompiler import FCompiler compilers = ['SunFCompiler'] class SunFCompiler(FCompiler): compiler_type = 'sun' description = 'Sun or Forte Fortran 95 Compiler' # ex: # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 version_match = simple_version_match( start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') executables = { 'version_cmd' : ["<F90>", "-V"], 'compiler_f77' : ["f90"], 'compiler_fix' : ["f90", "-fixed"], 'compiler_f90' : ["f90"], 'linker_so' : ["<F90>", "-Bdynamic", "-G"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } module_dir_switch = '-moddir=' module_include_switch = '-M' pic_flags = ['-xcode=pic32'] def get_flags_f77(self): ret = ["-ftrap=%none"] if (self.get_version() or '') >= '7': ret.append("-f77") else: ret.append("-fixed") return ret def get_opt(self): return ['-fast', '-dalign'] def get_arch(self): return ['-xtarget=generic'] def get_libraries(self): opt = [] opt.extend(['fsu', 'sunmath', 'mvec']) return opt def runtime_library_dir_option(self, dir): return '-R"%s"' % dir if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils.fcompiler import new_fcompiler compiler = new_fcompiler(compiler='sun') compiler.customize() print(compiler.get_version())
mit
sivakuna-aap/superdesk-core
tests/celery_tests.py
3
1350
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013, 2014 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license from superdesk.tests import TestCase from superdesk.celery_app import try_cast, loads from bson import ObjectId from datetime import datetime from eve.utils import date_to_str class CeleryTestCase(TestCase): _id = ObjectId('528de7b03b80a13eefc5e610') def test_cast_objectid(self): self.assertEqual(try_cast(str(self._id)), self._id) def test_cast_datetime(self): date = datetime(2012, 12, 12, 12, 12, 12, 0) with self.app.app_context(): s = date_to_str(date) self.assertEqual(try_cast(s).day, date.day) def test_loads_args(self): s = b'{"args": [{"_id": "528de7b03b80a13eefc5e610", "_updated": "2014-09-10T14:31:09+0000"}]}' o = loads(s) self.assertEqual(o['args'][0]['_id'], self._id) self.assertIsInstance(o['args'][0]['_updated'], datetime) def test_loads_kwargs(self): s = b'''{"kwargs": "{}", "pid": 24998, "eta": null}''' o = loads(s) self.assertEqual({}, o['kwargs']) self.assertIsNone(o['eta'])
agpl-3.0
zwimer/IntroToOpenSourceLabs
Lab6/markdown.py/markdown_adapter.py
7
1316
""" Software API adapter for markdown.py This module provides a function based API to markdown.py since markdown.py only provides a CLI. """ from subprocess import Popen, PIPE, STDOUT from tempfile import NamedTemporaryFile import os # This is here so there's one line to change if I want to swap # out a different script, such as markdown.pl _interpreter_and_script = ['python', 'markdown.py'] def run_markdown(input_text): """ The default method when we don't care which method to use. """ return run_markdown_pipe(input_text) def run_markdown_pipe(input_text): """ Simulate: echo 'some input' | python markdown.py """ pipe = Popen(_interpreter_and_script, stdout=PIPE, stdin=PIPE, stderr=STDOUT) output = pipe.communicate(input=input_text)[0] return output.rstrip() def run_markdown_file(input_text): """ Simulate: python markdown.py fileName """ temp_file = NamedTemporaryFile(delete=False) temp_file.write(input_text) temp_file.close() interp_script_and_fileName = _interpreter_and_script interp_script_and_fileName.append(temp_file.name) pipe = Popen(interp_script_and_fileName, stdout=PIPE, stderr=STDOUT) output = pipe.communicate()[0] os.unlink(temp_file.name) return output.rstrip()
mit
sss/calibre-at-bzr
src/calibre/ebooks/rtf/rtfml.py
4
11429
# -*- coding: utf-8 -*- __license__ = 'GPL 3' __copyright__ = '2009, John Schember <john@nachtimwald.com>' __docformat__ = 'restructuredtext en' ''' Transform OEB content into RTF markup ''' import os import re import cStringIO from lxml import etree from calibre.ebooks.metadata import authors_to_string from calibre.utils.magick.draw import save_cover_data_to, identify_data TAGS = { 'b': '\\b', 'del': '\\deleted', 'h1': '\\s1 \\afs32', 'h2': '\\s2 \\afs28', 'h3': '\\s3 \\afs28', 'h4': '\\s4 \\afs23', 'h5': '\\s5 \\afs23', 'h6': '\\s6 \\afs21', 'i': '\\i', 'li': '\t', 'p': '\t', 'sub': '\\sub', 'sup': '\\super', 'u': '\\ul', } SINGLE_TAGS = { 'br': '\n{\\line }\n', } STYLES = [ ('font-weight', {'bold': '\\b', 'bolder': '\\b'}), ('font-style', {'italic': '\\i'}), ('text-align', {'center': '\\qc', 'left': '\\ql', 'right': '\\qr'}), ('text-decoration', {'line-through': '\\strike', 'underline': '\\ul'}), ] BLOCK_TAGS = [ 'div', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', ] BLOCK_STYLES = [ 'block' ] ''' TODO: * Tables * Fonts ''' def txt2rtf(text): # Escape { and } in the text. 
text = text.replace('{', r'\'7b') text = text.replace('}', r'\'7d') if not isinstance(text, unicode): return text buf = cStringIO.StringIO() for x in text: val = ord(x) if val == 160: buf.write('\\~') elif val <= 127: buf.write(x) else: c = r'\u{0:d}?'.format(val) buf.write(c) return buf.getvalue() class RTFMLizer(object): def __init__(self, log): self.log = log def extract_content(self, oeb_book, opts): self.log.info('Converting XHTML to RTF markup...') self.oeb_book = oeb_book self.opts = opts return self.mlize_spine() def mlize_spine(self): from calibre.ebooks.oeb.base import XHTML from calibre.ebooks.oeb.stylizer import Stylizer output = self.header() if 'titlepage' in self.oeb_book.guide: href = self.oeb_book.guide['titlepage'].href item = self.oeb_book.manifest.hrefs[href] if item.spine_position is None: stylizer = Stylizer(item.data, item.href, self.oeb_book, self.opts, self.opts.output_profile) self.currently_dumping_item = item output += self.dump_text(item.data.find(XHTML('body')), stylizer) output += '{\\page }' for item in self.oeb_book.spine: self.log.debug('Converting %s to RTF markup...' 
% item.href) content = unicode(etree.tostring(item.data, encoding=unicode)) content = self.remove_newlines(content) content = self.remove_tabs(content) content = etree.fromstring(content) stylizer = Stylizer(content, item.href, self.oeb_book, self.opts, self.opts.output_profile) self.currently_dumping_item = item output += self.dump_text(content.find(XHTML('body')), stylizer) output += '{\\page }' output += self.footer() output = self.insert_images(output) output = self.clean_text(output) return output def remove_newlines(self, text): self.log.debug('\tRemove newlines for processing...') text = text.replace('\r\n', ' ') text = text.replace('\n', ' ') text = text.replace('\r', ' ') return text def remove_tabs(self, text): self.log.debug('\Replace tabs with space for processing...') text = text.replace('\t', ' ') return text def header(self): header = u'{\\rtf1{\\info{\\title %s}{\\author %s}}\\ansi\\ansicpg1252\\deff0\\deflang1033\n' % (self.oeb_book.metadata.title[0].value, authors_to_string([x.value for x in self.oeb_book.metadata.creator])) return header + \ '{\\fonttbl{\\f0\\froman\\fprq2\\fcharset128 Times New Roman;}{\\f1\\froman\\fprq2\\fcharset128 Times New Roman;}{\\f2\\fswiss\\fprq2\\fcharset128 Arial;}{\\f3\\fnil\\fprq2\\fcharset128 Arial;}{\\f4\\fnil\\fprq2\\fcharset128 MS Mincho;}{\\f5\\fnil\\fprq2\\fcharset128 Tahoma;}{\\f6\\fnil\\fprq0\\fcharset128 Tahoma;}}\n' \ '{\\stylesheet{\\ql \\li0\\ri0\\nowidctlpar\\wrapdefault\\faauto\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\af25\\afs24\\alang1033 \\ltrch\\fcs0 \\fs24\\lang1033\\langfe255\\cgrid\\langnp1033\\langfenp255 \\snext0 Normal;}\n' \ '{\\s1\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel0\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs32\\alang1033 \\ltrch\\fcs0 \\b\\fs32\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink21 heading 1;}\n' \ '{\\s2\\ql 
\\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel1\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\ai\\af0\\afs28\\alang1033 \\ltrch\\fcs0 \\b\\i\\fs28\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink22 heading 2;}\n' \ '{\\s3\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel2\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs28\\alang1033 \\ltrch\\fcs0 \\b\\fs28\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink23 heading 3;}\n' \ '{\\s4\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel3\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\ai\\af0\\afs23\\alang1033 \\ltrch\\fcs0\\b\\i\\fs23\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink24 heading 4;}\n' \ '{\\s5\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel4\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs23\\alang1033 \\ltrch\\fcs0 \\b\\fs23\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink25 heading 5;}\n' \ '{\\s6\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel5\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs21\\alang1033 \\ltrch\\fcs0 \\b\\fs21\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink26 heading 6;}}\n' def footer(self): return ' }' def insert_images(self, text): from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES for item in self.oeb_book.manifest: if item.media_type in OEB_RASTER_IMAGES: src = item.href try: data, width, height = self.image_to_hexstring(item.data) except: self.log.warn('Image %s is corrupted, ignoring'%item.href) repl = '\n\n' else: repl = '\n\n{\\*\\shppict{\\pict\\jpegblip\\picw%i\\pich%i \n%s\n}}\n\n' % 
(width, height, data) text = text.replace('SPECIAL_IMAGE-%s-REPLACE_ME' % src, repl) return text def image_to_hexstring(self, data): data = save_cover_data_to(data, 'cover.jpg', return_data=True) width, height = identify_data(data)[:2] raw_hex = '' for char in data: raw_hex += hex(ord(char)).replace('0x', '').rjust(2, '0') # Images must be broken up so that they are no longer than 129 chars # per line hex_string = '' col = 1 for char in raw_hex: if col == 129: hex_string += '\n' col = 1 col += 1 hex_string += char return (hex_string, width, height) def clean_text(self, text): # Remove excessive newlines text = re.sub('%s{3,}' % os.linesep, '%s%s' % (os.linesep, os.linesep), text) # Remove excessive spaces text = re.sub('[ ]{2,}', ' ', text) text = re.sub('\t{2,}', '\t', text) text = re.sub('\t ', '\t', text) # Remove excessive line breaks text = re.sub(r'(\{\\line \}\s*){3,}', r'{\\line }{\\line }', text) # Remove non-breaking spaces text = text.replace(u'\xa0', ' ') text = text.replace('\n\r', '\n') return text def dump_text(self, elem, stylizer, tag_stack=[]): from calibre.ebooks.oeb.base import (XHTML_NS, namespace, barename, urlnormalize) if not isinstance(elem.tag, basestring) \ or namespace(elem.tag) != XHTML_NS: p = elem.getparent() if p is not None and isinstance(p.tag, basestring) and namespace(p.tag) == XHTML_NS \ and elem.tail: return elem.tail return u'' text = u'' style = stylizer.style(elem) if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \ or style['visibility'] == 'hidden': if hasattr(elem, 'tail') and elem.tail: return elem.tail return u'' tag = barename(elem.tag) tag_count = 0 # Are we in a paragraph block? if tag in BLOCK_TAGS or style['display'] in BLOCK_STYLES: if 'block' not in tag_stack: tag_count += 1 tag_stack.append('block') # Process tags that need special processing and that do not have inner # text. 
Usually these require an argument if tag == 'img': src = elem.get('src') if src: src = urlnormalize(self.currently_dumping_item.abshref(src)) block_start = '' block_end = '' if 'block' not in tag_stack: block_start = '{\\par\\pard\\hyphpar ' block_end = '}' text += '%s SPECIAL_IMAGE-%s-REPLACE_ME %s' % (block_start, src, block_end) single_tag = SINGLE_TAGS.get(tag, None) if single_tag: text += single_tag rtf_tag = TAGS.get(tag, None) if rtf_tag and rtf_tag not in tag_stack: tag_count += 1 text += '{%s\n' % rtf_tag tag_stack.append(rtf_tag) # Processes style information for s in STYLES: style_tag = s[1].get(style[s[0]], None) if style_tag and style_tag not in tag_stack: tag_count += 1 text += '{%s\n' % style_tag tag_stack.append(style_tag) # Proccess tags that contain text. if hasattr(elem, 'text') and elem.text: text += txt2rtf(elem.text) for item in elem: text += self.dump_text(item, stylizer, tag_stack) for i in range(0, tag_count): end_tag = tag_stack.pop() if end_tag != 'block': if tag in BLOCK_TAGS: text += u'\\par\\pard\\plain\\hyphpar}' else: text += u'}' if hasattr(elem, 'tail') and elem.tail: if 'block' in tag_stack: text += '%s' % txt2rtf(elem.tail) else: text += '{\\par\\pard\\hyphpar %s}' % txt2rtf(elem.tail) return text
gpl-3.0
sunlianqiang/kbengine
kbe/res/scripts/common/Lib/test/testcodec.py
203
1046
""" Test Codecs (used by test_charmapcodec) Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright 2000 Guido van Rossum. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_map) class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return (Codec().encode,Codec().decode,StreamReader,StreamWriter) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x78: "abc", # 1-n decoding mapping b"abc": 0x0078,# 1-n encoding mapping 0x01: None, # decoding mapping to <undefined> 0x79: "", # decoding mapping to <remove character> }) ### Encoding Map encoding_map = {} for k,v in decoding_map.items(): encoding_map[v] = k
lgpl-3.0
devrandom/pycoin
tests/parse_block_test.py
22
2430
#!/usr/bin/env python import io import unittest from pycoin.block import Block from pycoin.serialize import b2h_rev, h2b class BlockTest(unittest.TestCase): def test_block(self): expected_checksum = '0000000000089F7910F6755C10EA2795EC368A29B435D80770AD78493A6FECF1'.lower() block_data = h2b('010000007480150B299A16BBCE5CCDB1D1BBC65CFC5893B01E6619107C55200000000000790'\ '0A2B203D24C69710AB6A94BEB937E1B1ADD64C2327E268D8C3E5F8B41DBED8796974CED66471B204C3247030'\ '1000000010000000000000000000000000000000000000000000000000000000000000000FFFFFFFF0804ED6'\ '6471B024001FFFFFFFF0100F2052A010000004341045FEE68BAB9915C4EDCA4C680420ED28BBC369ED84D48A'\ 'C178E1F5F7EEAC455BBE270DABA06802145854B5E29F0A7F816E2DF906E0FE4F6D5B4C9B92940E4F0EDAC000'\ '000000100000001F7B30415D1A7BF6DB91CB2A272767C6799D721A4178AA328E0D77C199CB3B57F010000008'\ 'A4730440220556F61B84F16E637836D2E74B8CB784DE40C28FE3EF93CCB7406504EE9C7CAA5022043BD4749D'\ '4F3F7F831AC696748AD8D8E79AEB4A1C539E742AA3256910FC88E170141049A414D94345712893A828DE57B4C'\ '2054E2F596CDCA9D0B4451BA1CA5F8847830B9BE6E196450E6ABB21C540EA31BE310271AA00A49ED0BA930743'\ 'D1ED465BAD0FFFFFFFF0200E1F505000000001976A914529A63393D63E980ACE6FA885C5A89E4F27AA08988AC'\ 'C0ADA41A000000001976A9145D17976537F308865ED533CCCFDD76558CA3C8F088AC000000000100000001651'\ '48D894D3922EF5FFDA962BE26016635C933D470C8B0AB7618E869E3F70E3C000000008B48304502207F5779EB'\ 'F4834FEAEFF4D250898324EB5C0833B16D7AF4C1CB0F66F50FCF6E85022100B78A65377FD018281E77285EFC3'\ '1E5B9BA7CB7E20E015CF6B7FA3E4A466DD195014104072AD79E0AA38C05FA33DD185F84C17F611E58A8658CE'\ '996D8B04395B99C7BE36529CAB7606900A0CD5A7AEBC6B233EA8E0FE60943054C63620E05E5B85F0426FFFFF'\ 'FFF02404B4C00000000001976A914D4CAA8447532CA8EE4C80A1AE1D230A01E22BFDB88AC8013A0DE0100000'\ '01976A9149661A79AE1F6D487AF3420C13E649D6DF3747FC288AC00000000') # try to parse a block block = Block.parse(io.BytesIO(block_data)) print(block) assert b2h_rev(block.hash()) == expected_checksum for tx in block.txs: print(tx) for t in 
tx.txs_in: print(" %s" % t) for t in tx.txs_out: print(" %s" % t) block.check_merkle_hash() def main(): unittest.main() if __name__ == "__main__": main()
mit
arborh/tensorflow
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shapes_for_arguments.py
21
1840
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # RUN: %p/shapes_for_arguments | FileCheck %s # pylint: disable=missing-docstring,line-too-long from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common class TestModule(tf.Module): # Check that we get shapes annotated on function arguments. # # Besides checking the shape on the function input argument, this test also # checks that the shape on the input argument is propagated to the return # value. # We eventually want to move the shape inference to a pass separate from # the initial import, in which case that aspect of this test doesn't make much # sense and will be superceded by MLIR->MLIR shape inference tests. # # CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<f32> {{.*}}) -> (tensor<f32> {{.*}}) # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"] @tf.function(input_signature=[tf.TensorSpec([], tf.float32)]) def some_function(self, x): return x if __name__ == '__main__': common.do_test(TestModule)
apache-2.0
chineyting/project4-Info3180
server/lib/flask/sessions.py
348
12882
# -*- coding: utf-8 -*- """ flask.sessions ~~~~~~~~~~~~~~ Implements cookie based sessions based on itsdangerous. :copyright: (c) 2012 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import uuid import hashlib from datetime import datetime from werkzeug.http import http_date, parse_date from werkzeug.datastructures import CallbackDict from . import Markup, json from ._compat import iteritems, text_type from itsdangerous import URLSafeTimedSerializer, BadSignature def total_seconds(td): return td.days * 60 * 60 * 24 + td.seconds class SessionMixin(object): """Expands a basic dictionary with an accessors that are expected by Flask extensions and users for the session. """ def _get_permanent(self): return self.get('_permanent', False) def _set_permanent(self, value): self['_permanent'] = bool(value) #: this reflects the ``'_permanent'`` key in the dict. permanent = property(_get_permanent, _set_permanent) del _get_permanent, _set_permanent #: some session backends can tell you if a session is new, but that is #: not necessarily guaranteed. Use with caution. The default mixin #: implementation just hardcodes `False` in. new = False #: for some backends this will always be `True`, but some backends will #: default this to false and detect changes in the dictionary for as #: long as changes do not happen on mutable structures in the session. #: The default mixin implementation just hardcodes `True` in. modified = True class TaggedJSONSerializer(object): """A customized JSON serializer that supports a few extra types that we take for granted when serializing (tuples, markup objects, datetime). 
""" def dumps(self, value): def _tag(value): if isinstance(value, tuple): return {' t': [_tag(x) for x in value]} elif isinstance(value, uuid.UUID): return {' u': value.hex} elif callable(getattr(value, '__html__', None)): return {' m': text_type(value.__html__())} elif isinstance(value, list): return [_tag(x) for x in value] elif isinstance(value, datetime): return {' d': http_date(value)} elif isinstance(value, dict): return dict((k, _tag(v)) for k, v in iteritems(value)) elif isinstance(value, str): try: return text_type(value) except UnicodeError: raise UnexpectedUnicodeError(u'A byte string with ' u'non-ASCII data was passed to the session system ' u'which can only store unicode strings. Consider ' u'base64 encoding your string (String was %r)' % value) return value return json.dumps(_tag(value), separators=(',', ':')) def loads(self, value): def object_hook(obj): if len(obj) != 1: return obj the_key, the_value = next(iteritems(obj)) if the_key == ' t': return tuple(the_value) elif the_key == ' u': return uuid.UUID(the_value) elif the_key == ' m': return Markup(the_value) elif the_key == ' d': return parse_date(the_value) return obj return json.loads(value, object_hook=object_hook) session_json_serializer = TaggedJSONSerializer() class SecureCookieSession(CallbackDict, SessionMixin): """Baseclass for sessions based on signed cookies.""" def __init__(self, initial=None): def on_update(self): self.modified = True CallbackDict.__init__(self, initial, on_update) self.modified = False class NullSession(SecureCookieSession): """Class used to generate nicer error messages if sessions are not available. Will still allow read-only access to the empty session but fail on setting. """ def _fail(self, *args, **kwargs): raise RuntimeError('the session is unavailable because no secret ' 'key was set. 
Set the secret_key on the ' 'application to something unique and secret.') __setitem__ = __delitem__ = clear = pop = popitem = \ update = setdefault = _fail del _fail class SessionInterface(object): """The basic interface you have to implement in order to replace the default session interface which uses werkzeug's securecookie implementation. The only methods you have to implement are :meth:`open_session` and :meth:`save_session`, the others have useful defaults which you don't need to change. The session object returned by the :meth:`open_session` method has to provide a dictionary like interface plus the properties and methods from the :class:`SessionMixin`. We recommend just subclassing a dict and adding that mixin:: class Session(dict, SessionMixin): pass If :meth:`open_session` returns `None` Flask will call into :meth:`make_null_session` to create a session that acts as replacement if the session support cannot work because some requirement is not fulfilled. The default :class:`NullSession` class that is created will complain that the secret key was not set. To replace the session interface on an application all you have to do is to assign :attr:`flask.Flask.session_interface`:: app = Flask(__name__) app.session_interface = MySessionInterface() .. versionadded:: 0.8 """ #: :meth:`make_null_session` will look here for the class that should #: be created when a null session is requested. Likewise the #: :meth:`is_null_session` method will perform a typecheck against #: this type. null_session_class = NullSession #: A flag that indicates if the session interface is pickle based. #: This can be used by flask extensions to make a decision in regards #: to how to deal with the session object. #: #: .. versionadded:: 0.10 pickle_based = False def make_null_session(self, app): """Creates a null session which acts as a replacement object if the real session support could not be loaded due to a configuration error. 
This mainly aids the user experience because the job of the null session is to still support lookup without complaining but modifications are answered with a helpful error message of what failed. This creates an instance of :attr:`null_session_class` by default. """ return self.null_session_class() def is_null_session(self, obj): """Checks if a given object is a null session. Null sessions are not asked to be saved. This checks if the object is an instance of :attr:`null_session_class` by default. """ return isinstance(obj, self.null_session_class) def get_cookie_domain(self, app): """Helpful helper method that returns the cookie domain that should be used for the session cookie if session cookies are used. """ if app.config['SESSION_COOKIE_DOMAIN'] is not None: return app.config['SESSION_COOKIE_DOMAIN'] if app.config['SERVER_NAME'] is not None: # chop of the port which is usually not supported by browsers rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0] # Google chrome does not like cookies set to .localhost, so # we just go with no domain then. Flask documents anyways that # cross domain cookies need a fully qualified domain name if rv == '.localhost': rv = None # If we infer the cookie domain from the server name we need # to check if we are in a subpath. In that case we can't # set a cross domain cookie. if rv is not None: path = self.get_cookie_path(app) if path != '/': rv = rv.lstrip('.') return rv def get_cookie_path(self, app): """Returns the path for which the cookie should be valid. The default implementation uses the value from the SESSION_COOKIE_PATH`` config var if it's set, and falls back to ``APPLICATION_ROOT`` or uses ``/`` if it's `None`. """ return app.config['SESSION_COOKIE_PATH'] or \ app.config['APPLICATION_ROOT'] or '/' def get_cookie_httponly(self, app): """Returns True if the session cookie should be httponly. This currently just returns the value of the ``SESSION_COOKIE_HTTPONLY`` config var. 
""" return app.config['SESSION_COOKIE_HTTPONLY'] def get_cookie_secure(self, app): """Returns True if the cookie should be secure. This currently just returns the value of the ``SESSION_COOKIE_SECURE`` setting. """ return app.config['SESSION_COOKIE_SECURE'] def get_expiration_time(self, app, session): """A helper method that returns an expiration date for the session or `None` if the session is linked to the browser session. The default implementation returns now + the permanent session lifetime configured on the application. """ if session.permanent: return datetime.utcnow() + app.permanent_session_lifetime def open_session(self, app, request): """This method has to be implemented and must either return `None` in case the loading failed because of a configuration error or an instance of a session object which implements a dictionary like interface + the methods and attributes on :class:`SessionMixin`. """ raise NotImplementedError() def save_session(self, app, session, response): """This is called for actual sessions returned by :meth:`open_session` at the end of the request. This is still called during a request context so if you absolutely need access to the request you can do that. """ raise NotImplementedError() class SecureCookieSessionInterface(SessionInterface): """The default session interface that stores sessions in signed cookies through the :mod:`itsdangerous` module. """ #: the salt that should be applied on top of the secret key for the #: signing of cookie based sessions. salt = 'cookie-session' #: the hash function to use for the signature. The default is sha1 digest_method = staticmethod(hashlib.sha1) #: the name of the itsdangerous supported key derivation. The default #: is hmac. key_derivation = 'hmac' #: A python serializer for the payload. The default is a compact #: JSON derived serializer with support for some extra Python types #: such as datetime objects or tuples. 
serializer = session_json_serializer session_class = SecureCookieSession def get_signing_serializer(self, app): if not app.secret_key: return None signer_kwargs = dict( key_derivation=self.key_derivation, digest_method=self.digest_method ) return URLSafeTimedSerializer(app.secret_key, salt=self.salt, serializer=self.serializer, signer_kwargs=signer_kwargs) def open_session(self, app, request): s = self.get_signing_serializer(app) if s is None: return None val = request.cookies.get(app.session_cookie_name) if not val: return self.session_class() max_age = total_seconds(app.permanent_session_lifetime) try: data = s.loads(val, max_age=max_age) return self.session_class(data) except BadSignature: return self.session_class() def save_session(self, app, session, response): domain = self.get_cookie_domain(app) path = self.get_cookie_path(app) if not session: if session.modified: response.delete_cookie(app.session_cookie_name, domain=domain, path=path) return httponly = self.get_cookie_httponly(app) secure = self.get_cookie_secure(app) expires = self.get_expiration_time(app, session) val = self.get_signing_serializer(app).dumps(dict(session)) response.set_cookie(app.session_cookie_name, val, expires=expires, httponly=httponly, domain=domain, path=path, secure=secure) from flask.debughelpers import UnexpectedUnicodeError
apache-2.0
pfschwartz/openelisglobal-core
liquibase/OE2.8/testCatalogCI_LNSP/scripts/sampleType.py
18
1751
#!/usr/bin/env python # -*- coding: utf-8 -*- existing_types = [] sample_types = [] sample_type_file = open("sampleType.txt") existing_types_file = open("currentSampleTypes.txt") results = open("output/sampleTypeOutput.txt", 'w') def write_massive_type_of_sample(): results.write("\nPaste following in SampleType.sql\n\n") sql_insert = "INSERT INTO type_of_sample( id, description, domain, lastupdated, local_abbrev, display_key, is_active )\n\tVALUES (" for line in sample_types: if line not in existing_types and len(line) > 1: results.write(sql_insert) results.write( " nextval( 'type_of_sample_seq' ) , '" + line + "','H', now() , '" + line[:10] + "', 'sample.type." + line.split()[0] + "', 'Y');\n" ); def write_sample_type_order(): results.write("\nPaste following in TypeOrder.sql\n\n") order = 10 for line in sample_types: results.write("update clinlims.type_of_sample set sort_order=" + str(order) + " where description ILIKE '" + line + "';\n") order = order + 10 def write_inactive_list(): results.write('\nPaste following in TestCatalogUpdate.xml in the set inactive list\n\n') for line in existing_types: if line not in sample_types: results.write(line + ', ') for line in sample_type_file: if len(line) > 1: if line.strip() not in sample_types: sample_types.append(line.strip()) for line in existing_types_file: if len(line) > 0: existing_types.append(line.strip()) existing_types_file.close() write_massive_type_of_sample() write_sample_type_order() write_inactive_list() print "Done check file sampleTypeOutput.txt" results.close();
mpl-2.0
andialbrecht/runsqlrun
rsr/app.py
1
4361
import os import json from functools import partial import xdg.BaseDirectory from gi.repository import Gio, Gtk from rsr import config from rsr.connections.manager import ConnectionManager from rsr.mainwin import MainWindow from rsr.preferences import PreferencesDialog class Application(Gtk.Application): def __init__(self, args): super(Application, self).__init__( application_id='org.runsqlrun', flags=Gio.ApplicationFlags.FLAGS_NONE) self.args = args self.win = None def build_app_menu(self): builder = Gtk.Builder() builder.add_from_resource('/org/runsqlrun/appmenu.ui') menu = builder.get_object('appmenu') self.set_app_menu(menu) newEditorAction = Gio.SimpleAction.new('editor-new', None) newEditorAction.connect('activate', self.on_new_editor) self.add_action(newEditorAction) closeEditorAction = Gio.SimpleAction.new('editor-close', None) closeEditorAction.connect('activate', self.on_close_editor) self.add_action(closeEditorAction) preferencesAction = Gio.SimpleAction.new('preferences', None) preferencesAction.connect('activate', self.on_show_preferences) self.add_action(preferencesAction) quitAction = Gio.SimpleAction.new('quit', None) quitAction.connect('activate', self.on_quit) self.add_action(quitAction) def on_new_editor(self, *args): self.win.docview.add_worksheet() def on_close_editor(self, *args): self.win.docview.close_current_editor() def on_show_preferences(self, *args): dlg = PreferencesDialog(self) dlg.run() dlg.destroy() def on_quit(self, *args): self.win.destroy() def do_window_removed(self, window): self.connection_manager.shutdown() state = window.save_state() statefile = os.path.join( xdg.BaseDirectory.save_config_path('runsqlrun'), 'state') with open(statefile, 'w') as f: json.dump(state, f) self.config.save() Gtk.Application.do_window_removed(self, window) def do_startup(self): Gtk.Application.do_startup(self) self.build_app_menu() def _generic_callback(self, group, callback, cbargs, *args): if group == 'editor': cb = 
self.win.docview.get_current_editor() else: cb = self for part in callback.split('.'): cb = getattr(cb, part) cb(*cbargs) return True def on_use_dark_theme(self, *args): Gtk.Settings.get_default().set_property( 'gtk-application-prefer-dark-theme', self.config.ui_dark_theme) def do_activate(self): self.connection_manager = ConnectionManager(self) self.config = config.load() self.config.connect('notify::ui-dark-theme', self.on_use_dark_theme) self.on_use_dark_theme() self.action_groups = {} accel_group = Gtk.AccelGroup() commands = self.config.get_commands() for group_key in commands: group = Gio.SimpleActionGroup() self.action_groups[group_key] = group data = commands[group_key] for action_key in data['actions']: action_data = data['actions'][action_key] action = Gio.SimpleAction.new( '{}_{}'.format(group_key, action_key), None) callback = partial(self._generic_callback, group_key, action_data['callback'], action_data.get('args', ())) action.connect('activate', callback) group.insert(action) key, mod = Gtk.accelerator_parse(action_data['shortcut']) accel_group.connect(key, mod, Gtk.AccelFlags.VISIBLE, callback) self.add_action(action) if self.win is None: self.win = MainWindow(self) statefile = os.path.join( xdg.BaseDirectory.save_config_path('runsqlrun'), 'state') if os.path.isfile(statefile): with open(statefile) as f: state = json.load(f) self.win.restore_state(state) self.win.add_accel_group(accel_group) self.win.present()
mit
tmcdonnell87/bananas
config/settings/production.py
1
5856
# -*- coding: utf-8 -*- """ Production Configurations - Use Amazon's S3 for storing static files and uploaded media - Use mailgun to send emails - Use Redis for cache """ from __future__ import absolute_import, unicode_literals from boto.s3.connection import OrdinaryCallingFormat from django.utils import six from .base import ( # Import by name for pep8 DATABASES, INSTALLED_APPS, MIDDLEWARE, TEMPLATES, env ) from .base import * # noqa # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ SECRET_KEY = env('DJANGO_SECRET_KEY') # This ensures that Django will be able to detect a secure connection # properly on Heroku. SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Use Whitenoise to serve static files # See: https://whitenoise.readthedocs.io/ WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ] MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE # SECURITY CONFIGURATION # ------------------------------------------------------------------------------ # See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security # and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy # set this to 60 seconds and then to 518400 when you can prove it works SECURE_HSTS_SECONDS = 60 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool( 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True) SECURE_CONTENT_TYPE_NOSNIFF = env.bool( 'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True) SECURE_BROWSER_XSS_FILTER = True # SESSION_COOKIE_SECURE = True SESSION_COOKIE_HTTPONLY = True SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True) # CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True X_FRAME_OPTIONS = 'DENY' # SITE CONFIGURATION # 
------------------------------------------------------------------------------ # Hosts/domain names that are valid for this site # See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['bananasbunch.org', ]) # END SITE CONFIGURATION INSTALLED_APPS += ['gunicorn', ] # STORAGE CONFIGURATION # ------------------------------------------------------------------------------ # Uploaded Media Files # ------------------------ # See: http://django-storages.readthedocs.io/en/latest/index.html INSTALLED_APPS += ['storages', ] AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID', default=None) AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY', default=None) AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME', default=None) # AWS_AUTO_CREATE_BUCKET = True AWS_QUERYSTRING_AUTH = False AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat() # AWS cache settings, don't change unless you know what you're doing: AWS_EXPIRY = 60 * 60 * 24 * 7 # TODO See: https://github.com/jschneier/django-storages/issues/47 # Revert the following and use str after the above-mentioned bug is fixed in # either django-storage-redux or boto AWS_HEADERS = { 'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % ( AWS_EXPIRY, AWS_EXPIRY)) } # URL that handles the media served from MEDIA_ROOT, used for managing # stored files. 
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME # Static Assets # ------------------------ STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' # EMAIL # ------------------------------------------------------------------------------ DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='Bananas Bunch <noreply@bananasbunch.org>') EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[bananas]') SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL) # Anymail with Mailgun INSTALLED_APPS += ['anymail', ] ANYMAIL = { 'MAILGUN_API_KEY': env('MAILGUN_API_KEY', default=None), 'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN', default=None) } EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend' # TEMPLATE CONFIGURATION # ------------------------------------------------------------------------------ # See: # https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader TEMPLATES[0]['OPTIONS']['loaders'] = [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]), ] # DATABASE CONFIGURATION # ------------------------------------------------------------------------------ # Use the Heroku-style specification # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ DATABASES['default'] = env.db('DATABASE_URL') # CACHING # ------------------------------------------------------------------------------ REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0) # Heroku URL does not pass the DB number, so we parse it in CACHES = { 'default': { 'BACKEND': 'django_redis.cache.RedisCache', 'LOCATION': REDIS_LOCATION, 'OPTIONS': { 'CLIENT_CLASS': 'django_redis.client.DefaultClient', 'IGNORE_EXCEPTIONS': True, # mimics memcache behavior. 
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior } } } # Custom Admin URL, use {% url 'admin:index' %} ADMIN_URL = env('DJANGO_ADMIN_URL', default=r'^admin/') # DEBUG # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = env.bool('DJANGO_DEBUG', False)
mit
ionelmc/pytest-benchmark
tests/test_cli.py
1
8317
import sys from collections import namedtuple import py import pytest from _pytest.pytester import LineMatcher pytest_plugins = 'pytester', THIS = py.path.local(__file__) STORAGE = THIS.dirpath('test_storage') @pytest.fixture def testdir(testdir, monkeypatch): return namedtuple('testdir', 'tmpdir,run')( testdir.tmpdir, lambda bin, *args: testdir.run(bin+".exe" if sys.platform == "win32" else bin, *args)) def test_help(testdir): result = testdir.run('py.test-benchmark', '--help') result.stdout.fnmatch_lines([ "usage: py.test-benchmark *", " {help,list,compare} ...", "", "pytest_benchmark's management commands.", "", "optional arguments:", " -h [COMMAND], --help [COMMAND]", " Display help and exit.", " --storage URI, -s URI", " Specify a path to store the runs as uri in form", " file://path or elasticsearch+http[s]://host1,host2/[in", " dex/doctype?project_name=Project] (when --benchmark-", " save or --benchmark-autosave are used). For backwards", " compatibility unexpected values are converted to", " file://<value>. 
Default: 'file://./.benchmarks'.", " --verbose, -v Dump diagnostic and progress information.", "", "commands:", " {help,list,compare}", " help Display help and exit.", " list List saved runs.", " compare Compare saved runs.", ]) assert result.ret == 0 def test_help_command(testdir): result = testdir.run('py.test-benchmark', 'help') result.stdout.fnmatch_lines([ 'usage: py.test-benchmark help [-h] [command]', '', 'Display help and exit.', '', 'positional arguments:', ' command', '', 'optional arguments:', ' -h, --help show this help message and exit', ]) @pytest.mark.parametrize('args', ['list --help', 'help list']) def test_help_list(testdir, args): result = testdir.run('py.test-benchmark', *args.split()) result.stdout.fnmatch_lines([ "usage: py.test-benchmark list [-h]", "", "List saved runs.", "", "optional arguments:", " -h, --help show this help message and exit", ]) assert result.ret == 0 @pytest.mark.parametrize('args', ['compare --help', 'help compare']) def test_help_compare(testdir, args): result = testdir.run('py.test-benchmark', *args.split()) result.stdout.fnmatch_lines([ "usage: py.test-benchmark compare [-h] [--sort COL] [--group-by LABEL]", " [--columns LABELS] [--name FORMAT]", " [--histogram [FILENAME-PREFIX]]", " [--csv [FILENAME]]", " [[]glob_or_file *[]]", "", "Compare saved runs.", "", "positional arguments:", " glob_or_file Glob or exact path for json files. If not specified", " all runs are loaded.", "", "optional arguments:", " -h, --help show this help message and exit", " --sort COL Column to sort on. Can be one of: 'min', 'max',", " 'mean', 'stddev', 'name', 'fullname'. Default: 'min'", " --group-by LABEL How to group tests. Can be one of: 'group', 'name',", " 'fullname', 'func', 'fullfunc', 'param' or", " 'param:NAME', where NAME is the name passed to", " @pytest.parametrize. Default: 'group'", " --columns LABELS Comma-separated list of columns to show in the result", " table. 
Default: 'min, max, mean, stddev, median, iqr,", " outliers, ops, rounds, iterations'", " --name FORMAT How to format names in results. Can be one of 'short',", " 'normal', 'long', or 'trial'. Default: 'normal'", " --histogram [FILENAME-PREFIX]", " Plot graphs of min/max/avg/stddev over time in", " FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX", " contains slashes ('/') then directories will be", " created. Default: 'benchmark_*'", " --csv [FILENAME] Save a csv report. If FILENAME contains slashes ('/')", " then directories will be created. Default:", " 'benchmark_*'", "", "examples:", "", " pytest-benchmark compare 'Linux-CPython-3.5-64bit/*'", "", " Loads all benchmarks ran with that interpreter. Note the special quoting that disables your shell's " "glob", " expansion.", "", " pytest-benchmark compare 0001", "", " Loads first run from all the interpreters.", "", " pytest-benchmark compare /foo/bar/0001_abc.json /lorem/ipsum/0001_sir_dolor.json", "", " Loads runs from exactly those files.", ]) assert result.ret == 0 def test_list(testdir): result = testdir.run('py.test-benchmark', '--storage', STORAGE, 'list') assert result.stderr.lines == [] result.stdout.fnmatch_lines([ '*0001_*.json', '*0002_*.json', '*0003_*.json', '*0004_*.json', '*0005_*.json', '*0006_*.json', '*0007_*.json', '*0008_*.json', '*0009_*.json', '*0010_*.json', '*0011_*.json', '*0012_*.json', '*0013_*.json', '*0014_*.json', '*0015_*.json', '*0016_*.json', '*0017_*.json', '*0018_*.json', '*0019_*.json', '*0020_*.json', '*0021_*.json', '*0022_*.json', '*0023_*.json', '*0024_*.json', '*0025_*.json', '*0026_*.json', '*0027_*.json', '*0028_*.json', '*0029_*.json', '*0030_*.json', ]) assert result.ret == 0 @pytest.mark.parametrize('name,name_pattern_generator', [ ('short', lambda n: '*xfast_parametrized[[]0[]] ' '(%.4d*)' % n), ('long', lambda n: '*xfast_parametrized[[]0[]] ' '(%.4d*)' % n), ('normal', lambda n: '*xfast_parametrized[[]0[]] ' '(%.4d*)' % n), ('trial', lambda n: '%.4d*' % n) ]) def 
test_compare(testdir, name, name_pattern_generator): result = testdir.run('py.test-benchmark', '--storage', STORAGE, 'compare', '0001', '0002', '0003', '--sort', 'min', '--columns', 'min,max', '--name', name, '--histogram', 'foobar', '--csv', 'foobar') result.stderr.fnmatch_lines([ 'Generated csv: *foobar.csv' ]) LineMatcher(testdir.tmpdir.join('foobar.csv').readlines(cr=0)).fnmatch_lines([ "name,min,max", "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.15628567*e-07,1.03186158*e-05", "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.16902756*e-07,7.73929968*e-06", "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.17314542*e-07,1.14473891*e-05", "" ]) result.stdout.fnmatch_lines([ '---*--- benchmark: 3 tests ---*---', 'Name (time in ns) * Min * Max ', '---*---', '%s * 215.6286 (1.0) 10*318.6159 (1.33) ' % name_pattern_generator(3), '%s * 216.9028 (1.01) 7*739.2997 (1.0) ' % name_pattern_generator(2), '%s * 217.3145 (1.01) 11*447.3891 (1.48) ' % name_pattern_generator(1), '---*---', '', 'Legend:', ' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.', ]) assert result.ret == 0
bsd-2-clause
JFriel/honours_project
venv/lib/python2.7/site-packages/numpy/compat/_inspect.py
114
7553
"""Subset of inspect module from upstream python We use this instead of upstream because upstream inspect is slow to import, and significanly contributes to numpy import times. Importing this copy has almost no overhead. """ from __future__ import division, absolute_import, print_function import types __all__ = ['getargspec', 'formatargspec'] # ----------------------------------------------------------- type-checking def ismethod(object): """Return true if the object is an instance method. Instance method objects provide these attributes: __doc__ documentation string __name__ name with which this method was defined im_class class object in which this method belongs im_func function object containing implementation of method im_self instance to which this method is bound, or None """ return isinstance(object, types.MethodType) def isfunction(object): """Return true if the object is a user-defined function. Function objects provide these attributes: __doc__ documentation string __name__ name with which this function was defined func_code code object containing compiled function bytecode func_defaults tuple of any default values for arguments func_doc (same as __doc__) func_globals global namespace in which this function was defined func_name (same as __name__) """ return isinstance(object, types.FunctionType) def iscode(object): """Return true if the object is a code object. 
Code objects provide these attributes: co_argcount number of arguments (not including * or ** args) co_code string of raw compiled bytecode co_consts tuple of constants used in the bytecode co_filename name of file in which this code object was created co_firstlineno number of first line in Python source code co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg co_lnotab encoded mapping of line numbers to bytecode indices co_name name with which this code object was defined co_names tuple of names of local variables co_nlocals number of local variables co_stacksize virtual machine stack space required co_varnames tuple of names of arguments and local variables """ return isinstance(object, types.CodeType) # ------------------------------------------------ argument list extraction # These constants are from Python's compile.h. CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 def getargs(co): """Get information about the arguments accepted by a code object. Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and 'varargs' and 'varkw' are the names of the * and ** arguments or None. """ if not iscode(co): raise TypeError('arg is not a code object') nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) # The following acrobatics are for anonymous (tuple) arguments. # Which we do not need to support, so remove to avoid importing # the dis module. for i in range(nargs): if args[i][:1] in ['', '.']: raise TypeError("tuple function arguments are not supported") varargs = None if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 varkw = None if co.co_flags & CO_VARKEYWORDS: varkw = co.co_varnames[nargs] return args, varargs, varkw def getargspec(func): """Get the names and default values of a function's arguments. A tuple of four things is returned: (args, varargs, varkw, defaults). 
'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'defaults' is an n-tuple of the default values of the last n arguments. """ if ismethod(func): func = func.__func__ if not isfunction(func): raise TypeError('arg is not a Python function') args, varargs, varkw = getargs(func.__code__) return args, varargs, varkw, func.__defaults__ def getargvalues(frame): """Get information about arguments passed into a particular frame. A tuple of four things is returned: (args, varargs, varkw, locals). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'locals' is the locals dictionary of the given frame. """ args, varargs, varkw = getargs(frame.f_code) return args, varargs, varkw, frame.f_locals def joinseq(seq): if len(seq) == 1: return '(' + seq[0] + ',)' else: return '(' + ', '.join(seq) + ')' def strseq(object, convert, join=joinseq): """Recursively walk a sequence, stringifying each element. """ if type(object) in [list, tuple]: return join([strseq(_o, convert, join) for _o in object]) else: return convert(object) def formatargspec(args, varargs=None, varkw=None, defaults=None, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq): """Format an argument spec from the 4 values returned by getargspec. The first four arguments are (args, varargs, varkw, defaults). The other four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments. 
""" specs = [] if defaults: firstdefault = len(args) - len(defaults) for i in range(len(args)): spec = strseq(args[i], formatarg, join) if defaults and i >= firstdefault: spec = spec + formatvalue(defaults[i - firstdefault]) specs.append(spec) if varargs is not None: specs.append(formatvarargs(varargs)) if varkw is not None: specs.append(formatvarkw(varkw)) return '(' + ', '.join(specs) + ')' def formatargvalues(args, varargs, varkw, locals, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq): """Format an argument spec from the 4 values returned by getargvalues. The first four arguments are (args, varargs, varkw, locals). The next four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments. """ def convert(name, locals=locals, formatarg=formatarg, formatvalue=formatvalue): return formatarg(name) + formatvalue(locals[name]) specs = [] for i in range(len(args)): specs.append(strseq(args[i], convert, join)) if varargs: specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) if varkw: specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) return '(' + ', '.join(specs) + ')'
gpl-3.0
qedsoftware/commcare-hq
corehq/apps/reports/commtrack/maps.py
1
6996
from django.utils.translation import ugettext_noop from corehq.apps.products.models import Product from django.template.loader import render_to_string from corehq.apps.reports.commtrack.standard import CommtrackReportMixin from corehq.apps.reports.standard.maps import GenericMapReport from corehq.apps.style.decorators import use_maps class StockStatusMapReport(GenericMapReport, CommtrackReportMixin): name = ugettext_noop("Stock Status (map)") slug = "stockstatus_map" fields = [ 'corehq.apps.reports.filters.fixtures.AsyncLocationFilter', 'corehq.apps.reports.filters.commtrack.ProgramFilter', ] data_source = { 'adapter': 'report', 'geo_column': 'geo', 'report': 'corehq.apps.reports.commtrack.data_sources.StockStatusBySupplyPointDataSource', } @use_maps def decorator_dispatcher(self, request, *args, **kwargs): super(StockStatusMapReport, self).decorator_dispatcher(request, *args, **kwargs) @property def display_config(self): conf = { 'name_column': 'name', 'detail_columns': ['type'], 'table_columns': ['type'], 'column_titles': { 'type': 'Supply Point Type', }, 'enum_captions': {}, 'numeric_format': {}, 'metrics': [ { 'color': { 'column': 'type', }, }, ], } titles = { 'current_stock': 'Stock on Hand', 'consumption': 'Monthly Consumption', 'months_remaining': 'Months of Stock Remaining', 'category': 'Current Stock Status', } products = sorted( Product.by_domain(self.domain), key=lambda p: p.name ) if self.program_id: products = filter(lambda c: c.program_id == self.program_id, products) for p in products: col_id = lambda c: '%s-%s' % (p._id, c) product_cols = [] for c in ('category', 'current_stock', 'months_remaining', 'consumption'): conf['column_titles'][col_id(c)] = titles[c] product_cols.append(col_id(c)) conf['detail_columns'].extend(product_cols) product_metrics = [ { 'icon': { 'column': col_id('category'), 'categories': { 'stockout': '/static/commtrack/img/stockout.png', 'understock': '/static/commtrack/img/warning.png', 'adequate': 
'/static/commtrack/img/goodstock.png', 'overstock': '/static/commtrack/img/overstock.png', '_null': '/static/commtrack/img/no_data.png', }, } } ] conf['enum_captions'][col_id('category')] = { 'stockout': 'Stocked out', 'understock': 'Under-stock', 'adequate': 'Adequate Stock', 'overstock': 'Over-stock', '_null': 'No Data', } for c in ('current_stock', 'months_remaining', 'consumption'): metric = { 'title': conf['column_titles'][col_id(c)], 'size': { 'column': col_id(c), }, } if c not in ('consumption',): metric['color'] = { 'column': col_id('category'), 'categories': { 'stockout': 'rgba(255, 0, 0, .8)', 'understock': 'rgba(255, 120, 0, .8)', 'adequate': 'rgba(50, 200, 50, .8)', 'overstock': 'rgba(120, 0, 255, .8)', '_null': 'rgba(128, 128, 128, .8)', }, } else: metric['color'] = 'rgba(120, 120, 255, .8)' product_metrics.append(metric) conf['numeric_format'][col_id(c)] = { 'current_stock': "return x + ' %s'" % (p.unit or 'unit'), 'months_remaining': "return (Math.round(10 * x) / 10) + (x == 1 ? 
' month' : ' months')", 'consumption': "return (Math.round(10 * x) / 10) + ' %s / month'" % (p.unit or 'unit'), }[c] conf['metrics'].append({ 'title': p.name, 'group': True, 'children': product_metrics, }) conf['table_columns'].append({ 'title': p.name, 'subcolumns': product_cols, }) conf['detail_template'] = render_to_string('reports/partials/commtrack/stockstatus_mapdetail.html', { 'products': products, 'columns': [{'id': c, 'title': titles[c]} for c in ('category', 'current_stock', 'consumption', 'months_remaining')], }) conf['display'] = { 'table': False, } return conf class ReportingStatusMapReport(GenericMapReport, CommtrackReportMixin): name = ugettext_noop("Reporting Status (map)") slug = "reportingstatus_map" fields = [ 'corehq.apps.reports.filters.fixtures.AsyncLocationFilter', 'corehq.apps.reports.filters.commtrack.ProgramFilter', 'corehq.apps.reports.filters.forms.FormsByApplicationFilter', 'corehq.apps.reports.filters.dates.DatespanFilter', ] data_source = { 'adapter': 'report', 'geo_column': 'geo', 'report': 'corehq.apps.reports.commtrack.data_sources.ReportingStatusDataSource', } display_config = { 'name_column': 'name', 'detail_columns': ['type', 'reporting_status'], 'column_titles': { 'type': 'Supply Point Type', 'reporting_status': 'Current Reporting Status', }, 'enum_captions': { 'reporting_status': { 'reporting': 'Reporting', 'nonreporting': 'Non-reporting', }, }, 'metrics': [ { 'color': { 'column': 'type', }, }, { 'default': True, 'color': { 'column': 'reporting_status', 'categories': { 'reporting': 'rgba(0, 200, 0, .8)', 'nonreporting': 'rgba(255, 0, 0, .8)', }, }, }, ], } @use_maps def decorator_dispatcher(self, request, *args, **kwargs): super(ReportingStatusMapReport, self).decorator_dispatcher(request, *args, **kwargs)
bsd-3-clause
agrista/odoo-saas
addons/account/report/account_invoice_report.py
60
12934
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import tools import openerp.addons.decimal_precision as dp from openerp.osv import fields,osv class account_invoice_report(osv.osv): _name = "account.invoice.report" _description = "Invoices Statistics" _auto = False _rec_name = 'date' def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None): """Compute the amounts in the currency of the user """ if context is None: context={} currency_obj = self.pool.get('res.currency') currency_rate_obj = self.pool.get('res.currency.rate') user_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id currency_rate_id = currency_rate_obj.search(cr, uid, [('rate', '=', 1)], limit=1, context=context)[0] base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id res = {} ctx = context.copy() for item in self.browse(cr, uid, ids, context=context): ctx['date'] = item.date price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, 
context=ctx) price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx) residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx) res[item.id] = { 'user_currency_price_total': price_total, 'user_currency_price_average': price_average, 'user_currency_residual': residual, } return res _columns = { 'date': fields.date('Date', readonly=True), 'product_id': fields.many2one('product.product', 'Product', readonly=True), 'product_qty':fields.float('Product Quantity', readonly=True), 'uom_name': fields.char('Reference Unit of Measure', size=128, readonly=True), 'payment_term': fields.many2one('account.payment.term', 'Payment Term', readonly=True), 'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], readonly=True), 'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True), 'currency_id': fields.many2one('res.currency', 'Currency', readonly=True), 'categ_id': fields.many2one('product.category','Category of Product', readonly=True), 'journal_id': fields.many2one('account.journal', 'Journal', readonly=True), 'partner_id': fields.many2one('res.partner', 'Partner', readonly=True), 'commercial_partner_id': fields.many2one('res.partner', 'Partner Company', help="Commercial Entity"), 'company_id': fields.many2one('res.company', 'Company', readonly=True), 'user_id': fields.many2one('res.users', 'Salesperson', readonly=True), 'price_total': fields.float('Total Without Tax', readonly=True), 'user_currency_price_total': fields.function(_compute_amounts_in_user_currency, string="Total Without Tax", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"), 'price_average': fields.float('Average Price', readonly=True, group_operator="avg"), 'user_currency_price_average': fields.function(_compute_amounts_in_user_currency, string="Average Price", type='float', 
digits_compute=dp.get_precision('Account'), multi="_compute_amounts"), 'currency_rate': fields.float('Currency Rate', readonly=True), 'nbr': fields.integer('# of Invoices', readonly=True), # TDE FIXME master: rename into nbr_lines 'type': fields.selection([ ('out_invoice','Customer Invoice'), ('in_invoice','Supplier Invoice'), ('out_refund','Customer Refund'), ('in_refund','Supplier Refund'), ],'Type', readonly=True), 'state': fields.selection([ ('draft','Draft'), ('proforma','Pro-forma'), ('proforma2','Pro-forma'), ('open','Open'), ('paid','Done'), ('cancel','Cancelled') ], 'Invoice Status', readonly=True), 'date_due': fields.date('Due Date', readonly=True), 'account_id': fields.many2one('account.account', 'Account',readonly=True), 'account_line_id': fields.many2one('account.account', 'Account Line',readonly=True), 'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',readonly=True), 'residual': fields.float('Total Residual', readonly=True), 'user_currency_residual': fields.function(_compute_amounts_in_user_currency, string="Total Residual", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"), 'country_id': fields.many2one('res.country', 'Country of the Partner Company'), } _order = 'date desc' _depends = { 'account.invoice': [ 'account_id', 'amount_total', 'commercial_partner_id', 'company_id', 'currency_id', 'date_due', 'date_invoice', 'fiscal_position', 'journal_id', 'partner_bank_id', 'partner_id', 'payment_term', 'period_id', 'residual', 'state', 'type', 'user_id', ], 'account.invoice.line': [ 'account_id', 'invoice_id', 'price_subtotal', 'product_id', 'quantity', 'uos_id', ], 'product.product': ['product_tmpl_id'], 'product.template': ['categ_id'], 'product.uom': ['category_id', 'factor', 'name', 'uom_type'], 'res.currency.rate': ['currency_id', 'name'], 'res.partner': ['country_id'], } def _select(self): select_str = """ SELECT sub.id, sub.date, sub.product_id, sub.partner_id, sub.country_id, sub.payment_term, 
sub.period_id, sub.uom_name, sub.currency_id, sub.journal_id, sub.fiscal_position, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state, sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id, sub.product_qty, sub.price_total / cr.rate as price_total, sub.price_average /cr.rate as price_average, cr.rate as currency_rate, sub.residual / cr.rate as residual, sub.commercial_partner_id as commercial_partner_id """ return select_str def _sub_select(self): select_str = """ SELECT min(ail.id) AS id, ai.date_invoice AS date, ail.product_id, ai.partner_id, ai.payment_term, ai.period_id, CASE WHEN u.uom_type::text <> 'reference'::text THEN ( SELECT product_uom.name FROM product_uom WHERE product_uom.uom_type::text = 'reference'::text AND product_uom.active AND product_uom.category_id = u.category_id LIMIT 1) ELSE u.name END AS uom_name, ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id, count(ail.*) AS nbr, ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id, ai.partner_bank_id, SUM(CASE WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text]) THEN (- ail.quantity) / u.factor ELSE ail.quantity / u.factor END) AS product_qty, SUM(CASE WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text]) THEN - ail.price_subtotal ELSE ail.price_subtotal END) AS price_total, CASE WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text]) THEN SUM(- ail.price_subtotal) ELSE SUM(ail.price_subtotal) END / CASE WHEN SUM(ail.quantity / u.factor) <> 0::numeric THEN CASE WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text]) THEN SUM((- ail.quantity) / u.factor) ELSE SUM(ail.quantity / u.factor) END ELSE 1::numeric END AS price_average, CASE WHEN ai.type::text = ANY 
(ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text]) THEN - ai.residual ELSE ai.residual END / CASE WHEN (( SELECT count(l.id) AS count FROM account_invoice_line l LEFT JOIN account_invoice a ON a.id = l.invoice_id WHERE a.id = ai.id)) <> 0 THEN ( SELECT count(l.id) AS count FROM account_invoice_line l LEFT JOIN account_invoice a ON a.id = l.invoice_id WHERE a.id = ai.id) ELSE 1::bigint END::numeric AS residual, ai.commercial_partner_id as commercial_partner_id, partner.country_id """ return select_str def _from(self): from_str = """ FROM account_invoice_line ail JOIN account_invoice ai ON ai.id = ail.invoice_id JOIN res_partner partner ON ai.commercial_partner_id = partner.id LEFT JOIN product_product pr ON pr.id = ail.product_id left JOIN product_template pt ON pt.id = pr.product_tmpl_id LEFT JOIN product_uom u ON u.id = ail.uos_id """ return from_str def _group_by(self): group_by_str = """ GROUP BY ail.product_id, ai.date_invoice, ai.id, ai.partner_id, ai.payment_term, ai.period_id, u.name, ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual, ai.amount_total, u.uom_type, u.category_id, ai.commercial_partner_id, partner.country_id """ return group_by_str def init(self, cr): # self._table = account_invoice_report tools.drop_view_if_exists(cr, self._table) cr.execute("""CREATE or REPLACE VIEW %s as ( %s FROM ( %s %s %s ) AS sub JOIN res_currency_rate cr ON (cr.currency_id = sub.currency_id) WHERE cr.id IN (SELECT id FROM res_currency_rate cr2 WHERE (cr2.currency_id = sub.currency_id) AND ((sub.date IS NOT NULL AND cr2.name <= sub.date) OR (sub.date IS NULL AND cr2.name <= NOW())) ORDER BY name DESC LIMIT 1) )""" % ( self._table, self._select(), self._sub_select(), self._from(), self._group_by())) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
larsks/packstack
tests/installer/test_setup_params.py
14
3413
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Test cases for packstack.installer.core.parameters module.
"""

from unittest import TestCase

from ..test_base import PackstackTestCaseMixin
from packstack.installer.core.parameters import *


class ParameterTestCase(PackstackTestCaseMixin, TestCase):
    """Tests for the Parameter class."""

    def setUp(self):
        """Build a full kwargs dict; each key should become an attribute."""
        super(ParameterTestCase, self).setUp()
        self.data = {
            "CMD_OPTION": "mysql-host",
            "USAGE": ("The IP address of the server on which to "
                      "install MySQL"),
            "PROMPT": "Enter the IP address of the MySQL server",
            "OPTION_LIST": [],
            "VALIDATORS": [],
            "DEFAULT_VALUE": "127.0.0.1",
            "MASK_INPUT": False,
            "LOOSE_VALIDATION": True,
            "CONF_NAME": "CONFIG_MYSQL_HOST",
            "USE_DEFAULT": False,
            "NEED_CONFIRM": False,
            "CONDITION": False}

    def test_parameter_init(self):
        """
        Test packstack.installer.core.parameters.Parameter initialization
        """
        param = Parameter(self.data)
        # items() instead of the Python-2-only iteritems() so the test
        # also runs under Python 3.
        for key, value in self.data.items():
            self.assertEqual(getattr(param, key), value)

    def test_default_attribute(self):
        """
        Test packstack.installer.core.parameters.Parameter default value
        """
        param = Parameter()
        self.assertIsNone(param.PROCESSORS)


class GroupTestCase(PackstackTestCaseMixin, TestCase):
    """Tests for the Group class."""

    def setUp(self):
        """Build group attributes and a list of member parameter dicts."""
        super(GroupTestCase, self).setUp()
        self.attrs = {
            "GROUP_NAME": "MYSQL",
            "DESCRIPTION": "MySQL Config parameters",
            "PRE_CONDITION": "y",
            "PRE_CONDITION_MATCH": "y",
            "POST_CONDITION": False,
            "POST_CONDITION_MATCH": False}
        self.params = [
            {"CONF_NAME": "CONFIG_MYSQL_HOST", "PROMPT": "find_me"},
            {"CONF_NAME": "CONFIG_MYSQL_USER"},
            {"CONF_NAME": "CONFIG_MYSQL_PW"}]

    def test_group_init(self):
        """
        Test packstack.installer.core.parameters.Group initialization
        """
        group = Group(attributes=self.attrs, parameters=self.params)
        # items() instead of the Python-2-only iteritems() so the test
        # also runs under Python 3.
        for key, value in self.attrs.items():
            self.assertEqual(getattr(group, key), value)
        for param in self.params:
            self.assertIn(param['CONF_NAME'], group.parameters)

    def test_search(self):
        """
        Test packstack.installer.core.parameters.Group search method
        """
        group = Group(attributes=self.attrs, parameters=self.params)
        param_list = group.search('PROMPT', 'find_me')
        self.assertEqual(len(param_list), 1)
        self.assertIsInstance(param_list[0], Parameter)
        self.assertEqual(param_list[0].CONF_NAME, 'CONFIG_MYSQL_HOST')
apache-2.0
BaconPancakes/valor
lib/youtube_dl/extractor/tunein.py
40
5891
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError from ..compat import compat_urlparse class TuneInBaseIE(InfoExtractor): _API_BASE_URL = 'http://tunein.com/tuner/tune/' @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+src=["\'](?P<url>(?:https?://)?tunein\.com/embed/player/[pst]\d+)', webpage) def _real_extract(self, url): content_id = self._match_id(url) content_info = self._download_json( self._API_BASE_URL + self._API_URL_QUERY % content_id, content_id, note='Downloading JSON metadata') title = content_info['Title'] thumbnail = content_info.get('Logo') location = content_info.get('Location') streams_url = content_info.get('StreamUrl') if not streams_url: raise ExtractorError('No downloadable streams found', expected=True) if not streams_url.startswith('http://'): streams_url = compat_urlparse.urljoin(url, streams_url) streams = self._download_json( streams_url, content_id, note='Downloading stream data', transform_source=lambda s: re.sub(r'^\s*\((.*)\);\s*$', r'\1', s))['Streams'] is_live = None formats = [] for stream in streams: if stream.get('Type') == 'Live': is_live = True reliability = stream.get('Reliability') format_note = ( 'Reliability: %d%%' % reliability if reliability is not None else None) formats.append({ 'preference': ( 0 if reliability is None or reliability > 90 else 1), 'abr': stream.get('Bandwidth'), 'ext': stream.get('MediaType').lower(), 'acodec': stream.get('MediaType'), 'vcodec': 'none', 'url': stream.get('Url'), 'source_preference': reliability, 'format_note': format_note, }) self._sort_formats(formats) return { 'id': content_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'location': location, 'is_live': is_live, } class TuneInClipIE(TuneInBaseIE): IE_NAME = 'tunein:clip' _VALID_URL = r'https?://(?:www\.)?tunein\.com/station/.*?audioClipId\=(?P<id>\d+)' _API_URL_QUERY = '?tuneType=AudioClip&audioclipId=%s' 
_TESTS = [{ 'url': 'http://tunein.com/station/?stationId=246119&audioClipId=816', 'md5': '99f00d772db70efc804385c6b47f4e77', 'info_dict': { 'id': '816', 'title': '32m', 'ext': 'mp3', }, }] class TuneInStationIE(TuneInBaseIE): IE_NAME = 'tunein:station' _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-s|station/.*?StationId=|embed/player/s)(?P<id>\d+)' _API_URL_QUERY = '?tuneType=Station&stationId=%s' @classmethod def suitable(cls, url): return False if TuneInClipIE.suitable(url) else super(TuneInStationIE, cls).suitable(url) _TESTS = [{ 'url': 'http://tunein.com/radio/Jazz24-885-s34682/', 'info_dict': { 'id': '34682', 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', 'ext': 'mp3', 'location': 'Tacoma, WA', }, 'params': { 'skip_download': True, # live stream }, }, { 'url': 'http://tunein.com/embed/player/s6404/', 'only_matching': True, }] class TuneInProgramIE(TuneInBaseIE): IE_NAME = 'tunein:program' _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-p|program/.*?ProgramId=|embed/player/p)(?P<id>\d+)' _API_URL_QUERY = '?tuneType=Program&programId=%s' _TESTS = [{ 'url': 'http://tunein.com/radio/Jazz-24-p2506/', 'info_dict': { 'id': '2506', 'title': 'Jazz 24 on 91.3 WUKY-HD3', 'ext': 'mp3', 'location': 'Lexington, KY', }, 'params': { 'skip_download': True, # live stream }, }, { 'url': 'http://tunein.com/embed/player/p191660/', 'only_matching': True, }] class TuneInTopicIE(TuneInBaseIE): IE_NAME = 'tunein:topic' _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:topic/.*?TopicId=|embed/player/t)(?P<id>\d+)' _API_URL_QUERY = '?tuneType=Topic&topicId=%s' _TESTS = [{ 'url': 'http://tunein.com/topic/?TopicId=101830576', 'md5': 'c31a39e6f988d188252eae7af0ef09c9', 'info_dict': { 'id': '101830576', 'title': 'Votez pour moi du 29 octobre 2015 (29/10/15)', 'ext': 'mp3', 'location': 'Belgium', }, }, { 'url': 'http://tunein.com/embed/player/t101830576/', 'only_matching': True, }] class TuneInShortenerIE(InfoExtractor): IE_NAME = 'tunein:shortener' IE_DESC = False # 
Do not list _VALID_URL = r'https?://tun\.in/(?P<id>[A-Za-z0-9]+)' _TEST = { # test redirection 'url': 'http://tun.in/ser7s', 'info_dict': { 'id': '34682', 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', 'ext': 'mp3', 'location': 'Tacoma, WA', }, 'params': { 'skip_download': True, # live stream }, } def _real_extract(self, url): redirect_id = self._match_id(url) # The server doesn't support HEAD requests urlh = self._request_webpage( url, redirect_id, note='Downloading redirect page') url = urlh.geturl() self.to_screen('Following redirect: %s' % url) return self.url_result(url)
gpl-3.0
AiJiaZone/linux-4.0
virt/tools/perf/scripts/python/export-to-postgresql.py
238
25591
# export-to-postgresql.py: export perf data to a postgresql database # Copyright (c) 2014, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. import os import sys import struct import datetime # To use this script you will need to have installed package python-pyside which # provides LGPL-licensed Python bindings for Qt. You will also need the package # libqt4-sql-psql for Qt postgresql support. # # The script assumes postgresql is running on the local machine and that the # user has postgresql permissions to create databases. Examples of installing # postgresql and adding such a user are: # # fedora: # # $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql # $ sudo su - postgres -c initdb # $ sudo service postgresql start # $ sudo su - postgres # $ createuser <your user id here> # Shall the new role be a superuser? (y/n) y # # ubuntu: # # $ sudo apt-get install postgresql # $ sudo su - postgres # $ createuser <your user id here> # Shall the new role be a superuser? (y/n) y # # An example of using this script with Intel PT: # # $ perf record -e intel_pt//u ls # $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls # 2015-05-29 12:49:23.464364 Creating database... # 2015-05-29 12:49:26.281717 Writing to intermediate files... # 2015-05-29 12:49:27.190383 Copying to database... # 2015-05-29 12:49:28.140451 Removing intermediate files... 
# 2015-05-29 12:49:28.147451 Adding primary keys # 2015-05-29 12:49:28.655683 Adding foreign keys # 2015-05-29 12:49:29.365350 Done # # To browse the database, psql can be used e.g. # # $ psql pt_example # pt_example=# select * from samples_view where id < 100; # pt_example=# \d+ # pt_example=# \d+ samples_view # pt_example=# \q # # An example of using the database is provided by the script # call-graph-from-postgresql.py. Refer to that script for details. # # Tables: # # The tables largely correspond to perf tools' data structures. They are largely self-explanatory. # # samples # # 'samples' is the main table. It represents what instruction was executing at a point in time # when something (a selected event) happened. The memory address is the instruction pointer or 'ip'. # # calls # # 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'. # 'calls' is only created when the 'calls' option to this script is specified. # # call_paths # # 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'. # 'calls_paths' is only created when the 'calls' option to this script is specified. # # branch_types # # 'branch_types' provides descriptions for each type of branch. # # comm_threads # # 'comm_threads' shows how 'comms' relates to 'threads'. # # comms # # 'comms' contains a record for each 'comm' - the name given to the executable that is running. # # dsos # # 'dsos' contains a record for each executable file or library. # # machines # # 'machines' can be used to distinguish virtual machines if virtualization is supported. # # selected_events # # 'selected_events' contains a record for each kind of event that has been sampled. # # symbols # # 'symbols' contains a record for each symbol. Only symbols that have samples are present. # # threads # # 'threads' contains a record for each thread. # # Views: # # Most of the tables have views for more friendly display. 
The views are: # # calls_view # call_paths_view # comm_threads_view # dsos_view # machines_view # samples_view # symbols_view # threads_view # # More examples of browsing the database with psql: # Note that some of the examples are not the most optimal SQL query. # Note that call information is only available if the script's 'calls' option has been used. # # Top 10 function calls (not aggregated by symbol): # # SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10; # # Top 10 function calls (aggregated by symbol): # # SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol, # SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count # FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10; # # Note that the branch count gives a rough estimation of cpu usage, so functions # that took a long time but have a relatively low branch count must have spent time # waiting. # # Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'): # # SELECT * FROM symbols_view WHERE name LIKE '%alloc%'; # # Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187): # # SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10; # # Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254): # # SELECT * FROM calls_view WHERE parent_call_path_id = 254; # # Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670) # # SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%'; # # Show transactions: # # SELECT * FROM samples_view WHERE event = 'transactions'; # # Note transaction start has 'in_tx' true whereas, transaction end has 'in_tx' false. 
# Transaction aborts have branch_type_name 'transaction abort' # # Show transaction aborts: # # SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort'; # # To print a call stack requires walking the call_paths table. For example this python script: # #!/usr/bin/python2 # # import sys # from PySide.QtSql import * # # if __name__ == '__main__': # if (len(sys.argv) < 3): # print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>" # raise Exception("Too few arguments") # dbname = sys.argv[1] # call_path_id = sys.argv[2] # db = QSqlDatabase.addDatabase('QPSQL') # db.setDatabaseName(dbname) # if not db.open(): # raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) # query = QSqlQuery(db) # print " id ip symbol_id symbol dso_id dso_short_name" # while call_path_id != 0 and call_path_id != 1: # ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id)) # if not ret: # raise Exception("Query failed: " + query.lastError().text()) # if not query.next(): # raise Exception("Query failed") # print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5)) # call_path_id = query.value(6) from PySide.QtSql import * # Need to access PostgreSQL C library directly to use COPY FROM STDIN from ctypes import * libpq = CDLL("libpq.so.5") PQconnectdb = libpq.PQconnectdb PQconnectdb.restype = c_void_p PQfinish = libpq.PQfinish PQstatus = libpq.PQstatus PQexec = libpq.PQexec PQexec.restype = c_void_p PQresultStatus = libpq.PQresultStatus PQputCopyData = libpq.PQputCopyData PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ] PQputCopyEnd = libpq.PQputCopyEnd PQputCopyEnd.argtypes = [ c_void_p, c_void_p ] sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') # These perf imports are not used at present #from perf_trace_context 
import * #from Core import * perf_db_export_mode = True perf_db_export_calls = False def usage(): print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]" print >> sys.stderr, "where: columns 'all' or 'branches'" print >> sys.stderr, " calls 'calls' => create calls table" raise Exception("Too few arguments") if (len(sys.argv) < 2): usage() dbname = sys.argv[1] if (len(sys.argv) >= 3): columns = sys.argv[2] else: columns = "all" if columns not in ("all", "branches"): usage() branches = (columns == "branches") if (len(sys.argv) >= 4): if (sys.argv[3] == "calls"): perf_db_export_calls = True else: usage() output_dir_name = os.getcwd() + "/" + dbname + "-perf-data" os.mkdir(output_dir_name) def do_query(q, s): if (q.exec_(s)): return raise Exception("Query failed: " + q.lastError().text()) print datetime.datetime.today(), "Creating database..." db = QSqlDatabase.addDatabase('QPSQL') query = QSqlQuery(db) db.setDatabaseName('postgres') db.open() try: do_query(query, 'CREATE DATABASE ' + dbname) except: os.rmdir(output_dir_name) raise query.finish() query.clear() db.close() db.setDatabaseName(dbname) db.open() query = QSqlQuery(db) do_query(query, 'SET client_min_messages TO WARNING') do_query(query, 'CREATE TABLE selected_events (' 'id bigint NOT NULL,' 'name varchar(80))') do_query(query, 'CREATE TABLE machines (' 'id bigint NOT NULL,' 'pid integer,' 'root_dir varchar(4096))') do_query(query, 'CREATE TABLE threads (' 'id bigint NOT NULL,' 'machine_id bigint,' 'process_id bigint,' 'pid integer,' 'tid integer)') do_query(query, 'CREATE TABLE comms (' 'id bigint NOT NULL,' 'comm varchar(16))') do_query(query, 'CREATE TABLE comm_threads (' 'id bigint NOT NULL,' 'comm_id bigint,' 'thread_id bigint)') do_query(query, 'CREATE TABLE dsos (' 'id bigint NOT NULL,' 'machine_id bigint,' 'short_name varchar(256),' 'long_name varchar(4096),' 'build_id varchar(64))') do_query(query, 'CREATE TABLE symbols (' 'id bigint NOT NULL,' 'dso_id bigint,' 
'sym_start bigint,' 'sym_end bigint,' 'binding integer,' 'name varchar(2048))') do_query(query, 'CREATE TABLE branch_types (' 'id integer NOT NULL,' 'name varchar(80))') if branches: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'branch_type integer,' 'in_tx boolean)') else: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'period bigint,' 'weight bigint,' 'transaction bigint,' 'data_src bigint,' 'branch_type integer,' 'in_tx boolean)') if perf_db_export_calls: do_query(query, 'CREATE TABLE call_paths (' 'id bigint NOT NULL,' 'parent_id bigint,' 'symbol_id bigint,' 'ip bigint)') do_query(query, 'CREATE TABLE calls (' 'id bigint NOT NULL,' 'thread_id bigint,' 'comm_id bigint,' 'call_path_id bigint,' 'call_time bigint,' 'return_time bigint,' 'branch_count bigint,' 'call_id bigint,' 'return_id bigint,' 'parent_call_path_id bigint,' 'flags integer)') do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' 'id,' 'pid,' 'root_dir,' 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest' ' FROM machines') do_query(query, 'CREATE VIEW dsos_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'short_name,' 'long_name,' 'build_id' ' FROM dsos') do_query(query, 'CREATE VIEW symbols_view AS ' 'SELECT ' 'id,' 'name,' '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,' 'dso_id,' 'sym_start,' 'sym_end,' 'CASE WHEN 
binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding' ' FROM symbols') do_query(query, 'CREATE VIEW threads_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'process_id,' 'pid,' 'tid' ' FROM threads') do_query(query, 'CREATE VIEW comm_threads_view AS ' 'SELECT ' 'comm_id,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid' ' FROM comm_threads') if perf_db_export_calls: do_query(query, 'CREATE VIEW call_paths_view AS ' 'SELECT ' 'c.id,' 'to_hex(c.ip) AS ip,' 'c.symbol_id,' '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,' '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,' '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,' 'c.parent_id,' 'to_hex(p.ip) AS parent_ip,' 'p.symbol_id AS parent_symbol_id,' '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,' '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,' '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') do_query(query, 'CREATE VIEW calls_view AS ' 'SELECT ' 'calls.id,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'call_path_id,' 'to_hex(ip) AS ip,' 'symbol_id,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'call_time,' 'return_time,' 'return_time - call_time AS elapsed_time,' 'branch_count,' 'call_id,' 'return_id,' 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,' 'parent_call_path_id' ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') do_query(query, 
'CREATE VIEW samples_view AS ' 'SELECT ' 'id,' 'time,' 'cpu,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,' 'to_hex(ip) AS ip_hex,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'sym_offset,' '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,' 'to_hex(to_ip) AS to_ip_hex,' '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,' 'to_sym_offset,' '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' 'in_tx' ' FROM samples') file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0) file_trailer = "\377\377" def open_output_file(file_name): path_name = output_dir_name + "/" + file_name file = open(path_name, "w+") file.write(file_header) return file def close_output_file(file): file.write(file_trailer) file.close() def copy_output_file_direct(file, table_name): close_output_file(file) sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')" do_query(query, sql) # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly def copy_output_file(file, table_name): conn = PQconnectdb("dbname = " + dbname) if (PQstatus(conn)): raise Exception("COPY FROM STDIN PQconnectdb failed") file.write(file_trailer) file.seek(0) sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')" res = PQexec(conn, sql) if (PQresultStatus(res) != 4): raise Exception("COPY FROM STDIN PQexec failed") data = file.read(65536) while (len(data)): ret = PQputCopyData(conn, data, len(data)) if (ret != 1): raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret)) data = file.read(65536) ret = PQputCopyEnd(conn, None) if (ret != 1): raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + 
str(ret)) PQfinish(conn) def remove_output_file(file): name = file.name file.close() os.unlink(name) evsel_file = open_output_file("evsel_table.bin") machine_file = open_output_file("machine_table.bin") thread_file = open_output_file("thread_table.bin") comm_file = open_output_file("comm_table.bin") comm_thread_file = open_output_file("comm_thread_table.bin") dso_file = open_output_file("dso_table.bin") symbol_file = open_output_file("symbol_table.bin") branch_type_file = open_output_file("branch_type_table.bin") sample_file = open_output_file("sample_table.bin") if perf_db_export_calls: call_path_file = open_output_file("call_path_table.bin") call_file = open_output_file("call_table.bin") def trace_begin(): print datetime.datetime.today(), "Writing to intermediate files..." # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs evsel_table(0, "unknown") machine_table(0, 0, "unknown") thread_table(0, 0, 0, -1, -1) comm_table(0, "unknown") dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) if perf_db_export_calls: call_path_table(0, 0, 0, 0) unhandled_count = 0 def trace_end(): print datetime.datetime.today(), "Copying to database..." copy_output_file(evsel_file, "selected_events") copy_output_file(machine_file, "machines") copy_output_file(thread_file, "threads") copy_output_file(comm_file, "comms") copy_output_file(comm_thread_file, "comm_threads") copy_output_file(dso_file, "dsos") copy_output_file(symbol_file, "symbols") copy_output_file(branch_type_file, "branch_types") copy_output_file(sample_file, "samples") if perf_db_export_calls: copy_output_file(call_path_file, "call_paths") copy_output_file(call_file, "calls") print datetime.datetime.today(), "Removing intermediate files..." 
remove_output_file(evsel_file) remove_output_file(machine_file) remove_output_file(thread_file) remove_output_file(comm_file) remove_output_file(comm_thread_file) remove_output_file(dso_file) remove_output_file(symbol_file) remove_output_file(branch_type_file) remove_output_file(sample_file) if perf_db_export_calls: remove_output_file(call_path_file) remove_output_file(call_file) os.rmdir(output_dir_name) print datetime.datetime.today(), "Adding primary keys" do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') print datetime.datetime.today(), "Adding foreign keys" do_query(query, 'ALTER TABLE threads ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE comm_threads ' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE dsos ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)') do_query(query, 'ALTER TABLE symbols ' 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)') do_query(query, 'ALTER TABLE samples ' 'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) 
REFERENCES machines (id),' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),' 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),' 'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),' 'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE call_paths ' 'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),' 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)') do_query(query, 'ALTER TABLE calls ' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),' 'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),' 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),' 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') if (unhandled_count): print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" print datetime.datetime.today(), "Done" def trace_unhandled(event_name, context, event_fields_dict): global unhandled_count unhandled_count += 1 def sched__sched_switch(*x): pass def evsel_table(evsel_id, evsel_name, *x): n = len(evsel_name) fmt = "!hiqi" + str(n) + "s" value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name) evsel_file.write(value) def machine_table(machine_id, pid, root_dir, *x): n = len(root_dir) fmt = "!hiqiii" + str(n) + "s" value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir) machine_file.write(value) def thread_table(thread_id, machine_id, process_id, pid, tid, *x): value = 
struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid) thread_file.write(value) def comm_table(comm_id, comm_str, *x): n = len(comm_str) fmt = "!hiqi" + str(n) + "s" value = struct.pack(fmt, 2, 8, comm_id, n, comm_str) comm_file.write(value) def comm_thread_table(comm_thread_id, comm_id, thread_id, *x): fmt = "!hiqiqiq" value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id) comm_thread_file.write(value) def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): n1 = len(short_name) n2 = len(long_name) n3 = len(build_id) fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s" value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id) dso_file.write(value) def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x): n = len(symbol_name) fmt = "!hiqiqiqiqiii" + str(n) + "s" value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name) symbol_file.write(value) def branch_type_table(branch_type, name, *x): n = len(name) fmt = "!hiii" + str(n) + "s" value = struct.pack(fmt, 2, 4, branch_type, n, name) branch_type_file.write(value) def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x): if branches: value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx) else: value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, 
to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx) sample_file.write(value) def call_path_table(cp_id, parent_id, symbol_id, ip, *x): fmt = "!hiqiqiqiq" value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) call_path_file.write(value) def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x): fmt = "!hiqiqiqiqiqiqiqiqiqiqii" value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags) call_file.write(value)
gpl-2.0
guardicore/monkey
monkey/common/cloud/azure/azure_instance.py
1
2473
import logging import requests import simplejson from common.cloud.environment_names import Environment from common.cloud.instance import CloudInstance from common.common_consts.timeouts import SHORT_REQUEST_TIMEOUT LATEST_AZURE_METADATA_API_VERSION = "2019-04-30" AZURE_METADATA_SERVICE_URL = ( "http://169.254.169.254/metadata/instance?api-version=%s" % LATEST_AZURE_METADATA_API_VERSION ) logger = logging.getLogger(__name__) class AzureInstance(CloudInstance): """ Access to useful information about the current machine if it's an Azure VM. Based on Azure metadata service: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service """ def is_instance(self): return self._on_azure def get_cloud_provider_name(self) -> Environment: return Environment.AZURE def __init__(self): """ Determines if on Azure and if so, gets some basic metadata on this instance. """ self.instance_name = None self.instance_id = None self.location = None self._on_azure = False try: response = requests.get( AZURE_METADATA_SERVICE_URL, headers={"Metadata": "true"}, timeout=SHORT_REQUEST_TIMEOUT, ) # If not on cloud, the metadata URL is non-routable and the connection will fail. # If on AWS, should get 404 since the metadata service URL is different, # so bool(response) will be false. if response: logger.debug("Trying to parse Azure metadata.") self.try_parse_response(response) else: logger.warning(f"Metadata response not ok: {response.status_code}") except requests.RequestException: logger.debug( "Failed to get response from Azure metadata service: This instance is not on " "Azure." 
) def try_parse_response(self, response): try: response_data = response.json() self.instance_name = response_data["compute"]["name"] self.instance_id = response_data["compute"]["vmId"] self.location = response_data["compute"]["location"] self._on_azure = True except (KeyError, simplejson.errors.JSONDecodeError) as e: logger.exception(f"Error while parsing response from Azure metadata service: {e}")
gpl-3.0
PokeHunterProject/pogom-updated
pogom/pgoapi/protos/POGOProtos/Inventory/Item/ItemType_pb2.py
15
3796
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Inventory/Item/ItemType.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Inventory/Item/ItemType.proto', package='POGOProtos.Inventory.Item', syntax='proto3', serialized_pb=_b('\n(POGOProtos/Inventory/Item/ItemType.proto\x12\x19POGOProtos.Inventory.Item*\xb2\x02\n\x08ItemType\x12\x12\n\x0eITEM_TYPE_NONE\x10\x00\x12\x16\n\x12ITEM_TYPE_POKEBALL\x10\x01\x12\x14\n\x10ITEM_TYPE_POTION\x10\x02\x12\x14\n\x10ITEM_TYPE_REVIVE\x10\x03\x12\x11\n\rITEM_TYPE_MAP\x10\x04\x12\x14\n\x10ITEM_TYPE_BATTLE\x10\x05\x12\x12\n\x0eITEM_TYPE_FOOD\x10\x06\x12\x14\n\x10ITEM_TYPE_CAMERA\x10\x07\x12\x12\n\x0eITEM_TYPE_DISK\x10\x08\x12\x17\n\x13ITEM_TYPE_INCUBATOR\x10\t\x12\x15\n\x11ITEM_TYPE_INCENSE\x10\n\x12\x16\n\x12ITEM_TYPE_XP_BOOST\x10\x0b\x12\x1f\n\x1bITEM_TYPE_INVENTORY_UPGRADE\x10\x0c\x62\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _ITEMTYPE = _descriptor.EnumDescriptor( name='ItemType', full_name='POGOProtos.Inventory.Item.ItemType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='ITEM_TYPE_NONE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_POKEBALL', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_POTION', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_REVIVE', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( 
name='ITEM_TYPE_MAP', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_BATTLE', index=5, number=5, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_FOOD', index=6, number=6, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_CAMERA', index=7, number=7, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_DISK', index=8, number=8, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_INCUBATOR', index=9, number=9, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_INCENSE', index=10, number=10, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_XP_BOOST', index=11, number=11, options=None, type=None), _descriptor.EnumValueDescriptor( name='ITEM_TYPE_INVENTORY_UPGRADE', index=12, number=12, options=None, type=None), ], containing_type=None, options=None, serialized_start=72, serialized_end=378, ) _sym_db.RegisterEnumDescriptor(_ITEMTYPE) ItemType = enum_type_wrapper.EnumTypeWrapper(_ITEMTYPE) ITEM_TYPE_NONE = 0 ITEM_TYPE_POKEBALL = 1 ITEM_TYPE_POTION = 2 ITEM_TYPE_REVIVE = 3 ITEM_TYPE_MAP = 4 ITEM_TYPE_BATTLE = 5 ITEM_TYPE_FOOD = 6 ITEM_TYPE_CAMERA = 7 ITEM_TYPE_DISK = 8 ITEM_TYPE_INCUBATOR = 9 ITEM_TYPE_INCENSE = 10 ITEM_TYPE_XP_BOOST = 11 ITEM_TYPE_INVENTORY_UPGRADE = 12 DESCRIPTOR.enum_types_by_name['ItemType'] = _ITEMTYPE # @@protoc_insertion_point(module_scope)
mit
git-cola/git-cola
cola/widgets/cfgactions.py
2
10843
from __future__ import division, absolute_import, unicode_literals import os from qtpy import QtCore from qtpy import QtWidgets from qtpy.QtCore import Qt from .. import core from .. import gitcmds from .. import icons from .. import qtutils from ..i18n import N_ from ..interaction import Interaction from . import defs from . import completion from . import standard from .text import LineEdit def install(): Interaction.run_command = staticmethod(run_command) Interaction.confirm_config_action = staticmethod(confirm_config_action) def get_config_actions(context): cfg = context.cfg return cfg.get_guitool_names_and_shortcuts() def confirm_config_action(context, name, opts): dlg = ActionDialog(context, qtutils.active_window(), name, opts) dlg.show() if dlg.exec_() != QtWidgets.QDialog.Accepted: return False rev = dlg.revision() if rev: opts['revision'] = rev args = dlg.args() if args: opts['args'] = args return True def run_command(title, command): """Show a command widget""" view = GitCommandWidget(title, qtutils.active_window()) view.set_command(command) view.show() view.raise_() view.run() view.exec_() return (view.exitstatus, view.out, view.err) class GitCommandWidget(standard.Dialog): """Text viewer that reads the output of a command synchronously""" # Keep us in scope otherwise PyQt kills the widget def __init__(self, title, parent=None): standard.Dialog.__init__(self, parent) self.setWindowTitle(title) if parent is not None: self.setWindowModality(Qt.ApplicationModal) # Construct the process self.proc = QtCore.QProcess(self) self.exitstatus = 0 self.out = '' self.err = '' self.command = [] # Create the text browser self.output_text = QtWidgets.QTextBrowser(self) self.output_text.setAcceptDrops(False) self.output_text.setTabChangesFocus(True) self.output_text.setUndoRedoEnabled(False) self.output_text.setReadOnly(True) self.output_text.setAcceptRichText(False) # Create abort / close buttons # Start with abort disabled - will be enabled when the process is run. 
self.button_abort = qtutils.create_button(text=N_('Abort'), enabled=False) self.button_close = qtutils.close_button() # Put them in a horizontal layout at the bottom. self.button_box = QtWidgets.QDialogButtonBox(self) self.button_box.addButton( self.button_abort, QtWidgets.QDialogButtonBox.RejectRole ) self.button_box.addButton( self.button_close, QtWidgets.QDialogButtonBox.AcceptRole ) # Connect the signals to the process # pylint: disable=no-member self.proc.readyReadStandardOutput.connect(self.read_stdout) self.proc.readyReadStandardError.connect(self.read_stderr) self.proc.finished.connect(self.proc_finished) self.proc.stateChanged.connect(self.proc_state_changed) qtutils.connect_button(self.button_abort, self.abort) qtutils.connect_button(self.button_close, self.close) self._layout = qtutils.vbox( defs.margin, defs.spacing, self.output_text, self.button_box ) self.setLayout(self._layout) self.resize(720, 420) def set_command(self, command): self.command = command def run(self): """Runs the process""" self.proc.start(self.command[0], self.command[1:]) def read_stdout(self): text = self.read_stream(self.proc.readAllStandardOutput) self.out += text def read_stderr(self): text = self.read_stream(self.proc.readAllStandardError) self.err += text def read_stream(self, fn): data = fn().data() text = core.decode(data) self.append_text(text) return text def append_text(self, text): cursor = self.output_text.textCursor() cursor.movePosition(cursor.End) cursor.insertText(text) cursor.movePosition(cursor.End) self.output_text.setTextCursor(cursor) def abort(self): if self.proc.state() != QtCore.QProcess.NotRunning: # Terminate seems to do nothing in windows self.proc.terminate() # Kill the process. QtCore.QTimer.singleShot(1000, self.proc.kill) def closeEvent(self, event): if self.proc.state() != QtCore.QProcess.NotRunning: # The process is still running, make sure we really want to abort. 
title = N_('Abort Action') msg = N_( 'An action is still running.\n' 'Terminating it could result in data loss.' ) info_text = N_('Abort the action?') ok_text = N_('Abort Action') if Interaction.confirm( title, msg, info_text, ok_text, default=False, icon=icons.close() ): self.abort() event.accept() else: event.ignore() else: event.accept() return standard.Dialog.closeEvent(self, event) def proc_state_changed(self, newstate): # State of process has changed - change the abort button state. if newstate == QtCore.QProcess.NotRunning: self.button_abort.setEnabled(False) else: self.button_abort.setEnabled(True) def proc_finished(self, status): self.exitstatus = status class ActionDialog(standard.Dialog): VALUES = {} def __init__(self, context, parent, name, opts): standard.Dialog.__init__(self, parent) self.context = context self.action_name = name self.opts = opts try: values = self.VALUES[name] except KeyError: values = self.VALUES[name] = {} self.setWindowModality(Qt.ApplicationModal) title = opts.get('title') if title: self.setWindowTitle(os.path.expandvars(title)) self.prompt = QtWidgets.QLabel() prompt = opts.get('prompt') if prompt: self.prompt.setText(os.path.expandvars(prompt)) self.argslabel = QtWidgets.QLabel() if 'argprompt' not in opts or opts.get('argprompt') is True: argprompt = N_('Arguments') else: argprompt = opts.get('argprompt') self.argslabel.setText(argprompt) self.argstxt = LineEdit() if self.opts.get('argprompt'): try: # Remember the previous value saved_value = values['argstxt'] self.argstxt.setText(saved_value) except KeyError: pass else: self.argslabel.setMinimumSize(10, 10) self.argstxt.setMinimumSize(10, 10) self.argstxt.hide() self.argslabel.hide() revs = ( (N_('Local Branch'), gitcmds.branch_list(context, remote=False)), (N_('Tracking Branch'), gitcmds.branch_list(context, remote=True)), (N_('Tag'), gitcmds.tag_list(context)), ) if 'revprompt' not in opts or opts.get('revprompt') is True: revprompt = N_('Revision') else: revprompt = 
opts.get('revprompt') self.revselect = RevisionSelector(context, self, revs) self.revselect.set_revision_label(revprompt) if not opts.get('revprompt'): self.revselect.hide() # Close/Run buttons self.closebtn = qtutils.close_button() self.runbtn = qtutils.create_button( text=N_('Run'), default=True, icon=icons.ok() ) self.argslayt = qtutils.hbox( defs.margin, defs.spacing, self.argslabel, self.argstxt ) self.btnlayt = qtutils.hbox( defs.margin, defs.spacing, qtutils.STRETCH, self.closebtn, self.runbtn ) self.layt = qtutils.vbox( defs.margin, defs.spacing, self.prompt, self.argslayt, self.revselect, self.btnlayt, ) self.setLayout(self.layt) # pylint: disable=no-member self.argstxt.textChanged.connect(self._argstxt_changed) qtutils.connect_button(self.closebtn, self.reject) qtutils.connect_button(self.runbtn, self.accept) # Widen the dialog by default self.resize(666, self.height()) def revision(self): return self.revselect.revision() def args(self): return self.argstxt.text() def _argstxt_changed(self, value): """Store the argstxt value so that we can remember it between calls""" self.VALUES[self.action_name]['argstxt'] = value class RevisionSelector(QtWidgets.QWidget): def __init__(self, context, parent, revs): QtWidgets.QWidget.__init__(self, parent) self.context = context self._revs = revs self._revdict = dict(revs) self._rev_label = QtWidgets.QLabel(self) self._revision = completion.GitRefLineEdit(context, parent=self) # Create the radio buttons radio_btns = [] self._radio_btns = {} for label, rev_list in self._revs: radio = qtutils.radio(text=label) radio.setObjectName(label) qtutils.connect_button(radio, self._set_revision_list) radio_btns.append(radio) self._radio_btns[label] = radio radio_btns.append(qtutils.STRETCH) self._rev_list = QtWidgets.QListWidget() label, rev_list = self._revs[0] self._radio_btns[label].setChecked(True) qtutils.set_items(self._rev_list, rev_list) self._rev_layt = qtutils.hbox( defs.no_margin, defs.spacing, self._rev_label, 
self._revision ) self._radio_layt = qtutils.hbox(defs.margin, defs.spacing, *radio_btns) self._layt = qtutils.vbox( defs.no_margin, defs.spacing, self._rev_layt, self._radio_layt, self._rev_list, ) self.setLayout(self._layt) # pylint: disable=no-member self._rev_list.itemSelectionChanged.connect(self.selection_changed) def revision(self): return self._revision.text() def set_revision_label(self, txt): self._rev_label.setText(txt) def _set_revision_list(self): sender = self.sender().objectName() revs = self._revdict[sender] qtutils.set_items(self._rev_list, revs) def selection_changed(self): items = self._rev_list.selectedItems() if not items: return self._revision.setText(items[0].text())
gpl-2.0
proxysh/Safejumper-for-Mac
buildlinux/env32/lib/python2.7/site-packages/twisted/mail/test/test_smtp.py
9
62133
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Test cases for twisted.mail.smtp module. """ import inspect from zope.interface import implementer, directlyProvides from twisted.python.util import LineLog from twisted.trial import unittest from twisted.protocols import basic, loopback from twisted.mail import smtp from twisted.internet import defer, protocol, reactor, interfaces from twisted.internet import address, error, task from twisted.test.proto_helpers import MemoryReactor, StringTransport from twisted import cred import twisted.cred.error import twisted.cred.portal import twisted.cred.checkers import twisted.cred.credentials from twisted.cred.portal import IRealm, Portal from twisted.cred.checkers import ICredentialsChecker, AllowAnonymousAccess from twisted.cred.credentials import IAnonymous from twisted.cred.error import UnauthorizedLogin from twisted.mail import imap4 from twisted.mail._cred import LOGINCredentials try: from twisted.test.ssl_helpers import ClientTLSContext, ServerTLSContext except ImportError: sslSkip = "OpenSSL not present" else: sslSkip = None import re try: from cStringIO import StringIO except ImportError: from StringIO import StringIO def spameater(*spam, **eggs): return None @implementer(smtp.IMessage) class BrokenMessage(object): """ L{BrokenMessage} is an L{IMessage} which raises an unexpected exception from its C{eomReceived} method. This is useful for creating a server which can be used to test client retry behavior. """ def __init__(self, user): pass def lineReceived(self, line): pass def eomReceived(self): raise RuntimeError("Some problem, delivery is failing.") def connectionLost(self): pass class DummyMessage(object): """ L{BrokenMessage} is an L{IMessage} which saves the message delivered to it to its domain object. @ivar domain: A L{DummyDomain} which will be used to store the message once it is received. 
""" def __init__(self, domain, user): self.domain = domain self.user = user self.buffer = [] def lineReceived(self, line): # Throw away the generated Received: header if not re.match('Received: From yyy.com \(\[.*\]\) by localhost;', line): self.buffer.append(line) def eomReceived(self): message = '\n'.join(self.buffer) + '\n' self.domain.messages[self.user.dest.local].append(message) deferred = defer.Deferred() deferred.callback("saved") return deferred class DummyDomain(object): """ L{DummyDomain} is an L{IDomain} which keeps track of messages delivered to it in memory. """ def __init__(self, names): self.messages = {} for name in names: self.messages[name] = [] def exists(self, user): if user.dest.local in self.messages: return defer.succeed(lambda: DummyMessage(self, user)) return defer.fail(smtp.SMTPBadRcpt(user)) mail = '''\ Subject: hello Goodbye ''' class MyClient: def __init__(self, messageInfo=None): if messageInfo is None: messageInfo = ( 'moshez@foo.bar', ['moshez@foo.bar'], StringIO(mail)) self._sender = messageInfo[0] self._recipient = messageInfo[1] self._data = messageInfo[2] def getMailFrom(self): return self._sender def getMailTo(self): return self._recipient def getMailData(self): return self._data def sendError(self, exc): self._error = exc def sentMail(self, code, resp, numOk, addresses, log): # Prevent another mail from being sent. 
self._sender = None self._recipient = None self._data = None class MySMTPClient(MyClient, smtp.SMTPClient): def __init__(self, messageInfo=None): smtp.SMTPClient.__init__(self, 'foo.baz') MyClient.__init__(self, messageInfo) class MyESMTPClient(MyClient, smtp.ESMTPClient): def __init__(self, secret = '', contextFactory = None): smtp.ESMTPClient.__init__(self, secret, contextFactory, 'foo.baz') MyClient.__init__(self) class LoopbackMixin: def loopback(self, server, client): return loopback.loopbackTCP(server, client) class FakeSMTPServer(basic.LineReceiver): clientData = [ '220 hello', '250 nice to meet you', '250 great', '250 great', '354 go on, lad' ] def connectionMade(self): self.buffer = [] self.clientData = self.clientData[:] self.clientData.reverse() self.sendLine(self.clientData.pop()) def lineReceived(self, line): self.buffer.append(line) if line == "QUIT": self.transport.write("221 see ya around\r\n") self.transport.loseConnection() elif line == ".": self.transport.write("250 gotcha\r\n") elif line == "RSET": self.transport.loseConnection() if self.clientData: self.sendLine(self.clientData.pop()) class SMTPClientTests(unittest.TestCase, LoopbackMixin): """ Tests for L{smtp.SMTPClient}. """ def test_timeoutConnection(self): """ L{smtp.SMTPClient.timeoutConnection} calls the C{sendError} hook with a fatal L{SMTPTimeoutError} with the current line log. 
""" errors = [] client = MySMTPClient() client.sendError = errors.append client.makeConnection(StringTransport()) client.lineReceived("220 hello") client.timeoutConnection() self.assertIsInstance(errors[0], smtp.SMTPTimeoutError) self.assertTrue(errors[0].isFatal) self.assertEqual( str(errors[0]), "Timeout waiting for SMTP server response\n" "<<< 220 hello\n" ">>> HELO foo.baz\n") expected_output = [ 'HELO foo.baz', 'MAIL FROM:<moshez@foo.bar>', 'RCPT TO:<moshez@foo.bar>', 'DATA', 'Subject: hello', '', 'Goodbye', '.', 'RSET' ] def test_messages(self): """ L{smtp.SMTPClient} sends I{HELO}, I{MAIL FROM}, I{RCPT TO}, and I{DATA} commands based on the return values of its C{getMailFrom}, C{getMailTo}, and C{getMailData} methods. """ client = MySMTPClient() server = FakeSMTPServer() d = self.loopback(server, client) d.addCallback(lambda x : self.assertEqual(server.buffer, self.expected_output)) return d def test_transferError(self): """ If there is an error while producing the message body to the connection, the C{sendError} callback is invoked. """ client = MySMTPClient( ('alice@example.com', ['bob@example.com'], StringIO("foo"))) transport = StringTransport() client.makeConnection(transport) client.dataReceived( '220 Ok\r\n' # Greeting '250 Ok\r\n' # EHLO response '250 Ok\r\n' # MAIL FROM response '250 Ok\r\n' # RCPT TO response '354 Ok\r\n' # DATA response ) # Sanity check - a pull producer should be registered now. self.assertNotIdentical(transport.producer, None) self.assertFalse(transport.streaming) # Now stop the producer prematurely, meaning the message was not sent. transport.producer.stopProducing() # The sendError hook should have been invoked as a result. self.assertIsInstance(client._error, Exception) def test_sendFatalError(self): """ If L{smtp.SMTPClient.sendError} is called with an L{SMTPClientError} which is fatal, it disconnects its transport without writing anything more to it. 
""" client = smtp.SMTPClient(None) transport = StringTransport() client.makeConnection(transport) client.sendError(smtp.SMTPClientError(123, "foo", isFatal=True)) self.assertEqual(transport.value(), "") self.assertTrue(transport.disconnecting) def test_sendNonFatalError(self): """ If L{smtp.SMTPClient.sendError} is called with an L{SMTPClientError} which is not fatal, it sends C{"QUIT"} and waits for the server to close the connection. """ client = smtp.SMTPClient(None) transport = StringTransport() client.makeConnection(transport) client.sendError(smtp.SMTPClientError(123, "foo", isFatal=False)) self.assertEqual(transport.value(), "QUIT\r\n") self.assertFalse(transport.disconnecting) def test_sendOtherError(self): """ If L{smtp.SMTPClient.sendError} is called with an exception which is not an L{SMTPClientError}, it disconnects its transport without writing anything more to it. """ client = smtp.SMTPClient(None) transport = StringTransport() client.makeConnection(transport) client.sendError(Exception("foo")) self.assertEqual(transport.value(), "") self.assertTrue(transport.disconnecting) class DummySMTPMessage: def __init__(self, protocol, users): self.protocol = protocol self.users = users self.buffer = [] def lineReceived(self, line): self.buffer.append(line) def eomReceived(self): message = '\n'.join(self.buffer) + '\n' helo, origin = self.users[0].helo[0], str(self.users[0].orig) recipients = [] for user in self.users: recipients.append(str(user)) self.protocol.message[tuple(recipients)] = (helo, origin, recipients, message) return defer.succeed("saved") class DummyProto: def connectionMade(self): self.dummyMixinBase.connectionMade(self) self.message = {} def receivedHeader(*spam): return None def validateTo(self, user): self.delivery = SimpleDelivery(None) return lambda: DummySMTPMessage(self, [user]) def validateFrom(self, helo, origin): return origin class DummySMTP(DummyProto, smtp.SMTP): dummyMixinBase = smtp.SMTP class DummyESMTP(DummyProto, smtp.ESMTP): 
dummyMixinBase = smtp.ESMTP class AnotherTestCase: serverClass = None clientClass = None messages = [ ('foo.com', 'moshez@foo.com', ['moshez@bar.com'], 'moshez@foo.com', ['moshez@bar.com'], '''\ From: Moshe To: Moshe Hi, how are you? '''), ('foo.com', 'tttt@rrr.com', ['uuu@ooo', 'yyy@eee'], 'tttt@rrr.com', ['uuu@ooo', 'yyy@eee'], '''\ Subject: pass ..rrrr.. '''), ('foo.com', '@this,@is,@ignored:foo@bar.com', ['@ignore,@this,@too:bar@foo.com'], 'foo@bar.com', ['bar@foo.com'], '''\ Subject: apa To: foo 123 . 456 '''), ] data = [ ('', '220.*\r\n$', None, None), ('HELO foo.com\r\n', '250.*\r\n$', None, None), ('RSET\r\n', '250.*\r\n$', None, None), ] for helo_, from_, to_, realfrom, realto, msg in messages: data.append(('MAIL FROM:<%s>\r\n' % from_, '250.*\r\n', None, None)) for rcpt in to_: data.append(('RCPT TO:<%s>\r\n' % rcpt, '250.*\r\n', None, None)) data.append(('DATA\r\n','354.*\r\n', msg, ('250.*\r\n', (helo_, realfrom, realto, msg)))) def test_buffer(self): """ Exercise a lot of the SMTP client code. This is a "shotgun" style unit test. It does a lot of things and hopes that something will go really wrong if it is going to go wrong. This test should be replaced with a suite of nicer tests. """ transport = StringTransport() a = self.serverClass() class fooFactory: domain = 'foo.com' a.factory = fooFactory() a.makeConnection(transport) for (send, expect, msg, msgexpect) in self.data: if send: a.dataReceived(send) data = transport.value() transport.clear() if not re.match(expect, data): raise AssertionError(send, expect, data) if data[:3] == '354': for line in msg.splitlines(): if line and line[0] == '.': line = '.' + line a.dataReceived(line + '\r\n') a.dataReceived('.\r\n') # Special case for DATA. 
Now we want a 250, and then # we compare the messages data = transport.value() transport.clear() resp, msgdata = msgexpect if not re.match(resp, data): raise AssertionError(resp, data) for recip in msgdata[2]: expected = list(msgdata[:]) expected[2] = [recip] self.assertEqual( a.message[(recip,)], tuple(expected) ) a.setTimeout(None) class AnotherESMTPTests(AnotherTestCase, unittest.TestCase): serverClass = DummyESMTP clientClass = MyESMTPClient class AnotherSMTPTests(AnotherTestCase, unittest.TestCase): serverClass = DummySMTP clientClass = MySMTPClient @implementer(cred.checkers.ICredentialsChecker) class DummyChecker: users = { 'testuser': 'testpassword' } credentialInterfaces = (cred.credentials.IUsernamePassword, cred.credentials.IUsernameHashedPassword) def requestAvatarId(self, credentials): return defer.maybeDeferred( credentials.checkPassword, self.users[credentials.username] ).addCallback(self._cbCheck, credentials.username) def _cbCheck(self, result, username): if result: return username raise cred.error.UnauthorizedLogin() @implementer(smtp.IMessageDelivery) class SimpleDelivery(object): """ L{SimpleDelivery} is a message delivery factory with no interesting behavior. """ def __init__(self, messageFactory): self._messageFactory = messageFactory def receivedHeader(self, helo, origin, recipients): return None def validateFrom(self, helo, origin): return origin def validateTo(self, user): return lambda: self._messageFactory(user) class DummyRealm: def requestAvatar(self, avatarId, mind, *interfaces): return smtp.IMessageDelivery, SimpleDelivery(None), lambda: None class AuthTests(unittest.TestCase, LoopbackMixin): def test_crammd5Auth(self): """ L{ESMTPClient} can authenticate using the I{CRAM-MD5} SASL mechanism. 
@see: U{http://tools.ietf.org/html/rfc2195} """ realm = DummyRealm() p = cred.portal.Portal(realm) p.registerChecker(DummyChecker()) server = DummyESMTP({'CRAM-MD5': cred.credentials.CramMD5Credentials}) server.portal = p client = MyESMTPClient('testpassword') cAuth = smtp.CramMD5ClientAuthenticator('testuser') client.registerAuthenticator(cAuth) d = self.loopback(server, client) d.addCallback(lambda x : self.assertEqual(server.authenticated, 1)) return d def test_loginAuth(self): """ L{ESMTPClient} can authenticate using the I{LOGIN} SASL mechanism. @see: U{http://sepp.oetiker.ch/sasl-2.1.19-ds/draft-murchison-sasl-login-00.txt} """ realm = DummyRealm() p = cred.portal.Portal(realm) p.registerChecker(DummyChecker()) server = DummyESMTP({b'LOGIN': LOGINCredentials}) server.portal = p client = MyESMTPClient('testpassword') cAuth = smtp.LOGINAuthenticator('testuser') client.registerAuthenticator(cAuth) d = self.loopback(server, client) d.addCallback(lambda x: self.assertTrue(server.authenticated)) return d def test_loginAgainstWeirdServer(self): """ When communicating with a server which implements the I{LOGIN} SASL mechanism using C{"Username:"} as the challenge (rather than C{"User Name\\0"}), L{ESMTPClient} can still authenticate successfully using the I{LOGIN} mechanism. 
""" realm = DummyRealm() p = cred.portal.Portal(realm) p.registerChecker(DummyChecker()) server = DummyESMTP({'LOGIN': smtp.LOGINCredentials}) server.portal = p client = MyESMTPClient('testpassword') cAuth = smtp.LOGINAuthenticator('testuser') client.registerAuthenticator(cAuth) d = self.loopback(server, client) d.addCallback(lambda x: self.assertTrue(server.authenticated)) return d class SMTPHelperTests(unittest.TestCase): def testMessageID(self): d = {} for i in range(1000): m = smtp.messageid('testcase') self.assertFalse(m in d) d[m] = None def testQuoteAddr(self): cases = [ ['user@host.name', '<user@host.name>'], ['"User Name" <user@host.name>', '<user@host.name>'], [smtp.Address('someguy@someplace'), '<someguy@someplace>'], ['', '<>'], [smtp.Address(''), '<>'], ] for (c, e) in cases: self.assertEqual(smtp.quoteaddr(c), e) def testUser(self): u = smtp.User('user@host', 'helo.host.name', None, None) self.assertEqual(str(u), 'user@host') def testXtextEncoding(self): cases = [ ('Hello world', 'Hello+20world'), ('Hello+world', 'Hello+2Bworld'), ('\0\1\2\3\4\5', '+00+01+02+03+04+05'), ('e=mc2@example.com', 'e+3Dmc2@example.com') ] for (case, expected) in cases: self.assertEqual(smtp.xtext_encode(case), (expected, len(case))) self.assertEqual(case.encode('xtext'), expected) self.assertEqual( smtp.xtext_decode(expected), (case, len(expected))) self.assertEqual(expected.decode('xtext'), case) def test_encodeWithErrors(self): """ Specifying an error policy to C{unicode.encode} with the I{xtext} codec should produce the same result as not specifying the error policy. """ text = u'Hello world' self.assertEqual( smtp.xtext_encode(text, 'strict'), (text.encode('xtext'), len(text))) self.assertEqual( text.encode('xtext', 'strict'), text.encode('xtext')) def test_decodeWithErrors(self): """ Similar to L{test_encodeWithErrors}, but for C{str.decode}. 
""" bytes = 'Hello world' self.assertEqual( smtp.xtext_decode(bytes, 'strict'), (bytes.decode('xtext'), len(bytes))) self.assertEqual( bytes.decode('xtext', 'strict'), bytes.decode('xtext')) class NoticeTLSClient(MyESMTPClient): tls = False def esmtpState_starttls(self, code, resp): MyESMTPClient.esmtpState_starttls(self, code, resp) self.tls = True class TLSTests(unittest.TestCase, LoopbackMixin): if sslSkip is not None: skip = sslSkip def testTLS(self): clientCTX = ClientTLSContext() serverCTX = ServerTLSContext() client = NoticeTLSClient(contextFactory=clientCTX) server = DummyESMTP(contextFactory=serverCTX) def check(ignored): self.assertEqual(client.tls, True) self.assertEqual(server.startedTLS, True) return self.loopback(server, client).addCallback(check) if not interfaces.IReactorSSL.providedBy(reactor): for case in (TLSTests,): case.skip = "Reactor doesn't support SSL" class EmptyLineTests(unittest.TestCase): def test_emptyLineSyntaxError(self): """ If L{smtp.SMTP} receives an empty line, it responds with a 500 error response code and a message about a syntax error. """ proto = smtp.SMTP() transport = StringTransport() proto.makeConnection(transport) proto.lineReceived('') proto.setTimeout(None) out = transport.value().splitlines() self.assertEqual(len(out), 2) self.assertTrue(out[0].startswith('220')) self.assertEqual(out[1], "500 Error: bad syntax") class TimeoutTests(unittest.TestCase, LoopbackMixin): """ Check that SMTP client factories correctly use the timeout. """ def _timeoutTest(self, onDone, clientFactory): """ Connect the clientFactory, and check the timeout on the request. 
""" clock = task.Clock() client = clientFactory.buildProtocol( address.IPv4Address('TCP', 'example.net', 25)) client.callLater = clock.callLater t = StringTransport() client.makeConnection(t) t.protocol = client def check(ign): self.assertEqual(clock.seconds(), 0.5) d = self.assertFailure(onDone, smtp.SMTPTimeoutError ).addCallback(check) # The first call should not trigger the timeout clock.advance(0.1) # But this one should clock.advance(0.4) return d def test_SMTPClient(self): """ Test timeout for L{smtp.SMTPSenderFactory}: the response L{Deferred} should be errback with a L{smtp.SMTPTimeoutError}. """ onDone = defer.Deferred() clientFactory = smtp.SMTPSenderFactory( 'source@address', 'recipient@address', StringIO("Message body"), onDone, retries=0, timeout=0.5) return self._timeoutTest(onDone, clientFactory) def test_ESMTPClient(self): """ Test timeout for L{smtp.ESMTPSenderFactory}: the response L{Deferred} should be errback with a L{smtp.SMTPTimeoutError}. """ onDone = defer.Deferred() clientFactory = smtp.ESMTPSenderFactory( 'username', 'password', 'source@address', 'recipient@address', StringIO("Message body"), onDone, retries=0, timeout=0.5) return self._timeoutTest(onDone, clientFactory) def test_resetTimeoutWhileSending(self): """ The timeout is not allowed to expire after the server has accepted a DATA command and the client is actively sending data to it. """ class SlowFile: """ A file-like which returns one byte from each read call until the specified number of bytes have been returned. 
""" def __init__(self, size): self._size = size def read(self, max=None): if self._size: self._size -= 1 return 'x' return '' failed = [] onDone = defer.Deferred() onDone.addErrback(failed.append) clientFactory = smtp.SMTPSenderFactory( 'source@address', 'recipient@address', SlowFile(1), onDone, retries=0, timeout=3) clientFactory.domain = "example.org" clock = task.Clock() client = clientFactory.buildProtocol( address.IPv4Address('TCP', 'example.net', 25)) client.callLater = clock.callLater transport = StringTransport() client.makeConnection(transport) client.dataReceived( "220 Ok\r\n" # Greet the client "250 Ok\r\n" # Respond to HELO "250 Ok\r\n" # Respond to MAIL FROM "250 Ok\r\n" # Respond to RCPT TO "354 Ok\r\n" # Respond to DATA ) # Now the client is producing data to the server. Any time # resumeProducing is called on the producer, the timeout should be # extended. First, a sanity check. This test is only written to # handle pull producers. self.assertNotIdentical(transport.producer, None) self.assertFalse(transport.streaming) # Now, allow 2 seconds (1 less than the timeout of 3 seconds) to # elapse. clock.advance(2) # The timeout has not expired, so the failure should not have happened. self.assertEqual(failed, []) # Let some bytes be produced, extending the timeout. Then advance the # clock some more and verify that the timeout still hasn't happened. transport.producer.resumeProducing() clock.advance(2) self.assertEqual(failed, []) # The file has been completely produced - the next resume producing # finishes the upload, successfully. transport.producer.resumeProducing() client.dataReceived("250 Ok\r\n") self.assertEqual(failed, []) # Verify that the client actually did send the things expected. self.assertEqual( transport.value(), "HELO example.org\r\n" "MAIL FROM:<source@address>\r\n" "RCPT TO:<recipient@address>\r\n" "DATA\r\n" "x\r\n" ".\r\n" # This RSET is just an implementation detail. It's nice, but this # test doesn't really care about it. 
"RSET\r\n") class MultipleDeliveryFactorySMTPServerFactory(protocol.ServerFactory): """ L{MultipleDeliveryFactorySMTPServerFactory} creates SMTP server protocol instances with message delivery factory objects supplied to it. Each factory is used for one connection and then discarded. Factories are used in the order they are supplied. """ def __init__(self, messageFactories): self._messageFactories = messageFactories def buildProtocol(self, addr): p = protocol.ServerFactory.buildProtocol(self, addr) p.delivery = SimpleDelivery(self._messageFactories.pop(0)) return p class SMTPSenderFactoryTests(unittest.TestCase): """ Tests for L{smtp.SMTPSenderFactory}. """ def test_removeCurrentProtocolWhenClientConnectionLost(self): """ L{smtp.SMTPSenderFactory} removes the current protocol when the client connection is lost. """ reactor = MemoryReactor() sentDeferred = defer.Deferred() clientFactory = smtp.SMTPSenderFactory( "source@address", "recipient@address", StringIO("message"), sentDeferred) connector = reactor.connectTCP("localhost", 25, clientFactory) clientFactory.buildProtocol(None) clientFactory.clientConnectionLost(connector, error.ConnectionDone("Bye.")) self.assertEqual(clientFactory.currentProtocol, None) def test_removeCurrentProtocolWhenClientConnectionFailed(self): """ L{smtp.SMTPSenderFactory} removes the current protocol when the client connection is failed. """ reactor = MemoryReactor() sentDeferred = defer.Deferred() clientFactory = smtp.SMTPSenderFactory( "source@address", "recipient@address", StringIO("message"), sentDeferred) connector = reactor.connectTCP("localhost", 25, clientFactory) clientFactory.buildProtocol(None) clientFactory.clientConnectionFailed(connector, error.ConnectionDone("Bye.")) self.assertEqual(clientFactory.currentProtocol, None) class SMTPSenderFactoryRetryTests(unittest.TestCase): """ Tests for the retry behavior of L{smtp.SMTPSenderFactory}. 
""" def test_retryAfterDisconnect(self): """ If the protocol created by L{SMTPSenderFactory} loses its connection before receiving confirmation of message delivery, it reconnects and tries to deliver the message again. """ recipient = 'alice' message = "some message text" domain = DummyDomain([recipient]) class CleanSMTP(smtp.SMTP): """ An SMTP subclass which ensures that its transport will be disconnected before the test ends. """ def makeConnection(innerSelf, transport): self.addCleanup(transport.loseConnection) smtp.SMTP.makeConnection(innerSelf, transport) # Create a server which will fail the first message deliver attempt to # it with a 500 and a disconnect, but which will accept a message # delivered over the 2nd connection to it. serverFactory = MultipleDeliveryFactorySMTPServerFactory([ BrokenMessage, lambda user: DummyMessage(domain, user)]) serverFactory.protocol = CleanSMTP serverPort = reactor.listenTCP(0, serverFactory, interface='127.0.0.1') serverHost = serverPort.getHost() self.addCleanup(serverPort.stopListening) # Set up a client to try to deliver a message to the above created # server. sentDeferred = defer.Deferred() clientFactory = smtp.SMTPSenderFactory( "bob@example.org", recipient + "@example.com", StringIO(message), sentDeferred) clientFactory.domain = "example.org" clientConnector = reactor.connectTCP( serverHost.host, serverHost.port, clientFactory) self.addCleanup(clientConnector.disconnect) def cbSent(ignored): """ Verify that the message was successfully delivered and flush the error which caused the first attempt to fail. """ self.assertEqual( domain.messages, {recipient: ["\n%s\n" % (message,)]}) # Flush the RuntimeError that BrokenMessage caused to be logged. 
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1) sentDeferred.addCallback(cbSent) return sentDeferred @implementer(IRealm) class SingletonRealm(object): """ Trivial realm implementation which is constructed with an interface and an avatar and returns that avatar when asked for that interface. """ def __init__(self, interface, avatar): self.interface = interface self.avatar = avatar def requestAvatar(self, avatarId, mind, *interfaces): for iface in interfaces: if iface is self.interface: return iface, self.avatar, lambda: None class NotImplementedDelivery(object): """ Non-implementation of L{smtp.IMessageDelivery} which only has methods which raise L{NotImplementedError}. Subclassed by various tests to provide the particular behavior being tested. """ def validateFrom(self, helo, origin): raise NotImplementedError("This oughtn't be called in the course of this test.") def validateTo(self, user): raise NotImplementedError("This oughtn't be called in the course of this test.") def receivedHeader(self, helo, origin, recipients): raise NotImplementedError("This oughtn't be called in the course of this test.") class SMTPServerTests(unittest.TestCase): """ Test various behaviors of L{twisted.mail.smtp.SMTP} and L{twisted.mail.smtp.ESMTP}. """ def testSMTPGreetingHost(self, serverClass=smtp.SMTP): """ Test that the specified hostname shows up in the SMTP server's greeting. """ s = serverClass() s.host = "example.com" t = StringTransport() s.makeConnection(t) s.connectionLost(error.ConnectionDone()) self.assertIn("example.com", t.value()) def testSMTPGreetingNotExtended(self): """ Test that the string "ESMTP" does not appear in the SMTP server's greeting since that string strongly suggests the presence of support for various SMTP extensions which are not supported by L{smtp.SMTP}. 
""" s = smtp.SMTP() t = StringTransport() s.makeConnection(t) s.connectionLost(error.ConnectionDone()) self.assertNotIn("ESMTP", t.value()) def testESMTPGreetingHost(self): """ Similar to testSMTPGreetingHost, but for the L{smtp.ESMTP} class. """ self.testSMTPGreetingHost(smtp.ESMTP) def testESMTPGreetingExtended(self): """ Test that the string "ESMTP" does appear in the ESMTP server's greeting since L{smtp.ESMTP} does support the SMTP extensions which that advertises to the client. """ s = smtp.ESMTP() t = StringTransport() s.makeConnection(t) s.connectionLost(error.ConnectionDone()) self.assertIn("ESMTP", t.value()) def test_SMTPUnknownCommand(self): """ Sending an unimplemented command is responded to with a 500. """ s = smtp.SMTP() t = StringTransport() s.makeConnection(t) s.lineReceived(b"DOAGOODTHING") s.connectionLost(error.ConnectionDone()) self.assertIn("500 Command not implemented", t.value()) def test_acceptSenderAddress(self): """ Test that a C{MAIL FROM} command with an acceptable address is responded to with the correct success code. """ class AcceptanceDelivery(NotImplementedDelivery): """ Delivery object which accepts all senders as valid. """ def validateFrom(self, helo, origin): return origin realm = SingletonRealm(smtp.IMessageDelivery, AcceptanceDelivery()) portal = Portal(realm, [AllowAnonymousAccess()]) proto = smtp.SMTP() proto.portal = portal trans = StringTransport() proto.makeConnection(trans) # Deal with the necessary preliminaries proto.dataReceived('HELO example.com\r\n') trans.clear() # Try to specify our sender address proto.dataReceived('MAIL FROM:<alice@example.com>\r\n') # Clean up the protocol before doing anything that might raise an # exception. 
proto.connectionLost(error.ConnectionLost()) # Make sure that we received exactly the correct response self.assertEqual( trans.value(), '250 Sender address accepted\r\n') def test_deliveryRejectedSenderAddress(self): """ Test that a C{MAIL FROM} command with an address rejected by a L{smtp.IMessageDelivery} instance is responded to with the correct error code. """ class RejectionDelivery(NotImplementedDelivery): """ Delivery object which rejects all senders as invalid. """ def validateFrom(self, helo, origin): raise smtp.SMTPBadSender(origin) realm = SingletonRealm(smtp.IMessageDelivery, RejectionDelivery()) portal = Portal(realm, [AllowAnonymousAccess()]) proto = smtp.SMTP() proto.portal = portal trans = StringTransport() proto.makeConnection(trans) # Deal with the necessary preliminaries proto.dataReceived('HELO example.com\r\n') trans.clear() # Try to specify our sender address proto.dataReceived('MAIL FROM:<alice@example.com>\r\n') # Clean up the protocol before doing anything that might raise an # exception. proto.connectionLost(error.ConnectionLost()) # Make sure that we received exactly the correct response self.assertEqual( trans.value(), '550 Cannot receive from specified address ' '<alice@example.com>: Sender not acceptable\r\n') @implementer(ICredentialsChecker) def test_portalRejectedSenderAddress(self): """ Test that a C{MAIL FROM} command with an address rejected by an L{smtp.SMTP} instance's portal is responded to with the correct error code. """ class DisallowAnonymousAccess(object): """ Checker for L{IAnonymous} which rejects authentication attempts. 
""" credentialInterfaces = (IAnonymous,) def requestAvatarId(self, credentials): return defer.fail(UnauthorizedLogin()) realm = SingletonRealm(smtp.IMessageDelivery, NotImplementedDelivery()) portal = Portal(realm, [DisallowAnonymousAccess()]) proto = smtp.SMTP() proto.portal = portal trans = StringTransport() proto.makeConnection(trans) # Deal with the necessary preliminaries proto.dataReceived('HELO example.com\r\n') trans.clear() # Try to specify our sender address proto.dataReceived('MAIL FROM:<alice@example.com>\r\n') # Clean up the protocol before doing anything that might raise an # exception. proto.connectionLost(error.ConnectionLost()) # Make sure that we received exactly the correct response self.assertEqual( trans.value(), '550 Cannot receive from specified address ' '<alice@example.com>: Sender not acceptable\r\n') def test_portalRejectedAnonymousSender(self): """ Test that a C{MAIL FROM} command issued without first authenticating when a portal has been configured to disallow anonymous logins is responded to with the correct error code. """ realm = SingletonRealm(smtp.IMessageDelivery, NotImplementedDelivery()) portal = Portal(realm, []) proto = smtp.SMTP() proto.portal = portal trans = StringTransport() proto.makeConnection(trans) # Deal with the necessary preliminaries proto.dataReceived('HELO example.com\r\n') trans.clear() # Try to specify our sender address proto.dataReceived('MAIL FROM:<alice@example.com>\r\n') # Clean up the protocol before doing anything that might raise an # exception. proto.connectionLost(error.ConnectionLost()) # Make sure that we received exactly the correct response self.assertEqual( trans.value(), '550 Cannot receive from specified address ' '<alice@example.com>: Unauthenticated senders not allowed\r\n') class ESMTPAuthenticationTests(unittest.TestCase): def assertServerResponse(self, bytes, response): """ Assert that when the given bytes are delivered to the ESMTP server instance, it responds with the indicated lines. 
@type bytes: str @type response: list of str """ self.transport.clear() self.server.dataReceived(bytes) self.assertEqual( response, self.transport.value().splitlines()) def assertServerAuthenticated(self, loginArgs, username="username", password="password"): """ Assert that a login attempt has been made, that the credentials and interfaces passed to it are correct, and that when the login request is satisfied, a successful response is sent by the ESMTP server instance. @param loginArgs: A C{list} previously passed to L{portalFactory}. """ d, credentials, mind, interfaces = loginArgs.pop() self.assertEqual(loginArgs, []) self.assertTrue(twisted.cred.credentials.IUsernamePassword.providedBy(credentials)) self.assertEqual(credentials.username, username) self.assertTrue(credentials.checkPassword(password)) self.assertIn(smtp.IMessageDeliveryFactory, interfaces) self.assertIn(smtp.IMessageDelivery, interfaces) d.callback((smtp.IMessageDeliveryFactory, None, lambda: None)) self.assertEqual( ["235 Authentication successful."], self.transport.value().splitlines()) def setUp(self): """ Create an ESMTP instance attached to a StringTransport. """ self.server = smtp.ESMTP({ 'LOGIN': imap4.LOGINCredentials}) self.server.host = 'localhost' self.transport = StringTransport( peerAddress=address.IPv4Address('TCP', '127.0.0.1', 12345)) self.server.makeConnection(self.transport) def tearDown(self): """ Disconnect the ESMTP instance to clean up its timeout DelayedCall. """ self.server.connectionLost(error.ConnectionDone()) def portalFactory(self, loginList): class DummyPortal: def login(self, credentials, mind, *interfaces): d = defer.Deferred() loginList.append((d, credentials, mind, interfaces)) return d return DummyPortal() def test_authenticationCapabilityAdvertised(self): """ Test that AUTH is advertised to clients which issue an EHLO command. 
""" self.transport.clear() self.server.dataReceived('EHLO\r\n') responseLines = self.transport.value().splitlines() self.assertEqual( responseLines[0], "250-localhost Hello 127.0.0.1, nice to meet you") self.assertEqual( responseLines[1], "250 AUTH LOGIN") self.assertEqual(len(responseLines), 2) def test_plainAuthentication(self): """ Test that the LOGIN authentication mechanism can be used """ loginArgs = [] self.server.portal = self.portalFactory(loginArgs) self.server.dataReceived('EHLO\r\n') self.transport.clear() self.assertServerResponse( 'AUTH LOGIN\r\n', ["334 " + "User Name\0".encode('base64').strip()]) self.assertServerResponse( 'username'.encode('base64') + '\r\n', ["334 " + "Password\0".encode('base64').strip()]) self.assertServerResponse( 'password'.encode('base64').strip() + '\r\n', []) self.assertServerAuthenticated(loginArgs) def test_plainAuthenticationEmptyPassword(self): """ Test that giving an empty password for plain auth succeeds. """ loginArgs = [] self.server.portal = self.portalFactory(loginArgs) self.server.dataReceived('EHLO\r\n') self.transport.clear() self.assertServerResponse( 'AUTH LOGIN\r\n', ["334 " + "User Name\0".encode('base64').strip()]) self.assertServerResponse( 'username'.encode('base64') + '\r\n', ["334 " + "Password\0".encode('base64').strip()]) self.assertServerResponse('\r\n', []) self.assertServerAuthenticated(loginArgs, password='') def test_plainAuthenticationInitialResponse(self): """ The response to the first challenge may be included on the AUTH command line. Test that this is also supported. 
""" loginArgs = [] self.server.portal = self.portalFactory(loginArgs) self.server.dataReceived('EHLO\r\n') self.transport.clear() self.assertServerResponse( 'AUTH LOGIN ' + "username".encode('base64').strip() + '\r\n', ["334 " + "Password\0".encode('base64').strip()]) self.assertServerResponse( 'password'.encode('base64').strip() + '\r\n', []) self.assertServerAuthenticated(loginArgs) def test_abortAuthentication(self): """ Test that a challenge/response sequence can be aborted by the client. """ loginArgs = [] self.server.portal = self.portalFactory(loginArgs) self.server.dataReceived('EHLO\r\n') self.server.dataReceived('AUTH LOGIN\r\n') self.assertServerResponse( '*\r\n', ['501 Authentication aborted']) def test_invalidBase64EncodedResponse(self): """ Test that a response which is not properly Base64 encoded results in the appropriate error code. """ loginArgs = [] self.server.portal = self.portalFactory(loginArgs) self.server.dataReceived('EHLO\r\n') self.server.dataReceived('AUTH LOGIN\r\n') self.assertServerResponse( 'x\r\n', ['501 Syntax error in parameters or arguments']) self.assertEqual(loginArgs, []) def test_invalidBase64EncodedInitialResponse(self): """ Like L{test_invalidBase64EncodedResponse} but for the case of an initial response included with the C{AUTH} command. """ loginArgs = [] self.server.portal = self.portalFactory(loginArgs) self.server.dataReceived('EHLO\r\n') self.assertServerResponse( 'AUTH LOGIN x\r\n', ['501 Syntax error in parameters or arguments']) self.assertEqual(loginArgs, []) def test_unexpectedLoginFailure(self): """ If the L{Deferred} returned by L{Portal.login} fires with an exception of any type other than L{UnauthorizedLogin}, the exception is logged and the client is informed that the authentication attempt has failed. 
""" loginArgs = [] self.server.portal = self.portalFactory(loginArgs) self.server.dataReceived('EHLO\r\n') self.transport.clear() self.assertServerResponse( 'AUTH LOGIN ' + 'username'.encode('base64').strip() + '\r\n', ['334 ' + 'Password\0'.encode('base64').strip()]) self.assertServerResponse( 'password'.encode('base64').strip() + '\r\n', []) d, credentials, mind, interfaces = loginArgs.pop() d.errback(RuntimeError("Something wrong with the server")) self.assertEqual( '451 Requested action aborted: local error in processing\r\n', self.transport.value()) self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1) class SMTPClientErrorTests(unittest.TestCase): """ Tests for L{smtp.SMTPClientError}. """ def test_str(self): """ The string representation of a L{SMTPClientError} instance includes the response code and response string. """ err = smtp.SMTPClientError(123, "some text") self.assertEqual(str(err), "123 some text") def test_strWithNegativeCode(self): """ If the response code supplied to L{SMTPClientError} is negative, it is excluded from the string representation. """ err = smtp.SMTPClientError(-1, "foo bar") self.assertEqual(str(err), "foo bar") def test_strWithLog(self): """ If a line log is supplied to L{SMTPClientError}, its contents are included in the string representation of the exception instance. """ log = LineLog(10) log.append("testlog") log.append("secondline") err = smtp.SMTPClientError(100, "test error", log=log.str()) self.assertEqual( str(err), "100 test error\n" "testlog\n" "secondline\n") class SenderMixinSentMailTests(unittest.TestCase): """ Tests for L{smtp.SenderMixin.sentMail}, used in particular by L{smtp.SMTPSenderFactory} and L{smtp.ESMTPSenderFactory}. """ def test_onlyLogFailedAddresses(self): """ L{smtp.SenderMixin.sentMail} adds only the addresses with failing SMTP response codes to the log passed to the factory's errback. 
""" onDone = self.assertFailure(defer.Deferred(), smtp.SMTPDeliveryError) onDone.addCallback(lambda e: self.assertEqual( e.log, "bob@example.com: 199 Error in sending.\n")) clientFactory = smtp.SMTPSenderFactory( 'source@address', 'recipient@address', StringIO("Message body"), onDone, retries=0, timeout=0.5) client = clientFactory.buildProtocol( address.IPv4Address('TCP', 'example.net', 25)) addresses = [("alice@example.com", 200, "No errors here!"), ("bob@example.com", 199, "Error in sending.")] client.sentMail(199, "Test response", 1, addresses, client.log) return onDone class ESMTPDowngradeTestCase(unittest.TestCase): """ Tests for the ESMTP -> SMTP downgrade functionality in L{smtp.ESMTPClient}. """ def setUp(self): self.clientProtocol = smtp.ESMTPClient( b"testpassword", None, b"testuser") def test_requireHELOFallbackOperates(self): """ If both authentication and transport security are not required, and it is asked for, it will fall back to allowing HELO. """ transport = StringTransport() self.clientProtocol.requireAuthentication = False self.clientProtocol.requireTransportSecurity = False self.clientProtocol.heloFallback = True self.clientProtocol.makeConnection(transport) self.clientProtocol.dataReceived(b"220 localhost\r\n") transport.clear() self.clientProtocol.dataReceived(b"500 not an esmtp server\r\n") self.assertEqual(b"HELO testuser\r\n", transport.value()) def test_requireAuthFailsHELOFallback(self): """ If authentication is required, and HELO fallback is on, HELO fallback must not be honoured, as authentication requires EHLO to succeed. 
""" transport = StringTransport() self.clientProtocol.requireAuthentication = True self.clientProtocol.requireTransportSecurity = False self.clientProtocol.heloFallback = True self.clientProtocol.makeConnection(transport) self.clientProtocol.dataReceived(b"220 localhost\r\n") transport.clear() self.clientProtocol.dataReceived(b"500 not an esmtp server\r\n") self.assertEqual("QUIT\r\n", transport.value()) def test_requireTLSFailsHELOFallback(self): """ If TLS is required and the connection is insecure, HELO fallback must not be honoured, as STARTTLS requires EHLO to succeed. """ transport = StringTransport() self.clientProtocol.requireAuthentication = False self.clientProtocol.requireTransportSecurity = True self.clientProtocol.heloFallback = True self.clientProtocol.makeConnection(transport) self.clientProtocol.dataReceived(b"220 localhost\r\n") transport.clear() self.clientProtocol.dataReceived(b"500 not an esmtp server\r\n") self.assertEqual(b"QUIT\r\n", transport.value()) def test_requireTLSAndHELOFallbackSucceedsIfOverTLS(self): """ If TLS is provided at the transport level, we can honour the HELO fallback if we're set to require TLS. """ transport = StringTransport() directlyProvides(transport, interfaces.ISSLTransport) self.clientProtocol.requireAuthentication = False self.clientProtocol.requireTransportSecurity = True self.clientProtocol.heloFallback = True self.clientProtocol.makeConnection(transport) self.clientProtocol.dataReceived(b"220 localhost\r\n") transport.clear() self.clientProtocol.dataReceived(b"500 not an esmtp server\r\n") self.assertEqual(b"HELO testuser\r\n", transport.value()) class SSLTestCase(unittest.TestCase): """ Tests for the TLS negotiation done by L{smtp.ESMTPClient}. 
""" if sslSkip is not None: skip = sslSkip SERVER_GREETING = "220 localhost NO UCE NO UBE NO RELAY PROBES ESMTP\r\n" EHLO_RESPONSE = "250-localhost Hello 127.0.0.1, nice to meet you\r\n" def setUp(self): self.clientProtocol = smtp.ESMTPClient( "testpassword", ClientTLSContext(), "testuser") self.clientProtocol.requireTransportSecurity = True self.clientProtocol.getMailFrom = lambda: "test@example.org" def _requireTransportSecurityOverSSLTest(self, capabilities): """ Verify that when L{smtp.ESMTPClient} connects to a server over a transport providing L{ISSLTransport}, C{requireTransportSecurity} is C{True}, and it is presented with the given capabilities, it will try to send its mail and not first attempt to negotiate TLS using the I{STARTTLS} protocol action. @param capabilities: Bytes to include in the test server's capability response. These must be formatted exactly as required by the protocol, including a line which ends the capability response. @type param: L{bytes} @raise: C{self.failureException} if the behavior of C{self.clientProtocol} is not as described. """ transport = StringTransport() directlyProvides(transport, interfaces.ISSLTransport) self.clientProtocol.makeConnection(transport) # Get the handshake out of the way self.clientProtocol.dataReceived(self.SERVER_GREETING) transport.clear() # Tell the client about the server's capabilities self.clientProtocol.dataReceived(self.EHLO_RESPONSE + capabilities) # The client should now try to send a message - without first trying to # negotiate TLS, since the transport is already secure. self.assertEqual( b"MAIL FROM:<test@example.org>\r\n", transport.value()) def test_requireTransportSecurityOverSSL(self): """ When C{requireTransportSecurity} is C{True} and the client is connected over an SSL transport, mail may be delivered. 
""" self._requireTransportSecurityOverSSLTest(b"250 AUTH LOGIN\r\n") def test_requireTransportSecurityTLSOffered(self): """ When C{requireTransportSecurity} is C{True} and the client is connected over a non-SSL transport, if the server offers the I{STARTTLS} extension, it is used before mail is delivered. """ transport = StringTransport() self.clientProtocol.makeConnection(transport) # Get the handshake out of the way self.clientProtocol.dataReceived(self.SERVER_GREETING) transport.clear() # Tell the client about the server's capabilities - including STARTTLS self.clientProtocol.dataReceived( self.EHLO_RESPONSE + "250-AUTH LOGIN\r\n" "250 STARTTLS\r\n") # The client should try to start TLS before sending the message. self.assertEqual("STARTTLS\r\n", transport.value()) def test_requireTransportSecurityTLSOfferedOverSSL(self): """ When C{requireTransportSecurity} is C{True} and the client is connected over an SSL transport, if the server offers the I{STARTTLS} extension, it is not used before mail is delivered. """ self._requireTransportSecurityOverSSLTest( b"250-AUTH LOGIN\r\n" b"250 STARTTLS\r\n") def test_requireTransportSecurityTLSNotOffered(self): """ When C{requireTransportSecurity} is C{True} and the client is connected over a non-SSL transport, if the server does not offer the I{STARTTLS} extension, mail is not delivered. """ transport = StringTransport() self.clientProtocol.makeConnection(transport) # Get the handshake out of the way self.clientProtocol.dataReceived(self.SERVER_GREETING) transport.clear() # Tell the client about the server's capabilities - excluding STARTTLS self.clientProtocol.dataReceived( self.EHLO_RESPONSE + "250 AUTH LOGIN\r\n") # The client give up self.assertEqual("QUIT\r\n", transport.value()) def test_esmtpClientTlsModeDeprecationGet(self): """ L{smtp.ESMTPClient.tlsMode} is deprecated. 
""" val = self.clientProtocol.tlsMode del val warningsShown = self.flushWarnings( offendingFunctions=[self.test_esmtpClientTlsModeDeprecationGet]) self.assertEqual(len(warningsShown), 1) self.assertIdentical( warningsShown[0]['category'], DeprecationWarning) self.assertEqual( warningsShown[0]['message'], "tlsMode attribute of twisted.mail.smtp.ESMTPClient " "is deprecated since Twisted 13.0") def test_esmtpClientTlsModeDeprecationGetAttributeError(self): """ L{smtp.ESMTPClient.__getattr__} raises an attribute error for other attribute names which do not exist. """ self.assertRaises( AttributeError, lambda: self.clientProtocol.doesNotExist) def test_esmtpClientTlsModeDeprecationSet(self): """ L{smtp.ESMTPClient.tlsMode} is deprecated. """ self.clientProtocol.tlsMode = False warningsShown = self.flushWarnings( offendingFunctions=[self.test_esmtpClientTlsModeDeprecationSet]) self.assertEqual(len(warningsShown), 1) self.assertIdentical( warningsShown[0]['category'], DeprecationWarning) self.assertEqual( warningsShown[0]['message'], "tlsMode attribute of twisted.mail.smtp.ESMTPClient " "is deprecated since Twisted 13.0") class AbortableStringTransport(StringTransport): """ A version of L{StringTransport} that supports C{abortConnection}. """ # This should be replaced by a common version in #6530. aborting = False def abortConnection(self): """ A testable version of the C{ITCPTransport.abortConnection} method. Since this is a special case of closing the connection, C{loseConnection} is also called. """ self.aborting = True self.loseConnection() class SendmailTests(unittest.TestCase): """ Tests for L{twisted.mail.smtp.sendmail}. """ def test_defaultReactorIsGlobalReactor(self): """ The default C{reactor} parameter of L{twisted.mail.smtp.sendmail} is L{twisted.internet.reactor}. 
""" args, varArgs, keywords, defaults = inspect.getargspec(smtp.sendmail) self.assertEqual(reactor, defaults[2]) def test_honorsESMTPArguments(self): """ L{twisted.mail.smtp.sendmail} creates the ESMTP factory with the ESMTP arguments. """ reactor = MemoryReactor() smtp.sendmail("localhost", "source@address", "recipient@address", "message", reactor=reactor, username="foo", password="bar", requireTransportSecurity=True, requireAuthentication=True) factory = reactor.tcpClients[0][2] self.assertEqual(factory._requireTransportSecurity, True) self.assertEqual(factory._requireAuthentication, True) self.assertEqual(factory.username, "foo") self.assertEqual(factory.password, "bar") def test_messageFilePassthrough(self): """ L{twisted.mail.smtp.sendmail} will pass through the message untouched if it is a file-like object. """ reactor = MemoryReactor() messageFile = StringIO(b"File!") smtp.sendmail("localhost", "source@address", "recipient@address", messageFile, reactor=reactor) factory = reactor.tcpClients[0][2] self.assertIs(factory.file, messageFile) def test_messageStringMadeFile(self): """ L{twisted.mail.smtp.sendmail} will turn non-file-like objects (eg. strings) into file-like objects before sending. """ reactor = MemoryReactor() smtp.sendmail("localhost", "source@address", "recipient@address", "message", reactor=reactor) factory = reactor.tcpClients[0][2] messageFile = factory.file messageFile.seek(0) self.assertEqual(messageFile.read(), "message") def test_senderDomainName(self): """ L{twisted.mail.smtp.sendmail} passes through the sender domain name, if provided. 
""" reactor = MemoryReactor() smtp.sendmail("localhost", "source@address", "recipient@address", "message", reactor=reactor, senderDomainName="foo") factory = reactor.tcpClients[0][2] self.assertEqual(factory.domain, "foo") def test_cancelBeforeConnectionMade(self): """ When a user cancels L{twisted.mail.smtp.sendmail} before the connection is made, the connection is closed by L{twisted.internet.interfaces.IConnector.disconnect}. """ reactor = MemoryReactor() d = smtp.sendmail("localhost", "source@address", "recipient@address", "message", reactor=reactor) d.cancel() self.assertEqual(reactor.connectors[0]._disconnected, True) failure = self.failureResultOf(d) failure.trap(defer.CancelledError) def test_cancelAfterConnectionMade(self): """ When a user cancels L{twisted.mail.smtp.sendmail} after the connection is made, the connection is closed by L{twisted.internet.interfaces.ITransport.abortConnection}. """ reactor = MemoryReactor() transport = AbortableStringTransport() d = smtp.sendmail("localhost", "source@address", "recipient@address", "message", reactor=reactor) factory = reactor.tcpClients[0][2] p = factory.buildProtocol(None) p.makeConnection(transport) d.cancel() self.assertEqual(transport.aborting, True) self.assertEqual(transport.disconnecting, True) failure = self.failureResultOf(d) failure.trap(defer.CancelledError)
gpl-2.0
blaquee/volatility
volatility/plugins/malware/idt.py
13
14640
# Volatility # Copyright (C) 2007-2013 Volatility Foundation # Copyright (c) 2010, 2011, 2012 Michael Ligh <michael.ligh@mnin.org> # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # import volatility.utils as utils import volatility.obj as obj import volatility.plugins.common as common import volatility.win32.modules as modules import volatility.win32.tasks as tasks import volatility.debug as debug import volatility.plugins.malware.malfind as malfind import volatility.exceptions as exceptions from volatility.renderers import TreeGrid from volatility.renderers.basic import Address, Hex #-------------------------------------------------------------------------------- # constants #-------------------------------------------------------------------------------- GDT_DESCRIPTORS = dict(enumerate([ "Data RO", "Data RO Ac", "Data RW", "Data RW Ac", "Data RO E", "Data RO EA", "Data RW E", "Data RW EA", "Code EO", "Code EO Ac", "Code RE", "Code RE Ac", "Code EO C", "Code EO CA", "Code RE C", "Code RE CA", "<Reserved>", "TSS16 Avl", "LDT", "TSS16 Busy", "CallGate16", "TaskGate", "Int Gate16", "TrapGate16", "<Reserved>", "TSS32 Avl", "<Reserved>", "TSS32 Busy", "CallGate32", "<Reserved>", "Int Gate32", "TrapGate32", ])) #-------------------------------------------------------------------------------- # object classes 
#-------------------------------------------------------------------------------- class _KIDTENTRY(obj.CType): """Class for interrupt descriptors""" @property def Address(self): """Return the address of the IDT entry handler""" if self.ExtendedOffset == 0: return 0 return (self.ExtendedOffset.v() << 16 | self.Offset.v()) class _KGDTENTRY(obj.CType): """A class for GDT entries""" @property def Type(self): """Get a string name of the descriptor type""" flag = self.HighWord.Bits.Type.v() & 1 << 4 typeval = self.HighWord.Bits.Type.v() & ~(1 << 4) if flag == 0: typeval += 16 return GDT_DESCRIPTORS.get(typeval, "UNKNOWN") @property def Base(self): """Get the base (start) of memory for this GDT""" return (self.BaseLow + ((self.HighWord.Bits.BaseMid + (self.HighWord.Bits.BaseHi << 8)) << 16)) @property def Limit(self): """Get the limit (end) of memory for this GDT""" limit = (self.HighWord.Bits.LimitHi.v() << 16) | self.LimitLow.v() if self.HighWord.Bits.Granularity == 1: limit = (limit + 1) * 0x1000 limit -= 1 return limit @property def CallGate(self): """Get the call gate address""" return self.HighWord.v() & 0xffff0000 | self.LimitLow.v() @property def Present(self): """Returns True if the entry is present""" return self.HighWord.Bits.Pres == 1 @property def Granularity(self): """Returns True if page granularity is used. 
Otherwise returns False indicating byte granularity is used.""" return self.HighWord.Bits.Granularity == 1 @property def Dpl(self): """Returns the descriptor privilege level""" return self.HighWord.Bits.Dpl #-------------------------------------------------------------------------------- # profile modifications #-------------------------------------------------------------------------------- class MalwareIDTGDTx86(obj.ProfileModification): before = ['WindowsObjectClasses', 'WindowsOverlay'] conditions = {'os': lambda x: x == 'windows', 'memory_model': lambda x: x == '32bit'} def modification(self, profile): profile.object_classes.update({ '_KIDTENTRY': _KIDTENTRY, '_KGDTENTRY': _KGDTENTRY, }) profile.merge_overlay({"_KPCR" : [None, {'IDT': [None, ["pointer", ["array", 256, ['_KIDTENTRY']]]], }]}) # Since the real GDT size is read from a register, we'll just assume # that there are 128 entries (which is normal for most OS) profile.merge_overlay({"_KPCR" : [None, {'GDT': [None, ["pointer", ["array", 128, ['_KGDTENTRY']]]], }]}) #-------------------------------------------------------------------------------- # GDT plugin #-------------------------------------------------------------------------------- class GDT(common.AbstractWindowsCommand): "Display Global Descriptor Table" @staticmethod def is_valid_profile(profile): return (profile.metadata.get('os', 'unknown') == 'windows' and profile.metadata.get('memory_model', '32bit') == '32bit') def calculate(self): addr_space = utils.load_as(self._config) # Currently we only support x86. The x64 does still have a GDT # but hooking is prohibited and results in bugcheck. 
if not self.is_valid_profile(addr_space.profile): debug.error("This command does not support the selected profile.") for kpcr in tasks.get_kdbg(addr_space).kpcrs(): for i, entry in kpcr.gdt_entries(): yield i, entry def unified_output(self, data): # Note: binary data is left out for now in VERBOSE mode return TreeGrid([("CPU", int), ("Sel", Address), ("Base", Address), ("Limit", Address), ("Type", str), ("DPL", int), ("Gr", str), ("Pr", str)], self.generator(data)) def generator(self, data): for n, entry in data: selector = n * 8 # Is the entry present? This applies to all types of GDT entries if entry.Present: present = "P" else: present = "Np" # The base, limit, and granularity is calculated differently # for 32bit call gates than they are for all other types. if entry.Type == 'CallGate32': base = entry.CallGate limit = 0 granularity = '-' else: base = entry.Base limit = entry.Limit if entry.Granularity: granularity = "Pg" else: granularity = "By" # The parent is GDT. The grand-parent is _KPCR cpu_number = entry.obj_parent.obj_parent.ProcessorBlock.Number yield (0, [int(cpu_number), Address(selector), Address(base), Address(limit), str(entry.Type), int(entry.Dpl), str(granularity), str(present)]) def render_text(self, outfd, data): self.table_header(outfd, [('CPU', '>6'), ('Sel', '[addr]'), ('Base', '[addrpad]'), ('Limit', '[addrpad]'), ('Type', '<14'), ('DPL', '>6'), ('Gr', '<4'), ('Pr', '<4') ]) for n, entry in data: selector = n * 8 # Is the entry present? This applies to all types of GDT entries if entry.Present: present = "P" else: present = "Np" # The base, limit, and granularity is calculated differently # for 32bit call gates than they are for all other types. if entry.Type == 'CallGate32': base = entry.CallGate limit = 0 granularity = '-' else: base = entry.Base limit = entry.Limit if entry.Granularity: granularity = "Pg" else: granularity = "By" # The parent is GDT. 
The grand-parent is _KPCR cpu_number = entry.obj_parent.obj_parent.ProcessorBlock.Number self.table_row(outfd, cpu_number, selector, base, limit, entry.Type, entry.Dpl, granularity, present) #-------------------------------------------------------------------------------- # IDT plugin #-------------------------------------------------------------------------------- class IDT(common.AbstractWindowsCommand): "Display Interrupt Descriptor Table" @staticmethod def is_valid_profile(profile): return (profile.metadata.get('os', 'unknown') == 'windows' and profile.metadata.get('memory_model', '32bit') == '32bit') @staticmethod def get_section_name(mod, addr): """Get the name of the PE section containing the specified address. @param mod: an _LDR_DATA_TABLE_ENTRY @param addr: virtual address to lookup @returns string PE section name """ try: dos_header = obj.Object("_IMAGE_DOS_HEADER", offset = mod.DllBase, vm = mod.obj_vm) nt_header = dos_header.get_nt_header() except (ValueError, exceptions.SanityCheckException): return '' for sec in nt_header.get_sections(): if (addr > mod.DllBase + sec.VirtualAddress and addr < sec.Misc.VirtualSize + (mod.DllBase + sec.VirtualAddress)): return str(sec.Name or '') return '' def calculate(self): addr_space = utils.load_as(self._config) # Currently we only support x86. The x64 does still have a IDT # but hooking is prohibited and results in bugcheck. if not self.is_valid_profile(addr_space.profile): debug.error("This command does not support the selected profile.") mods = dict((addr_space.address_mask(mod.DllBase), mod) for mod in modules.lsmod(addr_space)) mod_addrs = sorted(mods.keys()) for kpcr in tasks.get_kdbg(addr_space).kpcrs(): # Get the GDT for access to selector bases gdt = dict((i * 8, sd) for i, sd in kpcr.gdt_entries()) for i, entry in kpcr.idt_entries(): # Where the IDT entry points. addr = entry.Address # Per MITRE, add the GDT selector base if available. 
# This allows us to detect sneaky attempts to hook IDT # entries by changing the entry's GDT selector. gdt_entry = gdt.get(entry.Selector.v()) if gdt_entry != None and "Code" in gdt_entry.Type: addr += gdt_entry.Base # Lookup the function's owner module = tasks.find_module(mods, mod_addrs, addr_space.address_mask(addr)) yield i, entry, addr, module def unified_output(self, data): # Note: binary data is left out for now in VERBOSE mode return TreeGrid([("CPU", Hex), ("Index", Hex), ("Selector", Address), ("Value", Address), ("Module", str), ("Section", str)], self.generator(data)) def generator(self, data): for n, entry, addr, module in data: if addr == 0: module_name = "NOT USED" sect_name = "" elif module: module_name = str(module.BaseDllName or "") sect_name = self.get_section_name(module, addr) else: module_name = "UNKNOWN" sect_name = "" # The parent is IDT. The grand-parent is _KPCR. cpu_number = entry.obj_parent.obj_parent.ProcessorBlock.Number yield (0, [Hex(cpu_number), Hex(n), Address(entry.Selector), Address(addr), str(module_name), str(sect_name)]) def render_text(self, outfd, data): self.table_header(outfd, [('CPU', '>6X'), ('Index', '>6X'), ('Selector', '[addr]'), ('Value', '[addrpad]'), ('Module', '20'), ('Section', '12'), ]) for n, entry, addr, module in data: if addr == 0: module_name = "NOT USED" sect_name = '' elif module: module_name = str(module.BaseDllName or '') sect_name = self.get_section_name(module, addr) else: module_name = "UNKNOWN" sect_name = '' # The parent is IDT. The grand-parent is _KPCR. cpu_number = entry.obj_parent.obj_parent.ProcessorBlock.Number self.table_row(outfd, cpu_number, n, entry.Selector, addr, module_name, sect_name) if self._config.verbose: data = entry.obj_vm.zread(addr, 32) outfd.write("\n".join( ["{0:#x} {1:<16} {2}".format(o, h, i) for o, i, h in malfind.Disassemble(data = data, start = addr, stoponret = True) ])) outfd.write("\n")
gpl-2.0
azag0/pyberny
src/berny/Math.py
1
4059
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import numpy as np __all__ = ['fit_cubic', 'fit_quartic', 'findroot'] def rms(A): if A.size == 0: return None return np.sqrt(np.sum(A ** 2) / A.size) def pinv(A, log=lambda _: None): U, D, V = np.linalg.svd(A) thre = 1e3 thre_log = 1e8 gaps = D[:-1] / D[1:] try: n = np.flatnonzero(gaps > thre)[0] except IndexError: n = len(gaps) else: gap = gaps[n] if gap < thre_log: log('Pseudoinverse gap of only: {:.1e}'.format(gap)) D[n + 1 :] = 0 D[: n + 1] = 1 / D[: n + 1] return U.dot(np.diag(D)).dot(V) def cross(a, b): return np.array( [ a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0], ] ) def fit_cubic(y0, y1, g0, g1): """Fit cubic polynomial to function values and derivatives at x = 0, 1. Returns position and function value of minimum if fit succeeds. Fit does not succeeds if 1. polynomial doesn't have extrema or 2. maximum is from (0,1) or 3. maximum is closer to 0.5 than minimum """ a = 2 * (y0 - y1) + g0 + g1 b = -3 * (y0 - y1) - 2 * g0 - g1 p = np.array([a, b, g0, y0]) r = np.roots(np.polyder(p)) if not np.isreal(r).all(): return None, None r = sorted(x.real for x in r) if p[0] > 0: maxim, minim = r else: minim, maxim = r if 0 < maxim < 1 and abs(minim - 0.5) > abs(maxim - 0.5): return None, None return minim, np.polyval(p, minim) def fit_quartic(y0, y1, g0, g1): """Fit constrained quartic polynomial to function values and erivatives at x = 0,1. Returns position and function value of minimum or None if fit fails or has a maximum. Quartic polynomial is constrained such that it's 2nd derivative is zero at just one point. This ensures that it has just one local extremum. No such or two such quartic polynomials always exist. From the two, the one with lower minimum is chosen. 
""" def g(y0, y1, g0, g1, c): a = c + 3 * (y0 - y1) + 2 * g0 + g1 b = -2 * c - 4 * (y0 - y1) - 3 * g0 - g1 return np.array([a, b, c, g0, y0]) def quart_min(p): r = np.roots(np.polyder(p)) is_real = np.isreal(r) if is_real.sum() == 1: minim = r[is_real][0].real else: minim = r[(r == max(-abs(r))) | (r == -max(-abs(r)))][0].real return minim, np.polyval(p, minim) # discriminant of d^2y/dx^2=0 D = -((g0 + g1) ** 2) - 2 * g0 * g1 + 6 * (y1 - y0) * (g0 + g1) - 6 * (y1 - y0) ** 2 if D < 1e-11: return None, None else: m = -5 * g0 - g1 - 6 * y0 + 6 * y1 p1 = g(y0, y1, g0, g1, 0.5 * (m + np.sqrt(2 * D))) p2 = g(y0, y1, g0, g1, 0.5 * (m - np.sqrt(2 * D))) if p1[0] < 0 and p2[0] < 0: return None, None [minim1, minval1] = quart_min(p1) [minim2, minval2] = quart_min(p2) if minval1 < minval2: return minim1, minval1 else: return minim2, minval2 class FindrootException(Exception): pass def findroot(f, lim): """Find root of increasing function on (-inf,lim). Assumes f(-inf) < 0, f(lim) > 0. """ d = 1.0 for _ in range(1000): val = f(lim - d) if val > 0: break d = d / 2 # find d so that f(lim-d) > 0 else: raise RuntimeError('Cannot find f(x) > 0') x = lim - d # initial guess dx = 1e-10 # step for numerical derivative fx = f(x) err = abs(fx) for _ in range(1000): fxpdx = f(x + dx) dxf = (fxpdx - fx) / dx x = x - fx / dxf fx = f(x) err_new = abs(fx) if err_new >= err: return x err = err_new else: raise FindrootException()
mpl-2.0
Moriadry/tensorflow
tensorflow/python/kernel_tests/stack_ops_test.py
49
10505
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.stack_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class StackOpTest(test.TestCase): def _testStackPushPop(self, use_gpu): with self.test_session(use_gpu=use_gpu): h = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") c = gen_data_flow_ops._stack_push_v2(h, [[4.0, 5.0]]) with ops.control_dependencies([c]): c1 = gen_data_flow_ops._stack_pop_v2(h, dtypes.float32) self.assertAllClose([[4.0, 5.0]], c1.eval()) def testStackPushPop(self): self._testStackPushPop(use_gpu=False) self._testStackPushPop(use_gpu=True) def _testStackPushPopSwap(self, use_gpu): with self.test_session(use_gpu=use_gpu): a = np.arange(2000) x = constant_op.constant(a, dtype=dtypes.float32) h = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") c = 
gen_data_flow_ops._stack_push_v2(h, x, swap_memory=True) with ops.control_dependencies([c]): c1 = gen_data_flow_ops._stack_pop_v2(h, dtypes.float32) self.assertAllClose(a, c1.eval()) def testStackPushPopSwap(self): self._testStackPushPopSwap(use_gpu=False) self._testStackPushPopSwap(use_gpu=True) def _testStackWhileSwap(self, use_gpu): with self.test_session(use_gpu=use_gpu): n = constant_op.constant(0) h = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") def c(x): return math_ops.less(x, 10) def b(x): with ops.control_dependencies([x]): a = constant_op.constant(np.ones(2000), dtype=dtypes.float32) v = gen_data_flow_ops._stack_push_v2(h, a, swap_memory=True) with ops.control_dependencies([v]): return math_ops.add(x, 1) r = control_flow_ops.while_loop(c, b, [n]) v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32) def c1(x, y): del y return math_ops.greater(x, 0) def b1(x, y): nx = math_ops.subtract(x, 1) ny = y + gen_data_flow_ops._stack_pop_v2(h, dtypes.float32) return [nx, ny] _, ry = control_flow_ops.while_loop( c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()]) self.assertAllClose(np.ones(2000) * 10.0, ry.eval()) def testStackWhileSwap(self): self._testStackWhileSwap(use_gpu=False) self._testStackWhileSwap(use_gpu=True) def _testMultiStack(self, use_gpu): with self.test_session(use_gpu=use_gpu): h1 = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") c1 = gen_data_flow_ops._stack_push_v2(h1, 4.0) with ops.control_dependencies([c1]): c1 = gen_data_flow_ops._stack_pop_v2(h1, dtypes.float32) h2 = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="bar") c2 = gen_data_flow_ops._stack_push_v2(h2, 5.0) with ops.control_dependencies([c2]): c2 = gen_data_flow_ops._stack_pop_v2(h2, dtypes.float32) r = c1 + c2 self.assertAllClose(9.0, r.eval()) def testMultiStack(self): self._testMultiStack(use_gpu=False) self._testMultiStack(use_gpu=True) def _testSameNameStacks(self, 
use_gpu): """Different stacks with the same name do not interfere.""" with self.test_session(use_gpu=use_gpu) as sess: h1 = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") h2 = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") c1 = gen_data_flow_ops._stack_push_v2(h1, 4.0) with ops.control_dependencies([c1]): c2 = gen_data_flow_ops._stack_push_v2(h2, 5.0) with ops.control_dependencies([c2]): pop1 = gen_data_flow_ops._stack_pop_v2(h1, dtypes.float32) pop2 = gen_data_flow_ops._stack_pop_v2(h2, dtypes.float32) out1, out2 = sess.run([pop1, pop2]) self.assertAllClose(out1, 4.0) self.assertAllClose(out2, 5.0) def testSameNameStacks(self): self._testSameNameStacks(use_gpu=False) self._testSameNameStacks(use_gpu=True) def _testCloseStack(self, use_gpu): with self.test_session(use_gpu=use_gpu) as sess: h = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") c1 = gen_data_flow_ops._stack_close_v2(h) sess.run(c1) def testCloseStack(self): self._testCloseStack(use_gpu=False) self._testCloseStack(use_gpu=True) def _testPushCloseStack(self, use_gpu): with self.test_session(use_gpu=use_gpu) as sess: h = gen_data_flow_ops._stack_v2( -1, elem_type=dtypes.float32, stack_name="foo") c = gen_data_flow_ops._stack_push_v2(h, [[4.0, 5.0]]) with ops.control_dependencies([c]): c1 = gen_data_flow_ops._stack_close_v2(h) sess.run(c1) def testPushCloseStack(self): self._testPushCloseStack(use_gpu=False) self._testPushCloseStack(use_gpu=True) class StackOpRefTest(test.TestCase): """Tests for deprecated non-resource variant of stack ops.""" def _testStackPushPop(self, use_gpu): with self.test_session(use_gpu=use_gpu): h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]]) with ops.control_dependencies([c]): c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32) self.assertAllClose([[4.0, 5.0]], c1.eval()) def testStackPushPop(self): 
self._testStackPushPop(use_gpu=False) self._testStackPushPop(use_gpu=True) def _testStackPushPopSwap(self, use_gpu): with self.test_session(use_gpu=use_gpu): a = np.arange(2000) x = constant_op.constant(a, dtype=dtypes.float32) h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") c = gen_data_flow_ops._stack_push(h, x, swap_memory=True) with ops.control_dependencies([c]): c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32) self.assertAllClose(a, c1.eval()) def testStackPushPopSwap(self): self._testStackPushPopSwap(use_gpu=False) self._testStackPushPopSwap(use_gpu=True) def _testMultiStack(self, use_gpu): with self.test_session(use_gpu=use_gpu): h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") c1 = gen_data_flow_ops._stack_push(h1, 4.0) with ops.control_dependencies([c1]): c1 = gen_data_flow_ops._stack_pop(h1, dtypes.float32) h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar") c2 = gen_data_flow_ops._stack_push(h2, 5.0) with ops.control_dependencies([c2]): c2 = gen_data_flow_ops._stack_pop(h2, dtypes.float32) r = c1 + c2 self.assertAllClose(9.0, r.eval()) def _testStackWhileSwap(self, use_gpu): with self.test_session(use_gpu=use_gpu): n = constant_op.constant(0) h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") def c(x): return math_ops.less(x, 10) def b(x): with ops.control_dependencies([x]): a = constant_op.constant(np.ones(2000), dtype=dtypes.float32) v = gen_data_flow_ops._stack_push(h, a, swap_memory=True) with ops.control_dependencies([v]): return math_ops.add(x, 1) r = control_flow_ops.while_loop(c, b, [n]) v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32) def c1(x, y): del y return math_ops.greater(x, 0) def b1(x, y): nx = math_ops.subtract(x, 1) ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32) return [nx, ny] _, ry = control_flow_ops.while_loop( c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()]) self.assertAllClose(np.ones(2000) * 10.0, ry.eval()) def 
testStackWhileSwap(self): self._testStackWhileSwap(use_gpu=False) self._testStackWhileSwap(use_gpu=True) def testMultiStack(self): self._testMultiStack(use_gpu=False) self._testMultiStack(use_gpu=True) def _testSameNameStacks(self, use_gpu): with self.test_session(use_gpu=use_gpu): h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") c1 = gen_data_flow_ops._stack_push(h1, 4.0) h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") c2 = gen_data_flow_ops._stack_push(h2, 5.0) _ = c1 + c2 self.assertNotEqual(h1.eval()[1], h2.eval()[1]) def testSameNameStacks(self): self._testSameNameStacks(use_gpu=False) self._testSameNameStacks(use_gpu=True) def _testCloseStack(self, use_gpu): with self.test_session(use_gpu=use_gpu) as sess: h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") c1 = gen_data_flow_ops._stack_close(h) sess.run(c1) def testCloseStack(self): self._testCloseStack(use_gpu=False) self._testCloseStack(use_gpu=True) def _testPushCloseStack(self, use_gpu): with self.test_session(use_gpu=use_gpu) as sess: h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo") c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]]) with ops.control_dependencies([c]): c1 = gen_data_flow_ops._stack_close(h) sess.run(c1) def testPushCloseStack(self): self._testPushCloseStack(use_gpu=False) self._testPushCloseStack(use_gpu=True) if __name__ == "__main__": test.main()
apache-2.0
ntuecon/server
pyenv/Lib/site-packages/twisted/python/test/pullpipe.py
21
1220
# -*- test-case-name: twisted.python.test.test_sendmsg -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. import sys import os import socket from struct import unpack from twisted.python.sendmsg import recvmsg def recvfd(socketfd): """ Receive a file descriptor from a L{sendmsg} message on the given C{AF_UNIX} socket. @param socketfd: An C{AF_UNIX} socket, attached to another process waiting to send sockets via the ancillary data mechanism in L{send1msg}. @param fd: C{int} @return: a 2-tuple of (new file descriptor, description). @rtype: 2-tuple of (C{int}, C{bytes}) """ ourSocket = socket.fromfd(socketfd, socket.AF_UNIX, socket.SOCK_STREAM) data, ancillary, flags = recvmsg(ourSocket) [(cmsgLevel, cmsgType, packedFD)] = ancillary # cmsgLevel and cmsgType really need to be SOL_SOCKET / SCM_RIGHTS, but # since those are the *only* standard values, there's not much point in # checking. [unpackedFD] = unpack("i", packedFD) return (unpackedFD, data) if __name__ == '__main__': fd, description = recvfd(int(sys.argv[1])) os.write(fd, b"Test fixture data: " + description + b".\n") os.close(fd)
bsd-3-clause
voutilad/courtlistener
cl/search/migrations/0009_nuke_nulls_on_char_fields.py
1
2724
# -*- coding: utf-8 -*-
"""Convert NULL-able char fields on Docket to NOT NULL with '' defaults.

Each AlterField below sets ``default=''`` with ``preserve_default=False``
and ``blank=True``, following the Django convention of storing empty
strings rather than NULL in character columns.  Depends on the companion
data migration 0009_blankify_fields, which blanks out existing NULLs
first.
"""
from __future__ import unicode_literals

from django.db import migrations, models
import cl.lib.model_helpers
import cl.lib.storage


class Migration(migrations.Migration):

    dependencies = [
        ('search', '0008_load_new_courts'),
        ('search', '0009_blankify_fields'),
    ]

    operations = [
        migrations.AlterField(
            model_name='docket',
            name='cause',
            field=models.CharField(default='', help_text=b'The cause for the case.', max_length=200, blank=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='docket',
            name='docket_number',
            field=models.CharField(default='', help_text=b'The docket numbers of a case, can be consolidated and quite long', max_length=5000, db_index=True, blank=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='docket',
            name='filepath_ia',
            field=models.CharField(default='', help_text=b'Path to the Docket XML page in The Internet Archive', max_length=1000, blank=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='docket',
            name='filepath_local',
            field=models.FileField(default='', upload_to=cl.lib.model_helpers.make_recap_path, storage=cl.lib.storage.IncrementingFileSystemStorage(), max_length=1000, blank=True, help_text=b"Path to RECAP's Docket XML page."),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='docket',
            name='jurisdiction_type',
            field=models.CharField(default='', help_text=b"Stands for jurisdiction in RECAP XML docket. For example, 'Diversity', 'U.S. Government Defendant'.", max_length=100, blank=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='docket',
            name='jury_demand',
            field=models.CharField(default='', help_text=b'The compensation demand.', max_length=500, blank=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='docket',
            name='nature_of_suit',
            field=models.CharField(default='', help_text=b'The nature of suit code from PACER.', max_length=100, blank=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='docket',
            name='slug',
            field=models.SlugField(default='', help_text=b'URL that the document should map to (the slug)', max_length=75, db_index=False, blank=True),
            preserve_default=False,
        ),
    ]
agpl-3.0
wkfwkf/statsmodels
statsmodels/sandbox/regression/tests/results_gmm_griliches_iter.py
34
7976
import numpy as np est = dict( rank = 13, N = 758, Q = .0150568875809373, J = 11.41312078635046, J_df = 2, k_1 = 13, converged = 1, has_xtinst = 0, type = 1, n_eq = 1, k = 13, n_moments = 15, k_aux = 13, k_eq_model = 0, ic = 6, k_eq = 13, cmdline = "gmm (lw - {xb:s iq expr tenure rns smsa dyear*} - {b0}) , instruments(expr tenure rns smsa dyear* med kww age mrt) igmm", cmd = "gmm", estat_cmd = "gmm_estat", predict = "gmm_p", marginsnotok = "_ALL", eqnames = "1", technique = "gn", winit = "Unadjusted", estimator = "igmm", wmatrix = "robust", vce = "robust", vcetype = "Robust", params = "xb_s xb_iq xb_expr xb_tenure xb_rns xb_smsa xb_dyear_67 xb_dyear_68 xb_dyear_69 xb_dyear_70 xb_dyear_71 xb_dyear_73 b0", inst_1 = "expr tenure rns smsa dyear_67 dyear_68 dyear_69 dyear_70 dyear_71 dyear_73 med kww age mrt _cons", params_1 = "xb_s xb_iq xb_expr xb_tenure xb_rns xb_smsa xb_dyear_67 xb_dyear_68 xb_dyear_69 xb_dyear_70 xb_dyear_71 xb_dyear_73 b0", sexp_1 = "lw - ({xb_s} *s + {xb_iq} *iq + {xb_expr} *expr + {xb_tenure} *tenure + {xb_rns} *rns + {xb_smsa} *smsa + {xb_dyear_67} *dyear_67 + {xb_dyear_68} *dyear_68 + {xb_dyear_69} *dyear_69 + {xb_dyear_70} *dyear_70 + {xb_dyear_71} *dyear_71 + {xb_dyear_73} *dyear_73) - {b0}", properties = "b V", ) params_table = np.array([ .17587739850768, .02085563162829, 8.4330890400415, 3.366583555e-17, .1350011116414, .21675368537396, np.nan, 1.9599639845401, 0, -.00928586712743, .00491894287617, -1.88777697997, .05905589683705, -.01892681800673, .00035508375188, np.nan, 1.9599639845401, 0, .05031651549731, .00810558790493, 6.2076330659127, 5.378855978e-10, .03442985513012, .0662031758645, np.nan, 1.9599639845401, 0, .04246235782951, .00956418082077, 4.4397276280375, 9.007280073e-06, .02371690787918, .06120780777985, np.nan, 1.9599639845401, 0, -.1039476753865, .03373281188749, -3.0815004611293, .00205960157647, -.17006277178325, -.03783257898975, np.nan, 1.9599639845401, 0, .12477256813508, .03099244898605, 4.0259021864082, 
.0000567572801, .06402848432973, .18551665194043, np.nan, 1.9599639845401, 0, -.05297127223127, .0517946935923, -1.0227162003936, .30644204936546, -.15448700626247, .04854446179993, np.nan, 1.9599639845401, 0, .04564516152971, .05001865637643, .91256272831865, .36147256434055, -.05238960352318, .1436799265826, np.nan, 1.9599639845401, 0, .15574543741982, .04802004585645, 3.2433421218593, .00118136262363, .06162787700523, .24986299783442, np.nan, 1.9599639845401, 0, .16681173496168, .06134387289984, 2.7192892635594, .00654223677971, .0465799534058, .28704351651757, np.nan, 1.9599639845401, 0, .08417610675323, .05582688740597, 1.507805838092, .13160422753823, -.02524258193145, .19359479543791, np.nan, 1.9599639845401, 0, .09964580476612, .06124947866865, 1.6268841291727, .10376170930541, -.02040096749628, .21969257702853, np.nan, 1.9599639845401, 0, 4.0027753075622, .33649589464938, 11.895465505554, 1.249543428e-32, 3.3432554731038, 4.6622951420205, np.nan, 1.9599639845401, 0]).reshape(13,9) params_table_colnames = 'b se z pvalue ll ul df crit eform'.split() params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split() cov = np.array([ .00043495737061, -.00007938790704, .00002809207919, .00001486824321, -.00017806650894, -6.696078938e-06, -.00011595347261, -.00018816769626, -.00012205118386, -.00008281236274, -.00031504876539, -.00063574245306, .00264272738846, -.00007938790704, .00002419599902, 4.932871670e-06, -.00001114848619, .00006618803917, -.00002202930782, 4.808220835e-07, .00003206765662, -.00002261059773, -.00006024105579, -.00001412126593, .00001474591556, -.00144330101198, .00002809207919, 4.932871670e-06, .00006570055528, -.0000203894891, .00005213529923, -.00003297805448, .00003595284891, .00008758906787, .00003058926358, .00001696423798, -.00008568569767, -.00013140753648, -.00094326672008, .00001486824321, -.00001114848619, -.0000203894891, .00009147355477, -.00003774547245, 7.828122784e-06, 
.00008484461309, .00006729820252, .00011236802193, .00010082715772, .00011217081931, .00009440153548, .00075659901252, -.00017806650894, .00006618803917, .00005213529923, -.00003774547245, .00113790259784, .00013005865302, .00018021354375, .00018779266096, -9.435310865e-06, .0000165483542, -.00005323328914, .00008265052168, -.00499436873124, -6.696078938e-06, -.00002202930782, -.00003297805448, 7.828122784e-06, .00013005865302, .00096053189415, .00005704546746, .00011160225767, .00025285680201, .00010656723202, .00030213005331, .00030792696913, .00157128168902, -.00011595347261, 4.808220835e-07, .00003595284891, .00008484461309, .00018021354375, .00005704546746, .00268269028432, .00085942321667, .00091151417222, .00096327250114, .00090372304081, .00102768195348, .00034563629591, -.00018816769626, .00003206765662, .00008758906787, .00006729820252, .00018779266096, .00011160225767, .00085942321667, .0025018659857, .00092591134763, .00088266305412, .0008241186538, .00095084381197, -.00206285154639, -.00012205118386, -.00002261059773, .00003058926358, .00011236802193, -9.435310865e-06, .00025285680201, .00091151417222, .00092591134763, .00230592480406, .00118265696692, .0011106470199, .00129290662149, .00256049741814, -.00008281236274, -.00006024105579, .00001696423798, .00010082715772, .0000165483542, .00010656723202, .00096327250114, .00088266305412, .00118265696692, .00376307074235, .00124584145426, .00155915431219, .00599086304364, -.00031504876539, -.00001412126593, -.00008568569767, .00011217081931, -.00005323328914, .00030213005331, .00090372304081, .0008241186538, .0011106470199, .00124584145426, .00311664135744, .0018437604357, .00431259131307, -.00063574245306, .00001474591556, -.00013140753648, .00009440153548, .00008265052168, .00030792696913, .00102768195348, .00095084381197, .00129290662149, .00155915431219, .0018437604357, .00375149863718, .00538769349865, .00264272738846, -.00144330101198, -.00094326672008, .00075659901252, -.00499436873124, 
.00157128168902, .00034563629591, -.00206285154639, .00256049741814, .00599086304364, .00431259131307, .00538769349865, .11322948711589]).reshape(13,13) cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split() cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split() class Bunch(dict): def __init__(self, **kw): dict.__init__(self, kw) self.__dict__ = self for i,att in enumerate(['params', 'bse', 'tvalues', 'pvalues']): self[att] = self.params_table[:,i] results = Bunch( params_table=params_table, params_table_colnames=params_table_colnames, params_table_rownames=params_table_rownames, cov=cov, cov_colnames=cov_colnames, cov_rownames=cov_rownames, **est )
bsd-3-clause
tkem/mopidy
tests/internal/network/test_server.py
4
8093
from __future__ import absolute_import, unicode_literals import errno import socket import unittest from mock import Mock, patch, sentinel from mopidy.internal import network from mopidy.internal.gi import GObject from tests import any_int class ServerTest(unittest.TestCase): def setUp(self): # noqa: N802 self.mock = Mock(spec=network.Server) def test_init_calls_create_server_socket(self): network.Server.__init__( self.mock, sentinel.host, sentinel.port, sentinel.protocol) self.mock.create_server_socket.assert_called_once_with( sentinel.host, sentinel.port) def test_init_calls_register_server(self): sock = Mock(spec=socket.SocketType) sock.fileno.return_value = sentinel.fileno self.mock.create_server_socket.return_value = sock network.Server.__init__( self.mock, sentinel.host, sentinel.port, sentinel.protocol) self.mock.register_server_socket.assert_called_once_with( sentinel.fileno) def test_init_fails_on_fileno_call(self): sock = Mock(spec=socket.SocketType) sock.fileno.side_effect = socket.error self.mock.create_server_socket.return_value = sock with self.assertRaises(socket.error): network.Server.__init__( self.mock, sentinel.host, sentinel.port, sentinel.protocol) def test_init_stores_values_in_attributes(self): # This need to be a mock and no a sentinel as fileno() is called on it sock = Mock(spec=socket.SocketType) self.mock.create_server_socket.return_value = sock network.Server.__init__( self.mock, sentinel.host, sentinel.port, sentinel.protocol, max_connections=sentinel.max_connections, timeout=sentinel.timeout) self.assertEqual(sentinel.protocol, self.mock.protocol) self.assertEqual(sentinel.max_connections, self.mock.max_connections) self.assertEqual(sentinel.timeout, self.mock.timeout) self.assertEqual(sock, self.mock.server_socket) @patch.object(network, 'create_socket', spec=socket.SocketType) def test_create_server_socket_sets_up_listener(self, create_socket): sock = create_socket.return_value network.Server.create_server_socket( self.mock, 
sentinel.host, sentinel.port) sock.setblocking.assert_called_once_with(False) sock.bind.assert_called_once_with((sentinel.host, sentinel.port)) sock.listen.assert_called_once_with(any_int) @patch.object(network, 'create_socket', new=Mock()) def test_create_server_socket_fails(self): network.create_socket.side_effect = socket.error with self.assertRaises(socket.error): network.Server.create_server_socket( self.mock, sentinel.host, sentinel.port) @patch.object(network, 'create_socket', new=Mock()) def test_create_server_bind_fails(self): sock = network.create_socket.return_value sock.bind.side_effect = socket.error with self.assertRaises(socket.error): network.Server.create_server_socket( self.mock, sentinel.host, sentinel.port) @patch.object(network, 'create_socket', new=Mock()) def test_create_server_listen_fails(self): sock = network.create_socket.return_value sock.listen.side_effect = socket.error with self.assertRaises(socket.error): network.Server.create_server_socket( self.mock, sentinel.host, sentinel.port) @patch.object(GObject, 'io_add_watch', new=Mock()) def test_register_server_socket_sets_up_io_watch(self): network.Server.register_server_socket(self.mock, sentinel.fileno) GObject.io_add_watch.assert_called_once_with( sentinel.fileno, GObject.IO_IN, self.mock.handle_connection) def test_handle_connection(self): self.mock.accept_connection.return_value = ( sentinel.sock, sentinel.addr) self.mock.maximum_connections_exceeded.return_value = False self.assertTrue(network.Server.handle_connection( self.mock, sentinel.fileno, GObject.IO_IN)) self.mock.accept_connection.assert_called_once_with() self.mock.maximum_connections_exceeded.assert_called_once_with() self.mock.init_connection.assert_called_once_with( sentinel.sock, sentinel.addr) self.assertEqual(0, self.mock.reject_connection.call_count) def test_handle_connection_exceeded_connections(self): self.mock.accept_connection.return_value = ( sentinel.sock, sentinel.addr) 
self.mock.maximum_connections_exceeded.return_value = True self.assertTrue(network.Server.handle_connection( self.mock, sentinel.fileno, GObject.IO_IN)) self.mock.accept_connection.assert_called_once_with() self.mock.maximum_connections_exceeded.assert_called_once_with() self.mock.reject_connection.assert_called_once_with( sentinel.sock, sentinel.addr) self.assertEqual(0, self.mock.init_connection.call_count) def test_accept_connection(self): sock = Mock(spec=socket.SocketType) sock.accept.return_value = (sentinel.sock, sentinel.addr) self.mock.server_socket = sock sock, addr = network.Server.accept_connection(self.mock) self.assertEqual(sentinel.sock, sock) self.assertEqual(sentinel.addr, addr) def test_accept_connection_recoverable_error(self): sock = Mock(spec=socket.SocketType) self.mock.server_socket = sock for error in (errno.EAGAIN, errno.EINTR): sock.accept.side_effect = socket.error(error, '') with self.assertRaises(network.ShouldRetrySocketCall): network.Server.accept_connection(self.mock) # FIXME decide if this should be allowed to propegate def test_accept_connection_unrecoverable_error(self): sock = Mock(spec=socket.SocketType) self.mock.server_socket = sock sock.accept.side_effect = socket.error with self.assertRaises(socket.error): network.Server.accept_connection(self.mock) def test_maximum_connections_exceeded(self): self.mock.max_connections = 10 self.mock.number_of_connections.return_value = 11 self.assertTrue(network.Server.maximum_connections_exceeded(self.mock)) self.mock.number_of_connections.return_value = 10 self.assertTrue(network.Server.maximum_connections_exceeded(self.mock)) self.mock.number_of_connections.return_value = 9 self.assertFalse( network.Server.maximum_connections_exceeded(self.mock)) @patch('pykka.registry.ActorRegistry.get_by_class') def test_number_of_connections(self, get_by_class): self.mock.protocol = sentinel.protocol get_by_class.return_value = [1, 2, 3] self.assertEqual(3, 
network.Server.number_of_connections(self.mock)) get_by_class.return_value = [] self.assertEqual(0, network.Server.number_of_connections(self.mock)) @patch.object(network, 'Connection', new=Mock()) def test_init_connection(self): self.mock.protocol = sentinel.protocol self.mock.protocol_kwargs = {} self.mock.timeout = sentinel.timeout network.Server.init_connection(self.mock, sentinel.sock, sentinel.addr) network.Connection.assert_called_once_with( sentinel.protocol, {}, sentinel.sock, sentinel.addr, sentinel.timeout) def test_reject_connection(self): sock = Mock(spec=socket.SocketType) network.Server.reject_connection( self.mock, sock, (sentinel.host, sentinel.port)) sock.close.assert_called_once_with() def test_reject_connection_error(self): sock = Mock(spec=socket.SocketType) sock.close.side_effect = socket.error network.Server.reject_connection( self.mock, sock, (sentinel.host, sentinel.port)) sock.close.assert_called_once_with()
apache-2.0
bakhtout/odoo-educ
openerp/addons/base/ir/ir_mail_server.py
21
26670
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011-2014 OpenERP S.A. (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## from email.mime.text import MIMEText from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.charset import Charset from email.header import Header from email.utils import formatdate, make_msgid, COMMASPACE, getaddresses, formataddr from email import Encoders import logging import re import smtplib import threading from openerp import SUPERUSER_ID from openerp.osv import osv, fields from openerp.tools.translate import _ from openerp.tools import html2text import openerp.tools as tools # ustr was originally from tools.misc. # it is moved to loglevels until we refactor tools. 
from openerp.loglevels import ustr

_logger = logging.getLogger(__name__)
# Dedicated logger so sends can be silenced/observed separately under tests.
_test_logger = logging.getLogger('openerp.tests')


class MailDeliveryException(osv.except_osv):
    """Specific exception subclass for mail delivery errors"""
    def __init__(self, name, value):
        super(MailDeliveryException, self).__init__(name, value)


class WriteToLogger(object):
    """Debugging helper: behave as a file descriptor and pipe writes to the
    given logger at the given level (used to capture smtplib's debug output).
    """
    def __init__(self, logger, level=logging.DEBUG):
        self.logger = logger
        self.level = level

    def write(self, s):
        self.logger.log(self.level, s)


def try_coerce_ascii(string_utf8):
    """Attempts to decode the given utf8-encoded string
       as ASCII after coercing it to UTF-8, then return
       the confirmed 7-bit ASCII string.

       If the process fails (because the string
       contains non-ASCII characters) returns ``None``.
    """
    try:
        string_utf8.decode('ascii')
    except UnicodeDecodeError:
        # Non-ASCII content: caller must fall back to RFC2047 encoding.
        return
    return string_utf8


def encode_header(header_text):
    """Returns an appropriate representation of the given header value,
       suitable for direct assignment as a header value in an
       email.message.Message. RFC2822 assumes that headers contain
       only 7-bit characters, so we ensure it is the case, using RFC2047
       encoding when needed.

       :param header_text: unicode or utf-8 encoded string with header value
       :rtype: string | email.header.Header
       :return: if ``header_text`` represents a plain ASCII string,
                return the same 7-bit string, otherwise returns an
                email.header.Header that will perform the appropriate
                RFC2047 encoding of non-ASCII values.
    """
    if not header_text:
        return ""
    # convert anything to utf-8, suitable for testing ASCIIness, as 7-bit chars are
    # encoded as ASCII in utf-8
    header_text_utf8 = tools.ustr(header_text).encode('utf-8')
    header_text_ascii = try_coerce_ascii(header_text_utf8)
    # if this header contains non-ASCII characters,
    # we'll need to wrap it up in a message.header.Header
    # that will take care of RFC2047-encoding it as
    # 7-bit string.
    return header_text_ascii if header_text_ascii\
         else Header(header_text_utf8, 'utf-8')


def encode_header_param(param_text):
    """Returns an appropriate RFC2047 encoded representation of the given
       header parameter value, suitable for direct assignation as the
       param value (e.g. via Message.set_param() or Message.add_header())
       RFC2822 assumes that headers contain only 7-bit characters,
       so we ensure it is the case, using RFC2047 encoding when needed.

       :param param_text: unicode or utf-8 encoded string with header value
       :rtype: string
       :return: if ``param_text`` represents a plain ASCII string,
                return the same 7-bit string, otherwise returns an
                ASCII string containing the RFC2047 encoded text.
    """
    # For details see the encode_header() method that uses the same logic
    if not param_text:
        return ""
    param_text_utf8 = tools.ustr(param_text).encode('utf-8')
    param_text_ascii = try_coerce_ascii(param_text_utf8)
    return param_text_ascii if param_text_ascii\
         else Charset('utf8').header_encode(param_text_utf8)

# TODO master, remove me, no longer used internally
name_with_email_pattern = re.compile(r'("[^<@>]+")\s*<([^ ,<@]+@[^> ,]+)>')
# Loose "something@something" matcher used to extract envelope addresses.
address_pattern = re.compile(r'([^ ,<@]+@[^> ,]+)')


def extract_rfc2822_addresses(text):
    """Returns a list of valid RFC2822 addresses
       that can be found in ``text``, ignoring
       malformed ones and non-ASCII ones.
    """
    if not text:
        return []
    candidates = address_pattern.findall(tools.ustr(text).encode('utf-8'))
    # Drop candidates containing non-ASCII chars (invalid in SMTP envelopes).
    return filter(try_coerce_ascii, candidates)


def encode_rfc2822_address_header(header_text):
    """If ``header_text`` contains non-ASCII characters,
       attempts to locate patterns of the form
       ``"Name" <address@domain>`` and replace the
       ``"Name"`` portion by the RFC2047-encoded
       version, preserving the address part untouched.
    """
    def encode_addr(addr):
        name, email = addr
        if not try_coerce_ascii(name):
            # RFC2047-encode only the display name; the address must stay ASCII.
            name = str(Header(name, 'utf-8'))
        return formataddr((name, email))

    addresses = getaddresses([tools.ustr(header_text).encode('utf-8')])
    return COMMASPACE.join(map(encode_addr, addresses))


class ir_mail_server(osv.osv):
    """Represents an SMTP server, able to send outgoing emails, with SSL and TLS capabilities."""
    _name = "ir.mail_server"

    NO_VALID_RECIPIENT = ("At least one valid recipient address should be "
                          "specified for outgoing emails (To/Cc/Bcc)")

    _columns = {
        'name': fields.char('Description', required=True, select=True),
        'smtp_host': fields.char('SMTP Server', required=True, help="Hostname or IP of SMTP server"),
        'smtp_port': fields.integer('SMTP Port', size=5, required=True, help="SMTP Port. Usually 465 for SSL, and 25 or 587 for other cases."),
        'smtp_user': fields.char('Username', size=64, help="Optional username for SMTP authentication"),
        'smtp_pass': fields.char('Password', size=64, help="Optional password for SMTP authentication"),
        'smtp_encryption': fields.selection([('none','None'),
                                             ('starttls','TLS (STARTTLS)'),
                                             ('ssl','SSL/TLS')],
                                            string='Connection Security', required=True,
                                            help="Choose the connection encryption scheme:\n"
                                                 "- None: SMTP sessions are done in cleartext.\n"
                                                 "- TLS (STARTTLS): TLS encryption is requested at start of SMTP session (Recommended)\n"
                                                 "- SSL/TLS: SMTP sessions are encrypted with SSL/TLS through a dedicated port (default: 465)"),
        'smtp_debug': fields.boolean('Debugging', help="If enabled, the full output of SMTP sessions will "
                                                       "be written to the server log at DEBUG level"
                                                       "(this is very verbose and may include confidential info!)"),
        'sequence': fields.integer('Priority', help="When no specific mail server is requested for a mail, the highest priority one "
                                                    "is used. Default priority is 10 (smaller number = higher priority)"),
        'active': fields.boolean('Active')
    }

    _defaults = {
        'smtp_port': 25,
        'active': True,
        'sequence': 10,
        'smtp_encryption': 'none',
    }

    def __init__(self, *args, **kwargs):
        # Make sure we pipe the smtplib outputs to our own DEBUG logger.
        # NOTE: this patches the smtplib module globally (process-wide).
        if not isinstance(smtplib.stderr, WriteToLogger):
            logpiper = WriteToLogger(_logger)
            smtplib.stderr = logpiper
            smtplib.stdout = logpiper
        super(ir_mail_server, self).__init__(*args,**kwargs)

    def name_get(self, cr, uid, ids, context=None):
        # Display names are shown wrapped in parentheses, e.g. "(localhost)".
        return [(a["id"], "(%s)" % (a['name'])) for a in self.read(cr, uid, ids, ['name'], context=context)]

    def test_smtp_connection(self, cr, uid, ids, context=None):
        # UI helper: try to open (and immediately close) a connection for each
        # selected server; always ends by raising an except_osv so the result
        # is shown to the user as a dialog (success is reported via exception
        # too -- this is the historical OpenERP pattern for test buttons).
        for smtp_server in self.browse(cr, uid, ids, context=context):
            smtp = False
            try:
                smtp = self.connect(smtp_server.smtp_host, smtp_server.smtp_port, user=smtp_server.smtp_user,
                                    password=smtp_server.smtp_pass, encryption=smtp_server.smtp_encryption,
                                    smtp_debug=smtp_server.smtp_debug)
            except Exception, e:
                raise osv.except_osv(_("Connection Test Failed!"), _("Here is what we got instead:\n %s") % tools.ustr(e))
            finally:
                try:
                    if smtp: smtp.quit()
                except Exception:
                    # ignored, just a consequence of the previous exception
                    pass
        raise osv.except_osv(_("Connection Test Succeeded!"), _("Everything seems properly set up!"))

    def connect(self, host, port, user=None, password=None, encryption=False,
                smtp_debug=False):
        """Returns a new SMTP connection to the given SMTP server, authenticated
           with ``user`` and ``password`` if provided, and encrypted as requested
           by the ``encryption`` parameter.

           :param host: host or IP of SMTP server to connect to
           :param int port: SMTP port to connect to
           :param user: optional username to authenticate with
           :param password: optional password to authenticate with
           :param string encryption: optional, ``'ssl'`` | ``'starttls'``
           :param bool smtp_debug: toggle debugging of SMTP sessions (all i/o
                                   will be output in logs)
        """
        if encryption == 'ssl':
            # SMTP_SSL only exists when Python was built with SSL support.
            if not 'SMTP_SSL' in smtplib.__all__:
                raise osv.except_osv(
                             _("SMTP-over-SSL mode unavailable"),
                             _("Your OpenERP Server does not support SMTP-over-SSL. You could use STARTTLS instead."
                               "If SSL is needed, an upgrade to Python 2.6 on the server-side should do the trick."))
            connection = smtplib.SMTP_SSL(host, port)
        else:
            connection = smtplib.SMTP(host, port)
        connection.set_debuglevel(smtp_debug)
        if encryption == 'starttls':
            # starttls() will perform ehlo() if needed first
            # and will discard the previous list of services
            # after successfully performing STARTTLS command,
            # (as per RFC 3207) so for example any AUTH
            # capability that appears only on encrypted channels
            # will be correctly detected for next step
            connection.starttls()

        if user:
            # Attempt authentication - will raise if AUTH service not supported
            # The user/password must be converted to bytestrings in order to be usable for
            # certain hashing schemes, like HMAC.
            # See also bug #597143 and python issue #5285
            user = tools.ustr(user).encode('utf-8')
            password = tools.ustr(password).encode('utf-8')
            connection.login(user, password)
        return connection

    def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
                    attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
                    body_alternative=None, subtype_alternative='plain'):
        """Constructs an RFC2822 email.message.Message object based on the
           keyword arguments passed, and returns it.

           :param string email_from: sender email address
           :param list email_to: list of recipient addresses (to be joined with commas)
           :param string subject: email subject (no pre-encoding/quoting necessary)
           :param string body: email body, of the type ``subtype`` (by default,
                plaintext). If html subtype is used, the message will be
                automatically converted to plaintext and wrapped in
                multipart/alternative, unless an explicit ``body_alternative``
                version is passed.
           :param string body_alternative: optional alternative body, of the
                type specified in ``subtype_alternative``
           :param string reply_to: optional value of Reply-To header
           :param string object_id: optional tracking identifier, to be
                included in the message-id for recognizing replies. Suggested
                format for object-id is "res_id-model", e.g. "12345-crm.lead".
           :param string subtype: optional mime subtype for the text body
                (usually 'plain' or 'html'), must match the format of the
                ``body`` parameter. Default is 'plain', making the content
                part of the mail "text/plain".
           :param string subtype_alternative: optional mime subtype of
                ``body_alternative`` (usually 'plain' or 'html'). Default is
                'plain'.
           :param list attachments: list of (filename, filecontents) pairs,
                where filecontents is a string containing the bytes of the
                attachment
           :param list email_cc: optional list of string values for CC header
                (to be joined with commas)
           :param list email_bcc: optional list of string values for BCC header
                (to be joined with commas)
           :param dict headers: optional map of headers to set on the outgoing
                mail (may override the other headers, including Subject,
                Reply-To, Message-Id, etc.)
           :rtype: email.message.Message (usually MIMEMultipart)
           :return: the new RFC2822 email message
        """
        email_from = email_from or tools.config.get('email_from')
        assert email_from, "You must either provide a sender address explicitly or configure "\
                           "a global sender address in the server configuration or with the "\
                           "--email-from startup parameter."

        # Note: we must force all strings to 8-bit utf-8 when crafting message,
        # or use encode_header() for headers, which does it automatically.

        headers = headers or {}         # need valid dict later
        if not email_cc: email_cc = []
        if not email_bcc: email_bcc = []
        if not body: body = u''

        email_body_utf8 = ustr(body).encode('utf-8')
        email_text_part = MIMEText(email_body_utf8, _subtype=subtype, _charset='utf-8')
        msg = MIMEMultipart()

        if not message_id:
            if object_id:
                # Tracking message-id encodes the record id so replies can be routed back.
                message_id = tools.generate_tracking_message_id(object_id)
            else:
                message_id = make_msgid()
        msg['Message-Id'] = encode_header(message_id)
        if references:
            msg['references'] = encode_header(references)
        msg['Subject'] = encode_header(subject)
        msg['From'] = encode_rfc2822_address_header(email_from)
        del msg['Reply-To']
        if reply_to:
            msg['Reply-To'] = encode_rfc2822_address_header(reply_to)
        else:
            # Default Reply-To to the sender when not explicitly provided.
            msg['Reply-To'] = msg['From']
        msg['To'] = encode_rfc2822_address_header(COMMASPACE.join(email_to))
        if email_cc:
            msg['Cc'] = encode_rfc2822_address_header(COMMASPACE.join(email_cc))
        if email_bcc:
            msg['Bcc'] = encode_rfc2822_address_header(COMMASPACE.join(email_bcc))
        msg['Date'] = formatdate()
        # Custom headers may override normal headers or provide additional ones
        for key, value in headers.iteritems():
            msg[ustr(key).encode('utf-8')] = encode_header(value)

        if subtype == 'html' and not body_alternative and html2text:
            # Always provide alternative text body ourselves if possible.
            text_utf8 = tools.html2text(email_body_utf8.decode('utf-8')).encode('utf-8')
            alternative_part = MIMEMultipart(_subtype="alternative")
            alternative_part.attach(MIMEText(text_utf8, _charset='utf-8', _subtype='plain'))
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        elif body_alternative:
            # Include both alternatives, as specified, within a multipart/alternative part
            alternative_part = MIMEMultipart(_subtype="alternative")
            body_alternative_utf8 = ustr(body_alternative).encode('utf-8')
            alternative_body_part = MIMEText(body_alternative_utf8, _subtype=subtype_alternative, _charset='utf-8')
            alternative_part.attach(alternative_body_part)
            alternative_part.attach(email_text_part)
            msg.attach(alternative_part)
        else:
            msg.attach(email_text_part)

        if attachments:
            for (fname, fcontent) in attachments:
                filename_rfc2047 = encode_header_param(fname)
                part = MIMEBase('application', "octet-stream")

                # The default RFC2231 encoding of Message.add_header() works in Thunderbird but not GMail
                # so we fix it by using RFC2047 encoding for the filename instead.
                part.set_param('name', filename_rfc2047)
                part.add_header('Content-Disposition', 'attachment', filename=filename_rfc2047)

                part.set_payload(fcontent)
                Encoders.encode_base64(part)
                msg.attach(part)
        return msg

    def _get_default_bounce_address(self, cr, uid, context=None):
        '''Compute the default bounce address.

           The default bounce address is used to set the envelop address if no
           envelop address is provided in the message. It is formed by joining
           the config parameters "mail.bounce.alias" and "mail.catchall.domain".

           If "mail.bounce.alias" is not set it defaults to "postmaster-odoo".

           If "mail.catchall.domain" is not set, return None.
        '''
        get_param = self.pool['ir.config_parameter'].get_param
        postmaster = get_param(cr, uid, 'mail.bounce.alias', default='postmaster-odoo', context=context,)
        domain = get_param(cr, uid, 'mail.catchall.domain', context=context)
        if postmaster and domain:
            return '%s@%s' % (postmaster, domain)

    def send_email(self, cr, uid, message, mail_server_id=None, smtp_server=None, smtp_port=None,
                   smtp_user=None, smtp_password=None, smtp_encryption=None, smtp_debug=False,
                   context=None):
        """Sends an email directly (no queuing).

        No retries are done, the caller should handle MailDeliveryException
        in order to ensure that the mail is never lost.

        If the mail_server_id is provided, sends using this mail server,
        ignoring other smtp_* arguments. If mail_server_id is None and
        smtp_server is None, use the default mail server (highest priority).
        If mail_server_id is None and smtp_server is not None, use the
        provided smtp_* arguments. If both mail_server_id and smtp_server
        are None, look for an 'smtp_server' value in server config, and
        fails if not found.

        :param message: the email.message.Message to send. The envelope sender
            will be extracted from the ``Return-Path`` (if present), or will be
            set to the default bounce address. The envelope recipients will be
            extracted from the combined list of ``To``, ``CC`` and ``BCC``
            headers.
        :param mail_server_id: optional id of ir.mail_server to use for
            sending. overrides other smtp_* arguments.
        :param smtp_server: optional hostname of SMTP server to use
        :param smtp_encryption: optional TLS mode, one of 'none', 'starttls'
            or 'ssl' (see ir.mail_server fields for explanation)
        :param smtp_port: optional SMTP port, if mail_server_id is not passed
        :param smtp_user: optional SMTP user, if mail_server_id is not passed
        :param smtp_password: optional SMTP password to use, if mail_server_id
            is not passed
        :param smtp_debug: optional SMTP debug flag, if mail_server_id is not
            passed
        :return: the Message-ID of the message that was just sent, if
                 successfully sent, otherwise raises MailDeliveryException
                 and logs root cause.
        """
        # Use the default bounce address **only if** no Return-Path was
        # provided by caller.  Caller may be using Variable Envelope Return
        # Path (VERP) to detect no-longer valid email addresses.
        smtp_from = message['Return-Path']
        if not smtp_from:
            smtp_from = self._get_default_bounce_address(cr, uid, context=context)
        if not smtp_from:
            smtp_from = message['From']
        assert smtp_from, "The Return-Path or From header is required for any outbound email"

        # The email's "Envelope From" (Return-Path), and all recipient addresses must only contain ASCII characters.
        from_rfc2822 = extract_rfc2822_addresses(smtp_from)
        assert from_rfc2822, ("Malformed 'Return-Path' or 'From' address: %r - "
                              "It should contain one valid plain ASCII email") % smtp_from
        # use last extracted email, to support rarities like 'Support@MyComp <support@mycompany.com>'
        smtp_from = from_rfc2822[-1]
        email_to = message['To']
        email_cc = message['Cc']
        email_bcc = message['Bcc']
        smtp_to_list = filter(None, tools.flatten(map(extract_rfc2822_addresses,[email_to, email_cc, email_bcc])))
        assert smtp_to_list, self.NO_VALID_RECIPIENT

        x_forge_to = message['X-Forge-To']
        if x_forge_to:
            # `To:` header forged, e.g. for posting on mail.groups, to avoid confusion
            del message['X-Forge-To']
            del message['To'] # avoid multiple To: headers!
            message['To'] = x_forge_to

        # Do not actually send emails in testing mode!
        if getattr(threading.currentThread(), 'testing', False):
            _test_logger.info("skip sending email in test mode")
            return message['Message-Id']

        # Get SMTP Server Details from Mail Server
        mail_server = None
        if mail_server_id:
            mail_server = self.browse(cr, SUPERUSER_ID, mail_server_id)
        elif not smtp_server:
            # No explicit server requested: pick the highest-priority record.
            mail_server_ids = self.search(cr, SUPERUSER_ID, [], order='sequence', limit=1)
            if mail_server_ids:
                mail_server = self.browse(cr, SUPERUSER_ID, mail_server_ids[0])

        if mail_server:
            smtp_server = mail_server.smtp_host
            smtp_user = mail_server.smtp_user
            smtp_password = mail_server.smtp_pass
            smtp_port = mail_server.smtp_port
            smtp_encryption = mail_server.smtp_encryption
            smtp_debug = smtp_debug or mail_server.smtp_debug
        else:
            # we were passed an explicit smtp_server or nothing at all
            smtp_server = smtp_server or tools.config.get('smtp_server')
            smtp_port = tools.config.get('smtp_port', 25) if smtp_port is None else smtp_port
            smtp_user = smtp_user or tools.config.get('smtp_user')
            smtp_password = smtp_password or tools.config.get('smtp_password')
            if smtp_encryption is None and tools.config.get('smtp_ssl'):
                smtp_encryption = 'starttls' # STARTTLS is the new meaning of the smtp_ssl flag as of v7.0

        if not smtp_server:
            raise osv.except_osv(
                         _("Missing SMTP Server"),
                         _("Please define at least one SMTP server, or provide the SMTP parameters explicitly."))

        try:
            message_id = message['Message-Id']

            # Add email in Maildir if smtp_server contains maildir.
            # ('maildir:/path' -> deliver to local Maildir at '/path' instead of SMTP)
            if smtp_server.startswith('maildir:/'):
                from mailbox import Maildir
                maildir_path = smtp_server[8:]
                mdir = Maildir(maildir_path, factory=None, create = True)
                mdir.add(message.as_string(True))
                return message_id

            smtp = None
            try:
                smtp = self.connect(smtp_server, smtp_port, smtp_user, smtp_password, smtp_encryption or False, smtp_debug)
                smtp.sendmail(smtp_from, smtp_to_list, message.as_string())
            finally:
                # Always close the session, even on sendmail failure.
                if smtp is not None:
                    smtp.quit()
        except Exception, e:
            msg = _("Mail delivery failed via SMTP server '%s'.\n%s: %s") % (tools.ustr(smtp_server),
                                                                             e.__class__.__name__,
                                                                             tools.ustr(e))
            _logger.error(msg)
            raise MailDeliveryException(_("Mail Delivery Failed"), msg)
        return message_id

    def on_change_encryption(self, cr, uid, ids, smtp_encryption):
        # onchange helper: suggest the conventional default port for the
        # selected encryption scheme (465 for SSL/TLS, 25 otherwise), and warn
        # if this Python build lacks SMTP_SSL support.
        if smtp_encryption == 'ssl':
            result = {'value': {'smtp_port': 465}}
            if not 'SMTP_SSL' in smtplib.__all__:
                result['warning'] = {'title': _('Warning'),
                                     'message': _('Your server does not seem to support SSL, you may want to try STARTTLS instead')}
        else:
            result = {'value': {'smtp_port': 25}}
        return result

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
allenlavoie/tensorflow
tensorflow/python/kernel_tests/spacetobatch_op_test.py
18
24897
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for SpaceToBatch and BatchToSpace ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import math_ops from tensorflow.python.platform import test def space_to_batch_direct(input_array, block_shape, paddings): """Direct Python implementation of space-to-batch conversion. This is used for tests only. Args: input_array: N-D array block_shape: 1-D array of shape [num_block_dims]. paddings: 2-D array of shape [num_block_dims, 2]. Returns: Converted tensor. 
""" input_array = np.array(input_array) block_shape = np.array(block_shape) num_block_dims = len(block_shape) paddings = np.array(paddings).reshape((len(block_shape), 2)) padded = np.pad(input_array, pad_width=([[0, 0]] + list(paddings) + [[0, 0]] * (input_array.ndim - 1 - num_block_dims)), mode="constant") reshaped_padded_shape = [input_array.shape[0]] output_shape = [input_array.shape[0] * np.prod(block_shape)] for block_dim, block_shape_value in enumerate(block_shape): reduced_size = padded.shape[block_dim + 1] // block_shape_value reshaped_padded_shape.append(reduced_size) output_shape.append(reduced_size) reshaped_padded_shape.append(block_shape_value) reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:]) output_shape.extend(input_array.shape[num_block_dims + 1:]) reshaped_padded = padded.reshape(reshaped_padded_shape) permuted_reshaped_padded = np.transpose(reshaped_padded, ( list(np.arange(num_block_dims) * 2 + 2) + [0] + list(np.arange(num_block_dims) * 2 + 1) + list( np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims * 2))) return permuted_reshaped_padded.reshape(output_shape) class PythonOpImpl(object): @staticmethod def space_to_batch(*args, **kwargs): return array_ops.space_to_batch(*args, **kwargs) @staticmethod def batch_to_space(*args, **kwargs): return array_ops.batch_to_space(*args, **kwargs) class CppOpImpl(object): @staticmethod def space_to_batch(*args, **kwargs): return gen_array_ops.space_to_batch(*args, **kwargs) @staticmethod def batch_to_space(*args, **kwargs): return gen_array_ops.batch_to_space(*args, **kwargs) class SpaceToBatchTest(test.TestCase, PythonOpImpl): """Tests input-output pairs for the SpaceToBatch and BatchToSpace ops. This uses the Python compatibility wrapper that forwards to space_to_batch_nd. 
""" def _testPad(self, inputs, paddings, block_size, outputs): with self.test_session(use_gpu=True): # outputs = space_to_batch(inputs) x_tf = self.space_to_batch( math_ops.to_float(inputs), paddings, block_size=block_size) self.assertAllEqual(x_tf.eval(), outputs) # inputs = batch_to_space(outputs) x_tf = self.batch_to_space( math_ops.to_float(outputs), paddings, block_size=block_size) self.assertAllEqual(x_tf.eval(), inputs) def _testOne(self, inputs, block_size, outputs): paddings = np.zeros((2, 2), dtype=np.int32) self._testPad(inputs, paddings, block_size, outputs) # [1, 2, 2, 1] <-> [4, 1, 1, 1] def testSmallInput2x2(self): x_np = [[[[1], [2]], [[3], [4]]]] block_size = 2 x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] self._testOne(x_np, block_size, x_out) # [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1] def testSmallInput2x2Pad1x0(self): x_np = [[[[1], [2]], [[3], [4]]]] paddings = np.array([[1, 0], [1, 0]], dtype=np.int32) block_size = 3 x_out = [[[[0]]], [[[0]]], [[[0]]], [[[0]]], [[[1]]], [[[2]]], [[[0]]], [[[3]]], [[[4]]]] self._testPad(x_np, paddings, block_size, x_out) # Test with depth larger than 1. # [1, 2, 2, 3] <-> [4, 1, 1, 3] def testDepthInput2x2(self): x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] block_size = 2 x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] self._testOne(x_np, block_size, x_out) # Test for larger input dimensions. # [1, 4, 4, 1] <-> [4, 2, 2, 1] def testLargerInput2x2(self): x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] block_size = 2 x_out = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] self._testOne(x_np, block_size, x_out) # Test with batch larger than 1. 
# [2, 2, 4, 1] <-> [8, 1, 2, 1] def testBatchInput2x2(self): x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] block_size = 2 x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] self._testOne(x_np, block_size, x_out) # Tests for larger input spatial dimensions AND batch larger than 1, to ensure # that elements are correctly laid out spatially and properly interleaved # along the batch dimension. # [2, 4, 4, 1] <-> [8, 2, 2, 1] def testLargerInputBatch2x2(self): x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]], [[[17], [18], [19], [20]], [[21], [22], [23], [24]], [[25], [26], [27], [28]], [[29], [30], [31], [32]]]] x_out = [[[[1], [3]], [[9], [11]]], [[[17], [19]], [[25], [27]]], [[[2], [4]], [[10], [12]]], [[[18], [20]], [[26], [28]]], [[[5], [7]], [[13], [15]]], [[[21], [23]], [[29], [31]]], [[[6], [8]], [[14], [16]]], [[[22], [24]], [[30], [32]]]] block_size = 2 self._testOne(x_np, block_size, x_out) class SpaceToBatchCppTest(SpaceToBatchTest, CppOpImpl): """Tests input-output pairs for the SpaceToBatch and BatchToSpace ops. This uses the C++ ops. 
""" pass class SpaceToBatchNDTest(test.TestCase): """Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops.""" def _testPad(self, inputs, block_shape, paddings, outputs): block_shape = np.array(block_shape) paddings = np.array(paddings).reshape((len(block_shape), 2)) for use_gpu in [False, True]: with self.test_session(use_gpu=use_gpu): # outputs = space_to_batch(inputs) x_tf = array_ops.space_to_batch_nd( math_ops.to_float(inputs), block_shape, paddings) self.assertAllEqual(x_tf.eval(), outputs) # inputs = batch_to_space(outputs) x_tf = array_ops.batch_to_space_nd( math_ops.to_float(outputs), block_shape, paddings) self.assertAllEqual(x_tf.eval(), inputs) def _testDirect(self, input_shape, block_shape, paddings): inputs = np.arange(np.prod(input_shape), dtype=np.float32) inputs = inputs.reshape(input_shape) self._testPad(inputs, block_shape, paddings, space_to_batch_direct(inputs, block_shape, paddings)) def testZeroBlockDimsZeroRemainingDims(self): self._testPad( inputs=[1, 2], block_shape=[], paddings=[], outputs=[1, 2],) def testZeroBlockDimsOneRemainingDim(self): self._testPad( inputs=[[1, 2], [3, 4]], block_shape=[], paddings=[], outputs=[[1, 2], [3, 4]]) # Same thing, but with a no-op block dim. self._testPad( inputs=[[1, 2], [3, 4]], block_shape=[1], paddings=[[0, 0]], outputs=[[1, 2], [3, 4]]) def testZeroBlockDimsTwoRemainingDims(self): self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[], paddings=[], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # Same thing, but with a no-op block dim. self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[1], paddings=[[0, 0]], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # Same thing, but with two no-op block dims. 
self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[1, 1], paddings=[[0, 0], [0, 0]], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) def testOneBlockDimZeroRemainingDims(self): self._testPad( inputs=[[1, 2, 3], [4, 5, 6]], block_shape=[2], paddings=[1, 0], outputs=[[0, 2], [0, 5], [1, 3], [4, 6]]) def testOneBlockDimOneRemainingDim(self): self._testPad( inputs=[[[1, 11], [2, 21], [3, 31]], [[4, 41], [5, 51], [6, 61]]], block_shape=[2], paddings=[1, 0], outputs=[[[0, 0], [2, 21]], [[0, 0], [5, 51]], [[1, 11], [3, 31]], [[4, 41], [6, 61]]]) def testDirect(self): # Test with zero-size remaining dimension. self._testDirect( input_shape=[3, 1, 2, 0], block_shape=[3], paddings=[[0, 2]]) # Test with zero-size blocked dimension. self._testDirect( input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[0, 0]]) # Test with padding up from zero size. self._testDirect( input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[1, 2]]) self._testDirect( input_shape=[3, 3, 4, 5, 2], block_shape=[3, 4, 2], paddings=[[1, 2], [0, 0], [3, 0]]) self._testDirect( input_shape=[3, 3, 4, 5, 2], block_shape=[3, 4, 2, 2], paddings=[[1, 2], [0, 0], [3, 0], [0, 0]]) self._testDirect( input_shape=[3, 2, 2, 3, 4, 5, 2, 5], block_shape=[1, 1, 3, 4, 2, 2], paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]]) self._testDirect( input_shape=[3, 2, 2, 3, 4, 5, 2, 5], block_shape=[1, 1, 3, 4, 2, 2, 1], paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]]) class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl): # Verifies that: space_to_batch(x) = transpose(space_to_depth(transpose(x))) def testSpaceToDepthTranspose(self): x = np.arange(5 * 10 * 16 * 7, dtype=np.float32).reshape([5, 10, 16, 7]) block_size = 2 paddings = np.zeros((2, 2), dtype=np.int32) y1 = self.space_to_batch(x, paddings, block_size=block_size) y2 = array_ops.transpose( array_ops.space_to_depth( array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size), [3, 1, 2, 0]) with 
self.test_session(use_gpu=True): self.assertAllEqual(y1.eval(), y2.eval()) class SpaceToBatchSpaceToDepthCpp(SpaceToBatchSpaceToDepth, CppOpImpl): pass class SpaceToBatchErrorHandlingTest(test.TestCase, PythonOpImpl): def testInputWrongDimMissingBatch(self): # The input is missing the first dimension ("batch") x_np = [[[1], [2]], [[3], [4]]] paddings = np.zeros((2, 2), dtype=np.int32) block_size = 2 with self.assertRaises(ValueError): _ = self.space_to_batch(x_np, paddings, block_size) def testBlockSize0(self): # The block size is 0. x_np = [[[[1], [2]], [[3], [4]]]] paddings = np.zeros((2, 2), dtype=np.int32) block_size = 0 with self.assertRaises(ValueError): out_tf = self.space_to_batch(x_np, paddings, block_size) out_tf.eval() def testBlockSizeOne(self): # The block size is 1. The block size needs to be > 1. x_np = [[[[1], [2]], [[3], [4]]]] paddings = np.zeros((2, 2), dtype=np.int32) block_size = 1 with self.assertRaises(ValueError): out_tf = self.space_to_batch(x_np, paddings, block_size) out_tf.eval() def testBlockSizeLarger(self): # The block size is too large for this input. x_np = [[[[1], [2]], [[3], [4]]]] paddings = np.zeros((2, 2), dtype=np.int32) block_size = 10 with self.assertRaises(ValueError): out_tf = self.space_to_batch(x_np, paddings, block_size) out_tf.eval() def testBlockSizeNotDivisibleWidth(self): # The block size divides width but not height. x_np = [[[[1], [2], [3]], [[3], [4], [7]]]] paddings = np.zeros((2, 2), dtype=np.int32) block_size = 3 with self.assertRaises(ValueError): _ = self.space_to_batch(x_np, paddings, block_size) def testBlockSizeNotDivisibleHeight(self): # The block size divides height but not width. x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]] paddings = np.zeros((2, 2), dtype=np.int32) block_size = 3 with self.assertRaises(ValueError): _ = self.space_to_batch(x_np, paddings, block_size) def testBlockSizeNotDivisibleBoth(self): # The block size does not divide neither width or height. 
x_np = [[[[1], [2]], [[3], [4]]]] paddings = np.zeros((2, 2), dtype=np.int32) block_size = 3 with self.assertRaises(ValueError): _ = self.space_to_batch(x_np, paddings, block_size) def testUnknownShape(self): t = self.space_to_batch( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.int32), block_size=4) self.assertEqual(4, t.get_shape().ndims) class SpaceToBatchErrorHandlingCppTest(SpaceToBatchErrorHandlingTest, CppOpImpl): pass class SpaceToBatchNDErrorHandlingTest(test.TestCase): def _testStaticShape(self, input_shape, block_shape, paddings, error): block_shape = np.array(block_shape) paddings = np.array(paddings) # Try with sizes known at graph construction time. with self.assertRaises(error): _ = array_ops.space_to_batch_nd( np.zeros(input_shape, np.float32), block_shape, paddings) def _testDynamicShape(self, input_shape, block_shape, paddings): block_shape = np.array(block_shape) paddings = np.array(paddings) # Try with sizes unknown at graph construction time. input_placeholder = array_ops.placeholder(dtypes.float32) block_shape_placeholder = array_ops.placeholder( dtypes.int32, shape=block_shape.shape) paddings_placeholder = array_ops.placeholder(dtypes.int32) t = array_ops.space_to_batch_nd(input_placeholder, block_shape_placeholder, paddings_placeholder) with self.assertRaises(ValueError): _ = t.eval({ input_placeholder: np.zeros(input_shape, np.float32), block_shape_placeholder: block_shape, paddings_placeholder: paddings }) def _testShape(self, input_shape, block_shape, paddings, error): self._testStaticShape(input_shape, block_shape, paddings, error) self._testDynamicShape(input_shape, block_shape, paddings) def testBlockSize0(self): # The block size is 0. self._testShape([1, 2, 2], [0, 2], [[0, 0], [0, 0]], ValueError) def testBlockSizeNegative(self): self._testShape([1, 2, 2], [-1, 2], [[0, 0], [0, 0]], ValueError) def testNegativePadding(self): # The padding is negative. 
self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError) def testBlockSizeNotDivisible(self): # The padded size is not divisible by the block size. self._testShape([1, 2, 3, 1], [3, 3], [[0, 0], [0, 0]], ValueError) def testBlockDimsMismatch(self): # Shape of block_shape does not match shape of paddings. self._testStaticShape([1, 3, 3, 1], [3, 3], [[0, 0]], ValueError) def testUnknown(self): # Verify that input shape and paddings shape can be unknown. _ = array_ops.space_to_batch_nd( array_ops.placeholder(dtypes.float32), array_ops.placeholder( dtypes.int32, shape=(2,)), array_ops.placeholder(dtypes.int32)) # Only number of input dimensions is known. t = array_ops.space_to_batch_nd( array_ops.placeholder( dtypes.float32, shape=(None, None, None, None)), array_ops.placeholder( dtypes.int32, shape=(2,)), array_ops.placeholder(dtypes.int32)) self.assertEqual(4, t.get_shape().ndims) # Dimensions are partially known. t = array_ops.space_to_batch_nd( array_ops.placeholder( dtypes.float32, shape=(None, None, None, 2)), array_ops.placeholder( dtypes.int32, shape=(2,)), array_ops.placeholder(dtypes.int32)) self.assertEqual([None, None, None, 2], t.get_shape().as_list()) # Dimensions are partially known. t = array_ops.space_to_batch_nd( array_ops.placeholder( dtypes.float32, shape=(3, None, None, 2)), [2, 3], array_ops.placeholder(dtypes.int32)) self.assertEqual([3 * 2 * 3, None, None, 2], t.get_shape().as_list()) # Dimensions are partially known. t = array_ops.space_to_batch_nd( array_ops.placeholder( dtypes.float32, shape=(3, None, 2, 2)), [2, 3], [[1, 1], [0, 1]]) self.assertEqual([3 * 2 * 3, None, 1, 2], t.get_shape().as_list()) # Dimensions are fully known. t = array_ops.space_to_batch_nd( array_ops.placeholder( dtypes.float32, shape=(3, 2, 3, 2)), [2, 3], [[1, 1], [0, 0]]) self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list()) class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl): # Check the gradients. 
def _checkGrad(self, x, paddings, block_size): assert 4 == x.ndim with self.test_session(use_gpu=True): tf_x = ops.convert_to_tensor(x) tf_y = self.space_to_batch(tf_x, paddings, block_size) epsilon = 1e-5 ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient( tf_x, x.shape, tf_y, tf_y.get_shape().as_list(), x_init_value=x, delta=epsilon) self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon) # Tests a gradient for space_to_batch of x which is a four dimensional # tensor of shape [b, h * block_size, w * block_size, d]. def _compare(self, b, h, w, d, block_size, pad_beg, pad_end): block_size_sq = block_size * block_size x = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape( [b, h * block_size, w * block_size, d]) paddings = np.array( [[pad_beg, pad_end], [pad_beg, pad_end]], dtype=np.int32) self._checkGrad(x, paddings, block_size) # Don't use very large numbers as dimensions here as the result is tensor # with cartesian product of the dimensions. def testSmall(self): block_size = 2 pad_beg = 0 pad_end = 0 self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end) def testSmall2(self): block_size = 2 pad_beg = 0 pad_end = 0 self._compare(2, 4, 3, 2, block_size, pad_beg, pad_end) def testSmallPad1x1(self): block_size = 2 pad_beg = 1 pad_end = 1 self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end) class SpaceToBatchGradientCppTest(SpaceToBatchGradientTest, CppOpImpl): pass class SpaceToBatchNDGradientTest(test.TestCase): # Check the gradients. 
def _checkGrad(self, x, block_shape, paddings): block_shape = np.array(block_shape) paddings = np.array(paddings).reshape((len(block_shape), 2)) with self.test_session(): tf_x = ops.convert_to_tensor(x) tf_y = array_ops.space_to_batch_nd(tf_x, block_shape, paddings) epsilon = 1e-5 ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient( tf_x, x.shape, tf_y, tf_y.get_shape().as_list(), x_init_value=x, delta=epsilon) self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon) def _compare(self, input_shape, block_shape, paddings): x = np.random.normal( 0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape) self._checkGrad(x, block_shape, paddings) # Don't use very large numbers as dimensions here as the result is tensor # with cartesian product of the dimensions. def testSmall(self): self._compare([1, 4, 6, 5], [2, 2], [[0, 0], [0, 0]]) def testSmall2(self): self._compare([2, 8, 6, 2], [2, 2], [[0, 0], [0, 0]]) def testSmallPad1(self): self._compare([2, 4, 6, 2], [2, 2], [[1, 1], [1, 1]]) def testSmallPadThreeBlockDims(self): self._compare([2, 2, 4, 3, 2], [2, 2, 2], [[1, 1], [1, 1], [1, 0]]) class RequiredSpaceToBatchPaddingsTest(test.TestCase): def _checkProperties(self, input_shape, block_shape, base_paddings, paddings, crops): """Checks that `paddings` and `crops` satisfy invariants.""" num_block_dims = len(block_shape) self.assertEqual(len(input_shape), num_block_dims) if base_paddings is None: base_paddings = np.zeros((num_block_dims, 2), np.int32) self.assertEqual(base_paddings.shape, (num_block_dims, 2)) self.assertEqual(paddings.shape, (num_block_dims, 2)) self.assertEqual(crops.shape, (num_block_dims, 2)) for i in range(num_block_dims): self.assertEqual(paddings[i, 0], base_paddings[i, 0]) self.assertLessEqual(0, paddings[i, 1] - base_paddings[i, 1]) self.assertLess(paddings[i, 1] - base_paddings[i, 1], block_shape[i]) self.assertEqual( (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i], 0) self.assertEqual(crops[i, 
0], 0) self.assertEqual(crops[i, 1], paddings[i, 1] - base_paddings[i, 1]) def _test(self, input_shape, block_shape, base_paddings): input_shape = np.array(input_shape) block_shape = np.array(block_shape) if base_paddings is not None: base_paddings = np.array(base_paddings) # Check with constants. paddings, crops = array_ops.required_space_to_batch_paddings(input_shape, block_shape, base_paddings) paddings_const = tensor_util.constant_value(paddings) crops_const = tensor_util.constant_value(crops) self.assertIsNotNone(paddings_const) self.assertIsNotNone(crops_const) self._checkProperties(input_shape, block_shape, base_paddings, paddings_const, crops_const) # Check with non-constants. assignments = {} input_shape_placeholder = array_ops.placeholder(dtypes.int32) assignments[input_shape_placeholder] = input_shape block_shape_placeholder = array_ops.placeholder(dtypes.int32, [len(block_shape)]) assignments[block_shape_placeholder] = block_shape if base_paddings is not None: base_paddings_placeholder = array_ops.placeholder(dtypes.int32, [len(block_shape), 2]) assignments[base_paddings_placeholder] = base_paddings else: base_paddings_placeholder = None t_paddings, t_crops = array_ops.required_space_to_batch_paddings( input_shape_placeholder, block_shape_placeholder, base_paddings_placeholder) with self.test_session(): paddings_result = t_paddings.eval(assignments) crops_result = t_crops.eval(assignments) self.assertAllEqual(paddings_result, paddings_const) self.assertAllEqual(crops_result, crops_const) def testSimple(self): self._test( input_shape=np.zeros((0,), np.int32), block_shape=np.zeros((0,), np.int32), base_paddings=None) self._test( input_shape=np.zeros((0,), np.int32), block_shape=np.zeros((0,), np.int32), base_paddings=np.zeros((0, 2), np.int32)) self._test(input_shape=[1], block_shape=[2], base_paddings=None) self._test(input_shape=[1], block_shape=[2], base_paddings=[[1, 0]]) self._test(input_shape=[3], block_shape=[1], base_paddings=[[1, 2]]) 
self._test(input_shape=[1], block_shape=[2], base_paddings=[[2, 3]]) self._test(input_shape=[4, 5], block_shape=[3, 2], base_paddings=None) self._test( input_shape=[4, 5], block_shape=[3, 2], base_paddings=[[0, 0], [0, 1]]) if __name__ == "__main__": test.main()
apache-2.0
webostin/django-btc
tests/template_tests/syntax_tests/test_if.py
2
21411
from django.template.base import TemplateSyntaxError from django.template.loader import get_template from django.test import SimpleTestCase from ..utils import render, setup, TestObj class IfTagTests(SimpleTestCase): @setup({'if-tag01': '{% if foo %}yes{% else %}no{% endif %}'}) def test_if_tag01(self): output = render('if-tag01', {'foo': True}) self.assertEqual(output, 'yes') @setup({'if-tag02': '{% if foo %}yes{% else %}no{% endif %}'}) def test_if_tag02(self): output = render('if-tag02', {'foo': False}) self.assertEqual(output, 'no') @setup({'if-tag03': '{% if foo %}yes{% else %}no{% endif %}'}) def test_if_tag03(self): output = render('if-tag03') self.assertEqual(output, 'no') @setup({'if-tag04': '{% if foo %}foo{% elif bar %}bar{% endif %}'}) def test_if_tag04(self): output = render('if-tag04', {'foo': True}) self.assertEqual(output, 'foo') @setup({'if-tag05': '{% if foo %}foo{% elif bar %}bar{% endif %}'}) def test_if_tag05(self): output = render('if-tag05', {'bar': True}) self.assertEqual(output, 'bar') @setup({'if-tag06': '{% if foo %}foo{% elif bar %}bar{% endif %}'}) def test_if_tag06(self): output = render('if-tag06') self.assertEqual(output, '') @setup({'if-tag07': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'}) def test_if_tag07(self): output = render('if-tag07', {'foo': True}) self.assertEqual(output, 'foo') @setup({'if-tag08': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'}) def test_if_tag08(self): output = render('if-tag08', {'bar': True}) self.assertEqual(output, 'bar') @setup({'if-tag09': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'}) def test_if_tag09(self): output = render('if-tag09') self.assertEqual(output, 'nothing') @setup({'if-tag10': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'}) def test_if_tag10(self): output = render('if-tag10', {'foo': True}) self.assertEqual(output, 'foo') @setup({'if-tag11': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else 
%}nothing{% endif %}'}) def test_if_tag11(self): output = render('if-tag11', {'bar': True}) self.assertEqual(output, 'bar') @setup({'if-tag12': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'}) def test_if_tag12(self): output = render('if-tag12', {'baz': True}) self.assertEqual(output, 'baz') @setup({'if-tag13': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'}) def test_if_tag13(self): output = render('if-tag13') self.assertEqual(output, 'nothing') # Filters @setup({'if-tag-filter01': '{% if foo|length == 5 %}yes{% else %}no{% endif %}'}) def test_if_tag_filter01(self): output = render('if-tag-filter01', {'foo': 'abcde'}) self.assertEqual(output, 'yes') @setup({'if-tag-filter02': '{% if foo|upper == \'ABC\' %}yes{% else %}no{% endif %}'}) def test_if_tag_filter02(self): output = render('if-tag-filter02') self.assertEqual(output, 'no') # Equality @setup({'if-tag-eq01': '{% if foo == bar %}yes{% else %}no{% endif %}'}) def test_if_tag_eq01(self): output = render('if-tag-eq01') self.assertEqual(output, 'yes') @setup({'if-tag-eq02': '{% if foo == bar %}yes{% else %}no{% endif %}'}) def test_if_tag_eq02(self): output = render('if-tag-eq02', {'foo': 1}) self.assertEqual(output, 'no') @setup({'if-tag-eq03': '{% if foo == bar %}yes{% else %}no{% endif %}'}) def test_if_tag_eq03(self): output = render('if-tag-eq03', {'foo': 1, 'bar': 1}) self.assertEqual(output, 'yes') @setup({'if-tag-eq04': '{% if foo == bar %}yes{% else %}no{% endif %}'}) def test_if_tag_eq04(self): output = render('if-tag-eq04', {'foo': 1, 'bar': 2}) self.assertEqual(output, 'no') @setup({'if-tag-eq05': '{% if foo == \'\' %}yes{% else %}no{% endif %}'}) def test_if_tag_eq05(self): output = render('if-tag-eq05') self.assertEqual(output, 'no') # Comparison @setup({'if-tag-gt-01': '{% if 2 > 1 %}yes{% else %}no{% endif %}'}) def test_if_tag_gt_01(self): output = render('if-tag-gt-01') self.assertEqual(output, 'yes') @setup({'if-tag-gt-02': '{% 
if 1 > 1 %}yes{% else %}no{% endif %}'}) def test_if_tag_gt_02(self): output = render('if-tag-gt-02') self.assertEqual(output, 'no') @setup({'if-tag-gte-01': '{% if 1 >= 1 %}yes{% else %}no{% endif %}'}) def test_if_tag_gte_01(self): output = render('if-tag-gte-01') self.assertEqual(output, 'yes') @setup({'if-tag-gte-02': '{% if 1 >= 2 %}yes{% else %}no{% endif %}'}) def test_if_tag_gte_02(self): output = render('if-tag-gte-02') self.assertEqual(output, 'no') @setup({'if-tag-lt-01': '{% if 1 < 2 %}yes{% else %}no{% endif %}'}) def test_if_tag_lt_01(self): output = render('if-tag-lt-01') self.assertEqual(output, 'yes') @setup({'if-tag-lt-02': '{% if 1 < 1 %}yes{% else %}no{% endif %}'}) def test_if_tag_lt_02(self): output = render('if-tag-lt-02') self.assertEqual(output, 'no') @setup({'if-tag-lte-01': '{% if 1 <= 1 %}yes{% else %}no{% endif %}'}) def test_if_tag_lte_01(self): output = render('if-tag-lte-01') self.assertEqual(output, 'yes') @setup({'if-tag-lte-02': '{% if 2 <= 1 %}yes{% else %}no{% endif %}'}) def test_if_tag_lte_02(self): output = render('if-tag-lte-02') self.assertEqual(output, 'no') # Contains @setup({'if-tag-in-01': '{% if 1 in x %}yes{% else %}no{% endif %}'}) def test_if_tag_in_01(self): output = render('if-tag-in-01', {'x': [1]}) self.assertEqual(output, 'yes') @setup({'if-tag-in-02': '{% if 2 in x %}yes{% else %}no{% endif %}'}) def test_if_tag_in_02(self): output = render('if-tag-in-02', {'x': [1]}) self.assertEqual(output, 'no') @setup({'if-tag-not-in-01': '{% if 1 not in x %}yes{% else %}no{% endif %}'}) def test_if_tag_not_in_01(self): output = render('if-tag-not-in-01', {'x': [1]}) self.assertEqual(output, 'no') @setup({'if-tag-not-in-02': '{% if 2 not in x %}yes{% else %}no{% endif %}'}) def test_if_tag_not_in_02(self): output = render('if-tag-not-in-02', {'x': [1]}) self.assertEqual(output, 'yes') # AND @setup({'if-tag-and01': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and01(self): output = 
render('if-tag-and01', {'foo': True, 'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-and02': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and02(self): output = render('if-tag-and02', {'foo': True, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-and03': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and03(self): output = render('if-tag-and03', {'foo': False, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-and04': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and04(self): output = render('if-tag-and04', {'foo': False, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-and05': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and05(self): output = render('if-tag-and05', {'foo': False}) self.assertEqual(output, 'no') @setup({'if-tag-and06': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and06(self): output = render('if-tag-and06', {'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-and07': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and07(self): output = render('if-tag-and07', {'foo': True}) self.assertEqual(output, 'no') @setup({'if-tag-and08': '{% if foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_and08(self): output = render('if-tag-and08', {'bar': True}) self.assertEqual(output, 'no') # OR @setup({'if-tag-or01': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or01(self): output = render('if-tag-or01', {'foo': True, 'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-or02': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or02(self): output = render('if-tag-or02', {'foo': True, 'bar': False}) self.assertEqual(output, 'yes') @setup({'if-tag-or03': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or03(self): output = render('if-tag-or03', {'foo': False, 'bar': True}) self.assertEqual(output, 'yes') 
@setup({'if-tag-or04': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or04(self): output = render('if-tag-or04', {'foo': False, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-or05': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or05(self): output = render('if-tag-or05', {'foo': False}) self.assertEqual(output, 'no') @setup({'if-tag-or06': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or06(self): output = render('if-tag-or06', {'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-or07': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or07(self): output = render('if-tag-or07', {'foo': True}) self.assertEqual(output, 'yes') @setup({'if-tag-or08': '{% if foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_or08(self): output = render('if-tag-or08', {'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-or09': '{% if foo or bar or baz %}yes{% else %}no{% endif %}'}) def test_if_tag_or09(self): """ multiple ORs """ output = render('if-tag-or09', {'baz': True}) self.assertEqual(output, 'yes') # NOT @setup({'if-tag-not01': '{% if not foo %}no{% else %}yes{% endif %}'}) def test_if_tag_not01(self): output = render('if-tag-not01', {'foo': True}) self.assertEqual(output, 'yes') @setup({'if-tag-not02': '{% if not not foo %}no{% else %}yes{% endif %}'}) def test_if_tag_not02(self): output = render('if-tag-not02', {'foo': True}) self.assertEqual(output, 'no') @setup({'if-tag-not06': '{% if foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not06(self): output = render('if-tag-not06') self.assertEqual(output, 'no') @setup({'if-tag-not07': '{% if foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not07(self): output = render('if-tag-not07', {'foo': True, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-not08': '{% if foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not08(self): output = render('if-tag-not08', 
{'foo': True, 'bar': False}) self.assertEqual(output, 'yes') @setup({'if-tag-not09': '{% if foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not09(self): output = render('if-tag-not09', {'foo': False, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-not10': '{% if foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not10(self): output = render('if-tag-not10', {'foo': False, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-not11': '{% if not foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not11(self): output = render('if-tag-not11') self.assertEqual(output, 'no') @setup({'if-tag-not12': '{% if not foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not12(self): output = render('if-tag-not12', {'foo': True, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-not13': '{% if not foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not13(self): output = render('if-tag-not13', {'foo': True, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-not14': '{% if not foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not14(self): output = render('if-tag-not14', {'foo': False, 'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-not15': '{% if not foo and bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not15(self): output = render('if-tag-not15', {'foo': False, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-not16': '{% if foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not16(self): output = render('if-tag-not16') self.assertEqual(output, 'yes') @setup({'if-tag-not17': '{% if foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not17(self): output = render('if-tag-not17', {'foo': True, 'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-not18': '{% if foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not18(self): output = render('if-tag-not18', {'foo': True, 'bar': False}) self.assertEqual(output, 
'yes') @setup({'if-tag-not19': '{% if foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not19(self): output = render('if-tag-not19', {'foo': False, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-not20': '{% if foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not20(self): output = render('if-tag-not20', {'foo': False, 'bar': False}) self.assertEqual(output, 'yes') @setup({'if-tag-not21': '{% if not foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not21(self): output = render('if-tag-not21') self.assertEqual(output, 'yes') @setup({'if-tag-not22': '{% if not foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not22(self): output = render('if-tag-not22', {'foo': True, 'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-not23': '{% if not foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not23(self): output = render('if-tag-not23', {'foo': True, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-not24': '{% if not foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not24(self): output = render('if-tag-not24', {'foo': False, 'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-not25': '{% if not foo or bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not25(self): output = render('if-tag-not25', {'foo': False, 'bar': False}) self.assertEqual(output, 'yes') @setup({'if-tag-not26': '{% if not foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not26(self): output = render('if-tag-not26') self.assertEqual(output, 'yes') @setup({'if-tag-not27': '{% if not foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not27(self): output = render('if-tag-not27', {'foo': True, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-not28': '{% if not foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not28(self): output = render('if-tag-not28', {'foo': True, 'bar': False}) self.assertEqual(output, 'no') @setup({'if-tag-not29': '{% if not foo 
and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not29(self): output = render('if-tag-not29', {'foo': False, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-not30': '{% if not foo and not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not30(self): output = render('if-tag-not30', {'foo': False, 'bar': False}) self.assertEqual(output, 'yes') @setup({'if-tag-not31': '{% if not foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not31(self): output = render('if-tag-not31') self.assertEqual(output, 'yes') @setup({'if-tag-not32': '{% if not foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not32(self): output = render('if-tag-not32', {'foo': True, 'bar': True}) self.assertEqual(output, 'no') @setup({'if-tag-not33': '{% if not foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not33(self): output = render('if-tag-not33', {'foo': True, 'bar': False}) self.assertEqual(output, 'yes') @setup({'if-tag-not34': '{% if not foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not34(self): output = render('if-tag-not34', {'foo': False, 'bar': True}) self.assertEqual(output, 'yes') @setup({'if-tag-not35': '{% if not foo or not bar %}yes{% else %}no{% endif %}'}) def test_if_tag_not35(self): output = render('if-tag-not35', {'foo': False, 'bar': False}) self.assertEqual(output, 'yes') # Various syntax errors @setup({'if-tag-error01': '{% if %}yes{% endif %}'}) def test_if_tag_error01(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error01') @setup({'if-tag-error02': '{% if foo and %}yes{% else %}no{% endif %}'}) def test_if_tag_error02(self): with self.assertRaises(TemplateSyntaxError): render('if-tag-error02', {'foo': True}) @setup({'if-tag-error03': '{% if foo or %}yes{% else %}no{% endif %}'}) def test_if_tag_error03(self): with self.assertRaises(TemplateSyntaxError): render('if-tag-error03', {'foo': True}) @setup({'if-tag-error04': '{% if not foo and %}yes{% else %}no{% endif %}'}) 
def test_if_tag_error04(self): with self.assertRaises(TemplateSyntaxError): render('if-tag-error04', {'foo': True}) @setup({'if-tag-error05': '{% if not foo or %}yes{% else %}no{% endif %}'}) def test_if_tag_error05(self): with self.assertRaises(TemplateSyntaxError): render('if-tag-error05', {'foo': True}) @setup({'if-tag-error06': '{% if abc def %}yes{% endif %}'}) def test_if_tag_error06(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error06') @setup({'if-tag-error07': '{% if not %}yes{% endif %}'}) def test_if_tag_error07(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error07') @setup({'if-tag-error08': '{% if and %}yes{% endif %}'}) def test_if_tag_error08(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error08') @setup({'if-tag-error09': '{% if or %}yes{% endif %}'}) def test_if_tag_error09(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error09') @setup({'if-tag-error10': '{% if == %}yes{% endif %}'}) def test_if_tag_error10(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error10') @setup({'if-tag-error11': '{% if 1 == %}yes{% endif %}'}) def test_if_tag_error11(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error11') @setup({'if-tag-error12': '{% if a not b %}yes{% endif %}'}) def test_if_tag_error12(self): with self.assertRaises(TemplateSyntaxError): get_template('if-tag-error12') @setup({'if-tag-shortcircuit01': '{% if x.is_true or x.is_bad %}yes{% else %}no{% endif %}'}) def test_if_tag_shortcircuit01(self): """ If evaluations are shortcircuited where possible """ output = render('if-tag-shortcircuit01', {'x': TestObj()}) self.assertEqual(output, 'yes') @setup({'if-tag-shortcircuit02': '{% if x.is_false and x.is_bad %}yes{% else %}no{% endif %}'}) def test_if_tag_shortcircuit02(self): """ The is_bad() function should not be evaluated. If it is, an exception is raised. 
""" output = render('if-tag-shortcircuit02', {'x': TestObj()}) self.assertEqual(output, 'no') @setup({'if-tag-badarg01': '{% if x|default_if_none:y %}yes{% endif %}'}) def test_if_tag_badarg01(self): """ Non-existent args """ output = render('if-tag-badarg01') self.assertEqual(output, '') @setup({'if-tag-badarg02': '{% if x|default_if_none:y %}yes{% endif %}'}) def test_if_tag_badarg02(self): output = render('if-tag-badarg02', {'y': 0}) self.assertEqual(output, '') @setup({'if-tag-badarg03': '{% if x|default_if_none:y %}yes{% endif %}'}) def test_if_tag_badarg03(self): output = render('if-tag-badarg03', {'y': 1}) self.assertEqual(output, 'yes') @setup({'if-tag-badarg04': '{% if x|default_if_none:y %}yes{% else %}no{% endif %}'}) def test_if_tag_badarg04(self): output = render('if-tag-badarg04') self.assertEqual(output, 'no')
bsd-3-clause
jimberlage/servo
tests/wpt/webgl/tests/deqp/functional/gles3/draw/draw_test_generator.py
51
3058
#!/usr/bin/env python # Copyright (c) 2016 The Khronos Group Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and/or associated documentation files (the # "Materials"), to deal in the Materials without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Materials, and to # permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. """ Generator for draw* tests. This file needs to be run in its folder. """ import sys _DO_NOT_EDIT_WARNING = """<!-- This file is auto-generated from draw_test_generator.py DO NOT EDIT! 
--> """ _HTML_TEMPLATE = """<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>WebGL Draw Conformance Tests</title> <link rel="stylesheet" href="../../../../resources/js-test-style.css"/> <script src="../../../../js/js-test-pre.js"></script> <script src="../../../../js/webgl-test-utils.js"></script> <script src="../../../../closure-library/closure/goog/base.js"></script> <script src="../../../deqp-deps.js"></script> <script>goog.require('functional.gles3.es3fDrawTests');</script> </head> <body> <div id="description"></div> <div id="console"></div> <canvas id="canvas" width="200" height="100"> </canvas> <script> var wtu = WebGLTestUtils; var gl = wtu.create3DContext('canvas', null, 2); functional.gles3.es3fDrawTests.run(gl, [%(start)s, %(end)s]); </script> </body> </html> """ _GROUPS = [ 'draw_arrays', 'draw_elements', 'draw_arrays_instanced', 'draw_elements_instanced', 'draw_range_elements', 'instancing', 'random', ] def WriteTest(filename, start, end): """Write one test.""" file = open(filename, "wb") file.write(_DO_NOT_EDIT_WARNING) file.write(_HTML_TEMPLATE % { 'start': start, 'end': end }) file.close def GenerateTests(): """Generate all tests.""" filelist = [] for ii in range(len(_GROUPS)): filename = _GROUPS[ii] + '.html' filelist.append(filename) WriteTest(filename, ii, ii + 1) return filelist def GenerateTestList(filelist): file = open("00_test_list.txt", "wb") file.write('\n'.join(filelist)) file.close def main(argv): """This is the main function.""" filelist = GenerateTests() GenerateTestList(filelist) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
mpl-2.0
wdmchaft/taskcoach
taskcoachlib/command/clipboard.py
1
1363
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>

Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

from taskcoachlib import patterns


class Clipboard:
    """Process-wide (singleton) holder for cut or copied items.

    The items are stored together with the container ("source") they were
    taken from, so a later paste can restore cut items to their origin.
    """
    __metaclass__ = patterns.Singleton

    def __init__(self):
        self.clear()

    def put(self, items, source):
        """Store *items* and remember the *source* container they came from."""
        # pylint: disable-msg=W0201
        self._contents = items
        self._source = source

    def get(self):
        """Return ``(items, source)`` and leave the clipboard empty."""
        contents, origin = self._contents, self._source
        self.clear()
        return contents, origin

    def peek(self):
        """Return the stored items without emptying the clipboard."""
        return self._contents

    def clear(self):
        """Forget any stored items and their source."""
        self._contents = []
        self._source = None

    def __nonzero__(self):
        """Truthiness protocol (Python 2): non-empty clipboard is truthy."""
        return len(self._contents)
gpl-3.0
jruiperezv/ANALYSE
common/test/acceptance/tests/studio/test_studio_outline.py
9
70862
""" Acceptance tests for studio related to the outline page. """ from datetime import datetime, timedelta import itertools from pytz import UTC from bok_choy.promise import EmptyPromise from ...pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState from ...pages.studio.utils import add_discussion, drag, verify_ordering from ...pages.lms.courseware import CoursewarePage from ...pages.lms.course_nav import CourseNavPage from ...pages.lms.staff_view import StaffPage from ...fixtures.course import XBlockFixtureDesc from base_studio_test import StudioCourseTest from ..helpers import load_data_str from ...pages.lms.progress import ProgressPage SECTION_NAME = 'Test Section' SUBSECTION_NAME = 'Test Subsection' UNIT_NAME = 'Test Unit' class CourseOutlineTest(StudioCourseTest): """ Base class for all course outline tests """ def setUp(self): """ Install a course with no content using a fixture. """ super(CourseOutlineTest, self).setUp() self.course_outline_page = CourseOutlinePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) def populate_course_fixture(self, course_fixture): """ Install a course with sections/problems, tabs, updates, and handouts """ course_fixture.add_children( XBlockFixtureDesc('chapter', SECTION_NAME).add_children( XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children( XBlockFixtureDesc('vertical', UNIT_NAME).add_children( XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')), XBlockFixtureDesc('html', 'Test HTML Component'), XBlockFixtureDesc('discussion', 'Test Discussion Component') ) ) ) ) def do_action_and_verify(self, outline_page, action, expected_ordering): """ Perform the supplied action and then verify the resulting ordering. 
""" if outline_page is None: outline_page = self.course_outline_page.visit() action(outline_page) verify_ordering(self, outline_page, expected_ordering) # Reload the page and expand all subsections to see that the change was persisted. outline_page = self.course_outline_page.visit() outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click() verify_ordering(self, outline_page, expected_ordering) class CourseOutlineDragAndDropTest(CourseOutlineTest): """ Tests of drag and drop within the outline page. """ __test__ = True def populate_course_fixture(self, course_fixture): """ Create a course with one section, two subsections, and four units """ # with collapsed outline self.chap_1_handle = 0 self.chap_1_seq_1_handle = 1 # with first sequential expanded self.seq_1_vert_1_handle = 2 self.seq_1_vert_2_handle = 3 self.chap_1_seq_2_handle = 4 course_fixture.add_children( XBlockFixtureDesc('chapter', "1").add_children( XBlockFixtureDesc('sequential', '1.1').add_children( XBlockFixtureDesc('vertical', '1.1.1'), XBlockFixtureDesc('vertical', '1.1.2') ), XBlockFixtureDesc('sequential', '1.2').add_children( XBlockFixtureDesc('vertical', '1.2.1'), XBlockFixtureDesc('vertical', '1.2.2') ) ) ) def drag_and_verify(self, source, target, expected_ordering, outline_page=None): self.do_action_and_verify( outline_page, lambda (outline): drag(outline, source, target), expected_ordering ) def test_drop_unit_in_collapsed_subsection(self): """ Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2" which already have its own verticals. 
""" course_outline_page = self.course_outline_page.visit() # expand first subsection course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click() expected_ordering = [{"1": ["1.1", "1.2"]}, {"1.1": ["1.1.1"]}, {"1.2": ["1.1.2", "1.2.1", "1.2.2"]}] self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page) class WarningMessagesTest(CourseOutlineTest): """ Feature: Warning messages on sections, subsections, and units """ __test__ = True STAFF_ONLY_WARNING = 'Contains staff only content' LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content' FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future' NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released' class PublishState: NEVER_PUBLISHED = 1 UNPUBLISHED_CHANGES = 2 PUBLISHED = 3 VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED] class UnitState: """ Represents the state of a unit """ def __init__(self, is_released, publish_state, is_locked): """ Creates a new UnitState with the given properties """ self.is_released = is_released self.publish_state = publish_state self.is_locked = is_locked @property def name(self): """ Returns an appropriate name based on the properties of the unit """ result = "Released " if self.is_released else "Unreleased " if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED: result += "Never Published " elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES: result += "Unpublished Changes " else: result += "Published " result += "Locked" if self.is_locked else "Unlocked" return result def populate_course_fixture(self, course_fixture): """ Install a course with various configurations that could produce warning messages """ # Define the dimensions that map to the UnitState constructor features = [ [True, False], # Possible values for is_released self.PublishState.VALUES, # Possible 
values for publish_state [True, False] # Possible values for is_locked ] # Add a fixture for every state in the product of features course_fixture.add_children(*[ self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features) ]) def _build_fixture(self, unit_state): """ Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """ name = unit_state.name start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat() subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start}) # Children of never published subsections will be added on demand via _ensure_unit_present return XBlockFixtureDesc('chapter', name).add_children( subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED else subsection.add_children( XBlockFixtureDesc('vertical', name, metadata={ 'visible_to_staff_only': True if unit_state.is_locked else None }) ) ) def test_released_never_published_locked(self): """ Tests that released never published locked units display staff only warnings """ self._verify_unit_warning( self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True), self.STAFF_ONLY_WARNING ) def test_released_never_published_unlocked(self): """ Tests that released never published unlocked units display 'Unpublished units will not be released' """ self._verify_unit_warning( self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False), self.NEVER_PUBLISHED_WARNING ) def test_released_unpublished_changes_locked(self): """ Tests that released unpublished changes locked units display staff only warnings """ self._verify_unit_warning( self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True), self.STAFF_ONLY_WARNING ) def test_released_unpublished_changes_unlocked(self): """ Tests that released unpublished changes unlocked units display 
'Unpublished changes to live content' """ self._verify_unit_warning( self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False), self.LIVE_UNPUBLISHED_WARNING ) def test_released_published_locked(self): """ Tests that released published locked units display staff only warnings """ self._verify_unit_warning( self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True), self.STAFF_ONLY_WARNING ) def test_released_published_unlocked(self): """ Tests that released published unlocked units display no warnings """ self._verify_unit_warning( self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False), None ) def test_unreleased_never_published_locked(self): """ Tests that unreleased never published locked units display staff only warnings """ self._verify_unit_warning( self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True), self.STAFF_ONLY_WARNING ) def test_unreleased_never_published_unlocked(self): """ Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """ self._verify_unit_warning( self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False), self.NEVER_PUBLISHED_WARNING ) def test_unreleased_unpublished_changes_locked(self): """ Tests that unreleased unpublished changes locked units display staff only warnings """ self._verify_unit_warning( self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True), self.STAFF_ONLY_WARNING ) def test_unreleased_unpublished_changes_unlocked(self): """ Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will release in the future' """ self._verify_unit_warning( self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False), self.FUTURE_UNPUBLISHED_WARNING ) def 
test_unreleased_published_locked(self): """ Tests that unreleased published locked units display staff only warnings """ self._verify_unit_warning( self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True), self.STAFF_ONLY_WARNING ) def test_unreleased_published_unlocked(self): """ Tests that unreleased published unlocked units display no warnings """ self._verify_unit_warning( self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False), None ) def _verify_unit_warning(self, unit_state, expected_status_message): """ Verifies that the given unit's messages match the expected messages. If expected_status_message is None, then the unit status message is expected to not be present. """ self._ensure_unit_present(unit_state) self.course_outline_page.visit() section = self.course_outline_page.section(unit_state.name) subsection = section.subsection_at(0) subsection.toggle_expand() unit = subsection.unit_at(0) if expected_status_message == self.STAFF_ONLY_WARNING: self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING) self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING) self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING) else: self.assertFalse(section.has_status_message) self.assertFalse(subsection.has_status_message) if expected_status_message: self.assertEqual(unit.status_message, expected_status_message) else: self.assertFalse(unit.has_status_message) def _ensure_unit_present(self, unit_state): """ Ensures that a unit with the given state is present on the course outline """ if unit_state.publish_state == self.PublishState.PUBLISHED: return name = unit_state.name self.course_outline_page.visit() subsection = self.course_outline_page.section(name).subsection(name) subsection.toggle_expand() if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES: unit = subsection.unit(name).go_to() add_discussion(unit) elif unit_state.publish_state == 
self.PublishState.NEVER_PUBLISHED: subsection.add_unit() unit = ContainerPage(self.browser, None) unit.wait_for_page() if unit.is_staff_locked != unit_state.is_locked: unit.toggle_staff_lock() class EditingSectionsTest(CourseOutlineTest): """ Feature: Editing Release date, Due date and grading type. """ __test__ = True def test_can_edit_subsection(self): """ Scenario: I can edit settings of subsection. Given that I have created a subsection Then I see release date, due date and grading policy of subsection in course outline When I click on the configuration icon Then edit modal window is shown And release date, due date and grading policy fields present And they have correct initial values Then I set new values for these fields And I click save button on the modal Then I see release date, due date and grading policy of subsection in course outline """ self.course_outline_page.visit() subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME) # Verify that Release date visible by default self.assertTrue(subsection.release_date) # Verify that Due date and Policy hidden by default self.assertFalse(subsection.due_date) self.assertFalse(subsection.policy) modal = subsection.edit() # Verify fields self.assertTrue(modal.has_release_date()) self.assertTrue(modal.has_due_date()) self.assertTrue(modal.has_policy()) # Verify initial values self.assertEqual(modal.release_date, u'1/1/1970') self.assertEqual(modal.due_date, u'') self.assertEqual(modal.policy, u'Not Graded') # Set new values modal.release_date = '3/12/1972' modal.due_date = '7/21/2014' modal.policy = 'Lab' modal.save() self.assertIn(u'Released: Mar 12, 1972', subsection.release_date) self.assertIn(u'Due: Jul 21, 2014', subsection.due_date) self.assertIn(u'Lab', subsection.policy) def test_can_edit_section(self): """ Scenario: I can edit settings of section. 
Given that I have created a section Then I see release date of section in course outline When I click on the configuration icon Then edit modal window is shown And release date field present And it has correct initial value Then I set new value for this field And I click save button on the modal Then I see release date of section in course outline """ self.course_outline_page.visit() section = self.course_outline_page.section(SECTION_NAME) # Verify that Release date visible by default self.assertTrue(section.release_date) # Verify that Due date and Policy are not present self.assertFalse(section.due_date) self.assertFalse(section.policy) modal = section.edit() # Verify fields self.assertTrue(modal.has_release_date()) self.assertFalse(modal.has_due_date()) self.assertFalse(modal.has_policy()) # Verify initial value self.assertEqual(modal.release_date, u'1/1/1970') # Set new value modal.release_date = '5/14/1969' modal.save() self.assertIn(u'Released: May 14, 1969', section.release_date) # Verify that Due date and Policy are not present self.assertFalse(section.due_date) self.assertFalse(section.policy) def test_subsection_is_graded_in_lms(self): """ Scenario: I can grade subsection from course outline page. 
Given I visit progress page And I see that problem in subsection has grading type "Practice" Then I visit course outline page And I click on the configuration icon of subsection And I set grading policy to "Lab" And I click save button on the modal Then I visit progress page And I see that problem in subsection has grading type "Problem" """ progress_page = ProgressPage(self.browser, self.course_id) progress_page.visit() progress_page.wait_for_page() self.assertEqual(u'Practice', progress_page.grading_formats[0]) self.course_outline_page.visit() subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME) modal = subsection.edit() # Set new values modal.policy = 'Lab' modal.save() progress_page.visit() self.assertEqual(u'Problem', progress_page.grading_formats[0]) def test_unchanged_release_date_is_not_saved(self): """ Scenario: Saving a subsection without changing the release date will not override the release date Given that I have created a section with a subsection When I open the settings modal for the subsection And I pressed save And I open the settings modal for the section And I change the release date to 07/20/1969 And I press save Then the subsection and the section have the release date 07/20/1969 """ self.course_outline_page.visit() modal = self.course_outline_page.section_at(0).subsection_at(0).edit() modal.save() modal = self.course_outline_page.section_at(0).edit() modal.release_date = '7/20/1969' modal.save() release_text = 'Released: Jul 20, 1969' self.assertIn(release_text, self.course_outline_page.section_at(0).release_date) self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date) class StaffLockTest(CourseOutlineTest): """ Feature: Sections, subsections, and units can be locked and unlocked from the course outline. 
""" __test__ = True def populate_course_fixture(self, course_fixture): """ Create a course with one section, two subsections, and four units """ course_fixture.add_children( XBlockFixtureDesc('chapter', '1').add_children( XBlockFixtureDesc('sequential', '1.1').add_children( XBlockFixtureDesc('vertical', '1.1.1'), XBlockFixtureDesc('vertical', '1.1.2') ), XBlockFixtureDesc('sequential', '1.2').add_children( XBlockFixtureDesc('vertical', '1.2.1'), XBlockFixtureDesc('vertical', '1.2.2') ) ) ) def _verify_descendants_are_staff_only(self, item): """Verifies that all the descendants of item are staff only""" self.assertTrue(item.is_staff_only) if hasattr(item, 'children'): for child in item.children(): self._verify_descendants_are_staff_only(child) def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning): """Removes staff lock from a course outline item and checks whether or not a warning appears.""" modal = outline_item.edit() modal.is_explicitly_locked = False if expect_warning: self.assertTrue(modal.shows_staff_lock_warning()) else: self.assertFalse(modal.shows_staff_lock_warning()) modal.save() def _toggle_lock_on_unlocked_item(self, outline_item): """Toggles outline_item's staff lock on and then off, verifying the staff lock warning""" self.assertFalse(outline_item.has_staff_lock_warning) outline_item.set_staff_lock(True) self.assertTrue(outline_item.has_staff_lock_warning) self._verify_descendants_are_staff_only(outline_item) outline_item.set_staff_lock(False) self.assertFalse(outline_item.has_staff_lock_warning) def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item): """Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock""" child_item.set_staff_lock(True) parent_item.set_staff_lock(True) self.assertTrue(parent_item.has_staff_lock_warning) self.assertTrue(child_item.has_staff_lock_warning) parent_item.set_staff_lock(False) 
self.assertFalse(parent_item.has_staff_lock_warning) self.assertTrue(child_item.has_staff_lock_warning) def test_units_can_be_locked(self): """ Scenario: Units can be locked and unlocked from the course outline page Given I have a course with a unit When I click on the configuration icon And I enable explicit staff locking And I click save Then the unit shows a staff lock warning And when I click on the configuration icon And I disable explicit staff locking And I click save Then the unit does not show a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0) self._toggle_lock_on_unlocked_item(unit) def test_subsections_can_be_locked(self): """ Scenario: Subsections can be locked and unlocked from the course outline page Given I have a course with a subsection When I click on the subsection's configuration icon And I enable explicit staff locking And I click save Then the subsection shows a staff lock warning And all its descendants are staff locked And when I click on the subsection's configuration icon And I disable explicit staff locking And I click save Then the the subsection does not show a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() subsection = self.course_outline_page.section_at(0).subsection_at(0) self._toggle_lock_on_unlocked_item(subsection) def test_sections_can_be_locked(self): """ Scenario: Sections can be locked and unlocked from the course outline page Given I have a course with a section When I click on the section's configuration icon And I enable explicit staff locking And I click save Then the section shows a staff lock warning And all its descendants are staff locked And when I click on the section's configuration icon And I disable explicit staff locking And I click save Then the section does not show a staff lock warning """ self.course_outline_page.visit() 
self.course_outline_page.expand_all_subsections() section = self.course_outline_page.section_at(0) self._toggle_lock_on_unlocked_item(section) def test_explicit_staff_lock_remains_after_unlocking_section(self): """ Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section Given I have a course with sections, subsections, and units And I have enabled explicit staff lock on a section and one of its units When I click on the section's configuration icon And I disable explicit staff locking And I click save Then the unit still shows a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() section = self.course_outline_page.section_at(0) unit = section.subsection_at(0).unit_at(0) self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section) def test_explicit_staff_lock_remains_after_unlocking_subsection(self): """ Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection Given I have a course with sections, subsections, and units And I have enabled explicit staff lock on a subsection and one of its units When I click on the subsection's configuration icon And I disable explicit staff locking And I click save Then the unit still shows a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() subsection = self.course_outline_page.section_at(0).subsection_at(0) unit = subsection.unit_at(0) self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection) def test_section_displays_lock_when_all_subsections_locked(self): """ Scenario: All subsections in section are explicitly locked, section should display staff only warning Given I have a course one section and two subsections When I enable explicit staff lock on all the subsections Then the section shows a staff lock warning """ self.course_outline_page.visit() section = 
self.course_outline_page.section_at(0) section.subsection_at(0).set_staff_lock(True) section.subsection_at(1).set_staff_lock(True) self.assertTrue(section.has_staff_lock_warning) def test_section_displays_lock_when_all_units_locked(self): """ Scenario: All units in a section are explicitly locked, section should display staff only warning Given I have a course with one section, two subsections, and four units When I enable explicit staff lock on all the units Then the section shows a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() section = self.course_outline_page.section_at(0) section.subsection_at(0).unit_at(0).set_staff_lock(True) section.subsection_at(0).unit_at(1).set_staff_lock(True) section.subsection_at(1).unit_at(0).set_staff_lock(True) section.subsection_at(1).unit_at(1).set_staff_lock(True) self.assertTrue(section.has_staff_lock_warning) def test_subsection_displays_lock_when_all_units_locked(self): """ Scenario: All units in subsection are explicitly locked, subsection should display staff only warning Given I have a course with one subsection and two units When I enable explicit staff lock on all the units Then the subsection shows a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() subsection = self.course_outline_page.section_at(0).subsection_at(0) subsection.unit_at(0).set_staff_lock(True) subsection.unit_at(1).set_staff_lock(True) self.assertTrue(subsection.has_staff_lock_warning) def test_section_does_not_display_lock_when_some_subsections_locked(self): """ Scenario: Only some subsections in section are explicitly locked, section should NOT display staff only warning Given I have a course with one section and two subsections When I enable explicit staff lock on one subsection Then the section does not show a staff lock warning """ self.course_outline_page.visit() section = self.course_outline_page.section_at(0) 
section.subsection_at(0).set_staff_lock(True) self.assertFalse(section.has_staff_lock_warning) def test_section_does_not_display_lock_when_some_units_locked(self): """ Scenario: Only some units in section are explicitly locked, section should NOT display staff only warning Given I have a course with one section, two subsections, and four units When I enable explicit staff lock on three units Then the section does not show a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() section = self.course_outline_page.section_at(0) section.subsection_at(0).unit_at(0).set_staff_lock(True) section.subsection_at(0).unit_at(1).set_staff_lock(True) section.subsection_at(1).unit_at(1).set_staff_lock(True) self.assertFalse(section.has_staff_lock_warning) def test_subsection_does_not_display_lock_when_some_units_locked(self): """ Scenario: Only some units in subsection are explicitly locked, subsection should NOT display staff only warning Given I have a course with one subsection and two units When I enable explicit staff lock on one unit Then the subsection does not show a staff lock warning """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() subsection = self.course_outline_page.section_at(0).subsection_at(0) subsection.unit_at(0).set_staff_lock(True) self.assertFalse(subsection.has_staff_lock_warning) def test_locked_sections_do_not_appear_in_lms(self): """ Scenario: A locked section is not visible to students in the LMS Given I have a course with two sections When I enable explicit staff lock on one section And I click the View Live button to switch to staff view Then I see two sections in the sidebar And when I click to toggle to student view Then I see one section in the sidebar """ self.course_outline_page.visit() self.course_outline_page.add_section_from_top_button() self.course_outline_page.section_at(1).set_staff_lock(True) self.course_outline_page.view_live() courseware = 
CoursewarePage(self.browser, self.course_id) courseware.wait_for_page() self.assertEqual(courseware.num_sections, 2) StaffPage(self.browser).toggle_staff_view() self.assertEqual(courseware.num_sections, 1) def test_locked_subsections_do_not_appear_in_lms(self): """ Scenario: A locked subsection is not visible to students in the LMS Given I have a course with two subsections When I enable explicit staff lock on one subsection And I click the View Live button to switch to staff view Then I see two subsections in the sidebar And when I click to toggle to student view Then I see one section in the sidebar """ self.course_outline_page.visit() self.course_outline_page.section_at(0).subsection_at(1).set_staff_lock(True) self.course_outline_page.view_live() courseware = CoursewarePage(self.browser, self.course_id) courseware.wait_for_page() self.assertEqual(courseware.num_subsections, 2) StaffPage(self.browser).toggle_staff_view() self.assertEqual(courseware.num_subsections, 1) def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self): """ Scenario: Locking and unlocking a section will not publish its draft units Given I have a course with a section and unit And the unit has a draft and published version When I enable explicit staff lock on the section And I disable explicit staff lock on the section And I click the View Live button to switch to staff view Then I see the published version of the unit """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to() add_discussion(unit) self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() section = self.course_outline_page.section_at(0) section.set_staff_lock(True) section.set_staff_lock(False) unit = section.subsection_at(0).unit_at(0).go_to() unit.view_published_version() courseware = CoursewarePage(self.browser, self.course_id) courseware.wait_for_page() 
self.assertEqual(courseware.num_xblock_components, 0) def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self): """ Scenario: Locking and unlocking a subsection will not publish its draft units Given I have a course with a subsection and unit And the unit has a draft and published version When I enable explicit staff lock on the subsection And I disable explicit staff lock on the subsection And I click the View Live button to switch to staff view Then I see the published version of the unit """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to() add_discussion(unit) self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() subsection = self.course_outline_page.section_at(0).subsection_at(0) subsection.set_staff_lock(True) subsection.set_staff_lock(False) unit = subsection.unit_at(0).go_to() unit.view_published_version() courseware = CoursewarePage(self.browser, self.course_id) courseware.wait_for_page() self.assertEqual(courseware.num_xblock_components, 0) def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self): """ Scenario: Removing explicit staff lock from a unit which does not inherit staff lock displays a warning. Given I have a course with a subsection and unit When I enable explicit staff lock on the unit And I disable explicit staff lock on the unit Then I see a modal warning. """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0) unit.set_staff_lock(True) self._remove_staff_lock_and_verify_warning(unit, True) def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self): """ Scenario: Removing explicit staff lock from a subsection which does not inherit staff lock displays a warning. 
Given I have a course with a section and subsection When I enable explicit staff lock on the subsection And I disable explicit staff lock on the subsection Then I see a modal warning. """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() subsection = self.course_outline_page.section_at(0).subsection_at(0) subsection.set_staff_lock(True) self._remove_staff_lock_and_verify_warning(subsection, True) def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self): """ Scenario: Removing explicit staff lock from a unit which also inherits staff lock displays no warning. Given I have a course with a subsection and unit When I enable explicit staff lock on the subsection And I enable explicit staff lock on the unit When I disable explicit staff lock on the unit Then I do not see a modal warning. """ self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() subsection = self.course_outline_page.section_at(0).subsection_at(0) unit = subsection.unit_at(0) subsection.set_staff_lock(True) unit.set_staff_lock(True) self._remove_staff_lock_and_verify_warning(unit, False) def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self): """ Scenario: Removing explicit staff lock from a subsection which also inherits staff lock displays no warning. Given I have a course with a section and subsection When I enable explicit staff lock on the section And I enable explicit staff lock on the subsection When I disable explicit staff lock on the subsection Then I do not see a modal warning. 
""" self.course_outline_page.visit() self.course_outline_page.expand_all_subsections() section = self.course_outline_page.section_at(0) subsection = section.subsection_at(0) section.set_staff_lock(True) subsection.set_staff_lock(True) self._remove_staff_lock_and_verify_warning(subsection, False) class EditNamesTest(CourseOutlineTest): """ Feature: Click-to-edit section/subsection names """ __test__ = True def set_name_and_verify(self, item, old_name, new_name, expected_name): """ Changes the display name of item from old_name to new_name, then verifies that its value is expected_name. """ self.assertEqual(item.name, old_name) item.change_name(new_name) self.assertFalse(item.in_editable_form()) self.assertEqual(item.name, expected_name) def test_edit_section_name(self): """ Scenario: Click-to-edit section name Given that I have created a section When I click on the name of section Then the section name becomes editable And given that I have edited the section name When I click outside of the edited section name Then the section name saves And becomes non-editable """ self.course_outline_page.visit() self.set_name_and_verify( self.course_outline_page.section_at(0), 'Test Section', 'Changed', 'Changed' ) def test_edit_subsection_name(self): """ Scenario: Click-to-edit subsection name Given that I have created a subsection When I click on the name of subsection Then the subsection name becomes editable And given that I have edited the subsection name When I click outside of the edited subsection name Then the subsection name saves And becomes non-editable """ self.course_outline_page.visit() self.set_name_and_verify( self.course_outline_page.section_at(0).subsection_at(0), 'Test Subsection', 'Changed', 'Changed' ) def test_edit_empty_section_name(self): """ Scenario: Click-to-edit section name, enter empty name Given that I have created a section And I have clicked to edit the name of the section And I have entered an empty section name When I click outside of the 
edited section name Then the section name does not change And becomes non-editable """ self.course_outline_page.visit() self.set_name_and_verify( self.course_outline_page.section_at(0), 'Test Section', '', 'Test Section' ) def test_edit_empty_subsection_name(self): """ Scenario: Click-to-edit subsection name, enter empty name Given that I have created a subsection And I have clicked to edit the name of the subsection And I have entered an empty subsection name When I click outside of the edited subsection name Then the subsection name does not change And becomes non-editable """ self.course_outline_page.visit() self.set_name_and_verify( self.course_outline_page.section_at(0).subsection_at(0), 'Test Subsection', '', 'Test Subsection' ) def test_editing_names_does_not_expand_collapse(self): """ Scenario: A section stays in the same expand/collapse state while its name is edited Given that I have created a section And the section is collapsed When I click on the name of the section Then the section is collapsed And given that I have entered a new name Then the section is collapsed And given that I press ENTER to finalize the name Then the section is collapsed """ self.course_outline_page.visit() self.course_outline_page.section_at(0).toggle_expand() self.assertFalse(self.course_outline_page.section_at(0).in_editable_form()) self.assertTrue(self.course_outline_page.section_at(0).is_collapsed) self.course_outline_page.section_at(0).edit_name() self.assertTrue(self.course_outline_page.section_at(0).in_editable_form()) self.assertTrue(self.course_outline_page.section_at(0).is_collapsed) self.course_outline_page.section_at(0).enter_name('Changed') self.assertTrue(self.course_outline_page.section_at(0).is_collapsed) self.course_outline_page.section_at(0).finalize_name() self.assertTrue(self.course_outline_page.section_at(0).is_collapsed) class CreateSectionsTest(CourseOutlineTest): """ Feature: Create new sections/subsections/units """ __test__ = True def 
populate_course_fixture(self, course_fixture): """ Start with a completely empty course to easily test adding things to it """ pass def test_create_new_section_from_top_button(self): """ Scenario: Create new section from button at top of page Given that I am on the course outline When I click the "+ Add section" button at the top of the page Then I see a new section added to the bottom of the page And the display name is in its editable form. """ self.course_outline_page.visit() self.course_outline_page.add_section_from_top_button() self.assertEqual(len(self.course_outline_page.sections()), 1) self.assertTrue(self.course_outline_page.section_at(0).in_editable_form()) def test_create_new_section_from_bottom_button(self): """ Scenario: Create new section from button at bottom of page Given that I am on the course outline When I click the "+ Add section" button at the bottom of the page Then I see a new section added to the bottom of the page And the display name is in its editable form. """ self.course_outline_page.visit() self.course_outline_page.add_section_from_bottom_button() self.assertEqual(len(self.course_outline_page.sections()), 1) self.assertTrue(self.course_outline_page.section_at(0).in_editable_form()) def test_create_new_section_from_bottom_button_plus_icon(self): """ Scenario: Create new section from button plus icon at bottom of page Given that I am on the course outline When I click the plus icon in "+ Add section" button at the bottom of the page Then I see a new section added to the bottom of the page And the display name is in its editable form. 
""" self.course_outline_page.visit() self.course_outline_page.add_section_from_bottom_button(click_child_icon=True) self.assertEqual(len(self.course_outline_page.sections()), 1) self.assertTrue(self.course_outline_page.section_at(0).in_editable_form()) def test_create_new_subsection(self): """ Scenario: Create new subsection Given that I have created a section When I click the "+ Add subsection" button in that section Then I see a new subsection added to the bottom of the section And the display name is in its editable form. """ self.course_outline_page.visit() self.course_outline_page.add_section_from_top_button() self.assertEqual(len(self.course_outline_page.sections()), 1) self.course_outline_page.section_at(0).add_subsection() subsections = self.course_outline_page.section_at(0).subsections() self.assertEqual(len(subsections), 1) self.assertTrue(subsections[0].in_editable_form()) def test_create_new_unit(self): """ Scenario: Create new unit Given that I have created a section And that I have created a subsection within that section When I click the "+ Add unit" button in that subsection Then I am redirected to a New Unit page And the display name is in its editable form. 
""" self.course_outline_page.visit() self.course_outline_page.add_section_from_top_button() self.assertEqual(len(self.course_outline_page.sections()), 1) self.course_outline_page.section_at(0).add_subsection() self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1) self.course_outline_page.section_at(0).subsection_at(0).add_unit() unit_page = ContainerPage(self.browser, None) EmptyPromise(unit_page.is_browser_on_page, 'Browser is on the unit page').fulfill() self.assertTrue(unit_page.is_inline_editing_display_name()) class DeleteContentTest(CourseOutlineTest): """ Feature: Deleting sections/subsections/units """ __test__ = True def test_delete_section(self): """ Scenario: Delete section Given that I am on the course outline When I click the delete button for a section on the course outline Then I should receive a confirmation message, asking me if I really want to delete the section When I click "Yes, I want to delete this component" Then the confirmation message should close And the section should immediately be deleted from the course outline """ self.course_outline_page.visit() self.assertEqual(len(self.course_outline_page.sections()), 1) self.course_outline_page.section_at(0).delete() self.assertEqual(len(self.course_outline_page.sections()), 0) def test_cancel_delete_section(self): """ Scenario: Cancel delete of section Given that I clicked the delte button for a section on the course outline And I received a confirmation message, asking me if I really want to delete the component When I click "Cancel" Then the confirmation message should close And the section should remain in the course outline """ self.course_outline_page.visit() self.assertEqual(len(self.course_outline_page.sections()), 1) self.course_outline_page.section_at(0).delete(cancel=True) self.assertEqual(len(self.course_outline_page.sections()), 1) def test_delete_subsection(self): """ Scenario: Delete subsection Given that I am on the course outline When I click the delete 
button for a subsection on the course outline Then I should receive a confirmation message, asking me if I really want to delete the subsection When I click "Yes, I want to delete this component" Then the confiramtion message should close And the subsection should immediately be deleted from the course outline """ self.course_outline_page.visit() self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1) self.course_outline_page.section_at(0).subsection_at(0).delete() self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0) def test_cancel_delete_subsection(self): """ Scenario: Cancel delete of subsection Given that I clicked the delete button for a subsection on the course outline And I received a confirmation message, asking me if I really want to delete the subsection When I click "cancel" Then the confirmation message should close And the subsection should remain in the course outline """ self.course_outline_page.visit() self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1) self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True) self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1) def test_delete_unit(self): """ Scenario: Delete unit Given that I am on the course outline When I click the delete button for a unit on the course outline Then I should receive a confirmation message, asking me if I really want to delete the unit When I click "Yes, I want to delete this unit" Then the confirmation message should close And the unit should immediately be deleted from the course outline """ self.course_outline_page.visit() self.course_outline_page.section_at(0).subsection_at(0).toggle_expand() self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1) self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete() self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0) def 
test_cancel_delete_unit(self): """ Scenario: Cancel delete of unit Given that I clicked the delete button for a unit on the course outline And I received a confirmation message, asking me if I really want to delete the unit When I click "Cancel" Then the confirmation message should close And the unit should remain in the course outline """ self.course_outline_page.visit() self.course_outline_page.section_at(0).subsection_at(0).toggle_expand() self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1) self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True) self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1) def test_delete_all_no_content_message(self): """ Scenario: Delete all sections/subsections/units in a course, "no content" message should appear Given that I delete all sections, subsections, and units in a course When I visit the course outline Then I will see a message that says, "You haven't added any content to this course yet" Add see a + Add Section button """ self.course_outline_page.visit() self.assertFalse(self.course_outline_page.has_no_content_message) self.course_outline_page.section_at(0).delete() self.assertEqual(len(self.course_outline_page.sections()), 0) self.assertTrue(self.course_outline_page.has_no_content_message) class ExpandCollapseMultipleSectionsTest(CourseOutlineTest): """ Feature: Courses with multiple sections can expand and collapse all sections. 
""" __test__ = True def populate_course_fixture(self, course_fixture): """ Start with a course with two sections """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit') ) ), XBlockFixtureDesc('chapter', 'Test Section 2').add_children( XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children( XBlockFixtureDesc('vertical', 'Test Unit 2') ) ) ) def verify_all_sections(self, collapsed): """ Verifies that all sections are collapsed if collapsed is True, otherwise all expanded. """ for section in self.course_outline_page.sections(): self.assertEqual(collapsed, section.is_collapsed) def toggle_all_sections(self): """ Toggles the expand collapse state of all sections. """ for section in self.course_outline_page.sections(): section.toggle_expand() def test_expanded_by_default(self): """ Scenario: The default layout for the outline page is to show sections in expanded view Given I have a course with sections When I navigate to the course outline page Then I see the "Collapse All Sections" link And all sections are expanded """ self.course_outline_page.visit() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE) self.verify_all_sections(collapsed=False) def test_no_expand_link_for_empty_course(self): """ Scenario: Collapse link is removed after last section of a course is deleted Given I have a course with multiple sections And I navigate to the course outline page When I will confirm all alerts And I press the "section" delete icon Then I do not see the "Collapse All Sections" link And I will see a message that says "You haven't added any content to this course yet" """ self.course_outline_page.visit() for section in self.course_outline_page.sections(): section.delete() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING) 
self.assertTrue(self.course_outline_page.has_no_content_message) def test_collapse_all_when_all_expanded(self): """ Scenario: Collapse all sections when all sections are expanded Given I navigate to the outline page of a course with sections And all sections are expanded When I click the "Collapse All Sections" link Then I see the "Expand All Sections" link And all sections are collapsed """ self.course_outline_page.visit() self.verify_all_sections(collapsed=False) self.course_outline_page.toggle_expand_collapse() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND) self.verify_all_sections(collapsed=True) def test_collapse_all_when_some_expanded(self): """ Scenario: Collapsing all sections when 1 or more sections are already collapsed Given I navigate to the outline page of a course with sections And all sections are expanded When I collapse the first section And I click the "Collapse All Sections" link Then I see the "Expand All Sections" link And all sections are collapsed """ self.course_outline_page.visit() self.verify_all_sections(collapsed=False) self.course_outline_page.section_at(0).toggle_expand() self.course_outline_page.toggle_expand_collapse() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND) self.verify_all_sections(collapsed=True) def test_expand_all_when_all_collapsed(self): """ Scenario: Expanding all sections when all sections are collapsed Given I navigate to the outline page of a course with multiple sections And I click the "Collapse All Sections" link When I click the "Expand All Sections" link Then I see the "Collapse All Sections" link And all sections are expanded """ self.course_outline_page.visit() self.course_outline_page.toggle_expand_collapse() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND) self.course_outline_page.toggle_expand_collapse() 
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE) self.verify_all_sections(collapsed=False) def test_expand_all_when_some_collapsed(self): """ Scenario: Expanding all sections when 1 or more sections are already expanded Given I navigate to the outline page of a course with multiple sections And I click the "Collapse All Sections" link When I expand the first section And I click the "Expand All Sections" link Then I see the "Collapse All Sections" link And all sections are expanded """ self.course_outline_page.visit() self.course_outline_page.toggle_expand_collapse() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND) self.course_outline_page.section_at(0).toggle_expand() self.course_outline_page.toggle_expand_collapse() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE) self.verify_all_sections(collapsed=False) class ExpandCollapseSingleSectionTest(CourseOutlineTest): """ Feature: Courses with a single section can expand and collapse all sections. 
""" __test__ = True def test_no_expand_link_for_empty_course(self): """ Scenario: Collapse link is removed after last section of a course is deleted Given I have a course with one section And I navigate to the course outline page When I will confirm all alerts And I press the "section" delete icon Then I do not see the "Collapse All Sections" link And I will see a message that says "You haven't added any content to this course yet" """ self.course_outline_page.visit() self.course_outline_page.section_at(0).delete() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING) self.assertTrue(self.course_outline_page.has_no_content_message) def test_old_subsection_stays_collapsed_after_creation(self): """ Scenario: Collapsed subsection stays collapsed after creating a new subsection Given I have a course with one section and subsection And I navigate to the course outline page Then the subsection is collapsed And when I create a new subsection Then the first subsection is collapsed And the second subsection is expanded """ self.course_outline_page.visit() self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed) self.course_outline_page.section_at(0).add_subsection() self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed) self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed) class ExpandCollapseEmptyTest(CourseOutlineTest): """ Feature: Courses with no sections initially can expand and collapse all sections after addition. 
""" __test__ = True def populate_course_fixture(self, course_fixture): """ Start with an empty course """ pass def test_no_expand_link_for_empty_course(self): """ Scenario: Expand/collapse for a course with no sections Given I have a course with no sections When I navigate to the course outline page Then I do not see the "Collapse All Sections" link """ self.course_outline_page.visit() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING) def test_link_appears_after_section_creation(self): """ Scenario: Collapse link appears after creating first section of a course Given I have a course with no sections When I navigate to the course outline page And I add a section Then I see the "Collapse All Sections" link And all sections are expanded """ self.course_outline_page.visit() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING) self.course_outline_page.add_section_from_top_button() self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE) self.assertFalse(self.course_outline_page.section_at(0).is_collapsed) class DefaultStatesEmptyTest(CourseOutlineTest): """ Feature: Misc course outline default states/actions when starting with an empty course """ __test__ = True def populate_course_fixture(self, course_fixture): """ Start with an empty course """ pass def test_empty_course_message(self): """ Scenario: Empty course state Given that I am in a course with no sections, subsections, nor units When I visit the course outline Then I will see a message that says "You haven't added any content to this course yet" And see a + Add Section button """ self.course_outline_page.visit() self.assertTrue(self.course_outline_page.has_no_content_message) self.assertTrue(self.course_outline_page.bottom_add_section_button.is_present()) class DefaultStatesContentTest(CourseOutlineTest): """ Feature: Misc course outline default states/actions 
when starting with a course with content """ __test__ = True def test_view_live(self): """ Scenario: View Live version from course outline Given that I am on the course outline When I click the "View Live" button Then a new tab will open to the course on the LMS """ self.course_outline_page.visit() self.course_outline_page.view_live() courseware = CoursewarePage(self.browser, self.course_id) courseware.wait_for_page() self.assertEqual(courseware.num_xblock_components, 3) self.assertEqual(courseware.xblock_component_type(0), 'problem') self.assertEqual(courseware.xblock_component_type(1), 'html') self.assertEqual(courseware.xblock_component_type(2), 'discussion') class UnitNavigationTest(CourseOutlineTest): """ Feature: Navigate to units """ __test__ = True def test_navigate_to_unit(self): """ Scenario: Click unit name to navigate to unit page Given that I have expanded a section/subsection so I can see unit names When I click on a unit name Then I will be taken to the appropriate unit page """ self.course_outline_page.visit() self.course_outline_page.section_at(0).subsection_at(0).toggle_expand() unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to() self.assertTrue(unit.is_browser_on_page) class PublishSectionTest(CourseOutlineTest): """ Feature: Publish sections. """ __test__ = True def populate_course_fixture(self, course_fixture): """ Sets up a course structure with 2 subsections inside a single section. The first subsection has 2 units, and the second subsection has one unit. 
""" self.courseware = CoursewarePage(self.browser, self.course_id) self.course_nav = CourseNavPage(self.browser) course_fixture.add_children( XBlockFixtureDesc('chapter', SECTION_NAME).add_children( XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children( XBlockFixtureDesc('vertical', UNIT_NAME), XBlockFixtureDesc('vertical', 'Test Unit 2'), ), XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children( XBlockFixtureDesc('vertical', 'Test Unit 3'), ), ), ) def test_unit_publishing(self): """ Scenario: Can publish a unit and see published content in LMS Given I have a section with 2 subsections and 3 unpublished units When I go to the course outline Then I see publish button for the first unit, subsection, section When I publish the first unit Then I see that publish button for the first unit disappears And I see publish buttons for subsection, section And I see the changed content in LMS """ self._add_unpublished_content() self.course_outline_page.visit() section, subsection, unit = self._get_items() self.assertTrue(unit.publish_action) self.assertTrue(subsection.publish_action) self.assertTrue(section.publish_action) unit.publish() self.assertFalse(unit.publish_action) self.assertTrue(subsection.publish_action) self.assertTrue(section.publish_action) self.courseware.visit() self.assertEqual(1, self.courseware.num_xblock_components) def test_subsection_publishing(self): """ Scenario: Can publish a subsection and see published content in LMS Given I have a section with 2 subsections and 3 unpublished units When I go to the course outline Then I see publish button for the unit, subsection, section When I publish the first subsection Then I see that publish button for the first subsection disappears And I see that publish buttons disappear for the child units of the subsection And I see publish button for section And I see the changed content in LMS """ self._add_unpublished_content() self.course_outline_page.visit() section, subsection, unit = 
self._get_items() self.assertTrue(unit.publish_action) self.assertTrue(subsection.publish_action) self.assertTrue(section.publish_action) self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish() self.assertFalse(unit.publish_action) self.assertFalse(subsection.publish_action) self.assertTrue(section.publish_action) self.courseware.visit() self.assertEqual(1, self.courseware.num_xblock_components) self.course_nav.go_to_sequential_position(2) self.assertEqual(1, self.courseware.num_xblock_components) def test_section_publishing(self): """ Scenario: Can publish a section and see published content in LMS Given I have a section with 2 subsections and 3 unpublished units When I go to the course outline Then I see publish button for the unit, subsection, section When I publish the section Then I see that publish buttons disappears And I see the changed content in LMS """ self._add_unpublished_content() self.course_outline_page.visit() section, subsection, unit = self._get_items() self.assertTrue(subsection.publish_action) self.assertTrue(section.publish_action) self.assertTrue(unit.publish_action) self.course_outline_page.section(SECTION_NAME).publish() self.assertFalse(subsection.publish_action) self.assertFalse(section.publish_action) self.assertFalse(unit.publish_action) self.courseware.visit() self.assertEqual(1, self.courseware.num_xblock_components) self.course_nav.go_to_sequential_position(2) self.assertEqual(1, self.courseware.num_xblock_components) self.course_nav.go_to_section(SECTION_NAME, 'Test Subsection 2') self.assertEqual(1, self.courseware.num_xblock_components) def _add_unpublished_content(self): """ Adds unpublished HTML content to first three units in the course. 
""" for index in xrange(3): self.course_fixture.create_xblock( self.course_fixture.get_nested_xblocks(category="vertical")[index].locator, XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)), ) def _get_items(self): """ Returns first section, subsection, and unit on the page. """ section = self.course_outline_page.section(SECTION_NAME) subsection = section.subsection(SUBSECTION_NAME) unit = subsection.toggle_expand().unit(UNIT_NAME) return (section, subsection, unit)
agpl-3.0
midma101/m0du1ar
.venv/lib/python2.7/site-packages/paramiko/message.py
7
8622
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ Implementation of an SSH2 "message". """ import struct import cStringIO from paramiko import util class Message (object): """ An SSH2 I{Message} is a stream of bytes that encodes some combination of strings, integers, bools, and infinite-precision integers (known in python as I{long}s). This class builds or breaks down such a byte stream. Normally you don't need to deal with anything this low-level, but it's exposed for people implementing custom extensions, or features that paramiko doesn't support yet. """ def __init__(self, content=None): """ Create a new SSH2 Message. @param content: the byte stream to use as the Message content (passed in only when decomposing a Message). @type content: string """ if content != None: self.packet = cStringIO.StringIO(content) else: self.packet = cStringIO.StringIO() def __str__(self): """ Return the byte stream content of this Message, as a string. @return: the contents of this Message. @rtype: string """ return self.packet.getvalue() def __repr__(self): """ Returns a string representation of this object, for debugging. 
@rtype: string """ return 'paramiko.Message(' + repr(self.packet.getvalue()) + ')' def rewind(self): """ Rewind the message to the beginning as if no items had been parsed out of it yet. """ self.packet.seek(0) def get_remainder(self): """ Return the bytes of this Message that haven't already been parsed and returned. @return: a string of the bytes not parsed yet. @rtype: string """ position = self.packet.tell() remainder = self.packet.read() self.packet.seek(position) return remainder def get_so_far(self): """ Returns the bytes of this Message that have been parsed and returned. The string passed into a Message's constructor can be regenerated by concatenating C{get_so_far} and L{get_remainder}. @return: a string of the bytes parsed so far. @rtype: string """ position = self.packet.tell() self.rewind() return self.packet.read(position) def get_bytes(self, n): """ Return the next C{n} bytes of the Message, without decomposing into an int, string, etc. Just the raw bytes are returned. @return: a string of the next C{n} bytes of the Message, or a string of C{n} zero bytes, if there aren't C{n} bytes remaining. @rtype: string """ b = self.packet.read(n) max_pad_size = 1<<20 # Limit padding to 1 MB if len(b) < n and n < max_pad_size: return b + '\x00' * (n - len(b)) return b def get_byte(self): """ Return the next byte of the Message, without decomposing it. This is equivalent to L{get_bytes(1)<get_bytes>}. @return: the next byte of the Message, or C{'\000'} if there aren't any bytes remaining. @rtype: string """ return self.get_bytes(1) def get_boolean(self): """ Fetch a boolean from the stream. @return: C{True} or C{False} (from the Message). @rtype: bool """ b = self.get_bytes(1) return b != '\x00' def get_int(self): """ Fetch an int from the stream. @return: a 32-bit unsigned integer. @rtype: int """ return struct.unpack('>I', self.get_bytes(4))[0] def get_int64(self): """ Fetch a 64-bit int from the stream. @return: a 64-bit unsigned integer. 
@rtype: long """ return struct.unpack('>Q', self.get_bytes(8))[0] def get_mpint(self): """ Fetch a long int (mpint) from the stream. @return: an arbitrary-length integer. @rtype: long """ return util.inflate_long(self.get_string()) def get_string(self): """ Fetch a string from the stream. This could be a byte string and may contain unprintable characters. (It's not unheard of for a string to contain another byte-stream Message.) @return: a string. @rtype: string """ return self.get_bytes(self.get_int()) def get_list(self): """ Fetch a list of strings from the stream. These are trivially encoded as comma-separated values in a string. @return: a list of strings. @rtype: list of strings """ return self.get_string().split(',') def add_bytes(self, b): """ Write bytes to the stream, without any formatting. @param b: bytes to add @type b: str """ self.packet.write(b) return self def add_byte(self, b): """ Write a single byte to the stream, without any formatting. @param b: byte to add @type b: str """ self.packet.write(b) return self def add_boolean(self, b): """ Add a boolean value to the stream. @param b: boolean value to add @type b: bool """ if b: self.add_byte('\x01') else: self.add_byte('\x00') return self def add_int(self, n): """ Add an integer to the stream. @param n: integer to add @type n: int """ self.packet.write(struct.pack('>I', n)) return self def add_int64(self, n): """ Add a 64-bit int to the stream. @param n: long int to add @type n: long """ self.packet.write(struct.pack('>Q', n)) return self def add_mpint(self, z): """ Add a long int to the stream, encoded as an infinite-precision integer. This method only works on positive numbers. @param z: long int to add @type z: long """ self.add_string(util.deflate_long(z)) return self def add_string(self, s): """ Add a string to the stream. @param s: string to add @type s: str """ self.add_int(len(s)) self.packet.write(s) return self def add_list(self, l): """ Add a list of strings to the stream. 
They are encoded identically to a single string of values separated by commas. (Yes, really, that's how SSH2 does it.) @param l: list of strings to add @type l: list(str) """ self.add_string(','.join(l)) return self def _add(self, i): if type(i) is str: return self.add_string(i) elif type(i) is int: return self.add_int(i) elif type(i) is long: if i > 0xffffffffL: return self.add_mpint(i) else: return self.add_int(i) elif type(i) is bool: return self.add_boolean(i) elif type(i) is list: return self.add_list(i) else: raise Exception('Unknown type') def add(self, *seq): """ Add a sequence of items to the stream. The values are encoded based on their type: str, int, bool, list, or long. @param seq: the sequence of items @type seq: sequence @bug: longs are encoded non-deterministically. Don't use this method. """ for item in seq: self._add(item)
mit
oroulet/freeopcua
tests/gmock/test/gmock_test_utils.py
769
3684
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for Google C++ Mocking Framework.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import sys # Determines path to gtest_test_utils and imports it. SCRIPT_DIR = os.path.dirname(__file__) or '.' # isdir resolves symbolic links. 
# Locate gtest's test utilities relative to this script.  When gmock lives
# inside a gtest checkout the directory is one level up ('../gtest/test');
# otherwise fall back to the standalone layout two levels up.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
  GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
  GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
# Make gtest_test_utils importable; the import below depends on this append.
sys.path.append(GTEST_TESTS_UTIL_DIR)

import gtest_test_utils  # pylint: disable-msg=C6204


def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""

  # Delegates to the gtest helper of the same name.
  return gtest_test_utils.GetSourceDir()


def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting
  file doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """

  # Delegates to the gtest helper of the same name.
  return gtest_test_utils.GetTestExecutablePath(executable_name)


def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code
  else:
    # On Unix, os.WEXITSTATUS() must be used to extract the exit status
    # from the result of os.system().
    if os.WIFEXITED(exit_code):
      return os.WEXITSTATUS(exit_code)
    else:
      return -1


# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409

# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR

# pylint: enable-msg=C6409


def Main():
  """Runs the unit test."""

  # Delegates to the gtest test runner.
  gtest_test_utils.Main()
lgpl-3.0
sebadiaz/rethinkdb
test/rql_test/connections/http_support/werkzeug/utils.py
145
22826
# -*- coding: utf-8 -*- """ werkzeug.utils ~~~~~~~~~~~~~~ This module implements various utilities for WSGI applications. Most of them are used by the request and response wrappers but especially for middleware development it makes sense to use them without the wrappers. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import os import sys import pkgutil try: from html.entities import name2codepoint except ImportError: from htmlentitydefs import name2codepoint from werkzeug._compat import unichr, text_type, string_types, iteritems, \ reraise, PY2 from werkzeug._internal import _DictAccessorProperty, \ _parse_signature, _missing _format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2)) _entity_re = re.compile(r'&([^;]+);') _filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]') _windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL') class cached_property(object): """A decorator that converts a function into a lazy property. The function wrapped is called the first time to retrieve the result and then that calculated result is used the next time you access the value:: class Foo(object): @cached_property def foo(self): # calculate something important here return 42 The class has to have a `__dict__` in order for this property to work. """ # implementation detail: this property is implemented as non-data # descriptor. non-data descriptors are only invoked if there is # no entry with the same name in the instance's __dict__. # this allows us to completely get rid of the access function call # overhead. If one choses to invoke __get__ by hand the property # will still work as expected because the lookup logic is replicated # in __get__ for manual invocation. 
def __init__(self, func, name=None, doc=None): self.__name__ = name or func.__name__ self.__module__ = func.__module__ self.__doc__ = doc or func.__doc__ self.func = func def __get__(self, obj, type=None): if obj is None: return self value = obj.__dict__.get(self.__name__, _missing) if value is _missing: value = self.func(obj) obj.__dict__[self.__name__] = value return value class environ_property(_DictAccessorProperty): """Maps request attributes to environment variables. This works not only for the Werzeug request object, but also any other class with an environ attribute: >>> class Test(object): ... environ = {'key': 'value'} ... test = environ_property('key') >>> var = Test() >>> var.test 'value' If you pass it a second value it's used as default if the key does not exist, the third one can be a converter that takes a value and converts it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value is used. If no default value is provided `None` is used. Per default the property is read only. You have to explicitly enable it by passing ``read_only=False`` to the constructor. """ read_only = True def lookup(self, obj): return obj.environ class header_property(_DictAccessorProperty): """Like `environ_property` but for headers.""" def lookup(self, obj): return obj.headers class HTMLBuilder(object): """Helper object for HTML generation. Per default there are two instances of that class. The `html` one, and the `xhtml` one for those two dialects. The class uses keyword parameters and positional parameters to generate small snippets of HTML. Keyword parameters are converted to XML/SGML attributes, positional arguments are used as children. Because Python accepts positional arguments before keyword arguments it's a good idea to use a list with the star-syntax for some children: >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ', ... 
html.a('bar', href='bar.html')]) u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>' This class works around some browser limitations and can not be used for arbitrary SGML/XML generation. For that purpose lxml and similar libraries exist. Calling the builder escapes the string passed: >>> html.p(html("<foo>")) u'<p>&lt;foo&gt;</p>' """ _entity_re = re.compile(r'&([^;]+);') _entities = name2codepoint.copy() _entities['apos'] = 39 _empty_elements = set([ 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', 'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param', 'source', 'wbr' ]) _boolean_attributes = set([ 'selected', 'checked', 'compact', 'declare', 'defer', 'disabled', 'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap' ]) _plaintext_elements = set(['textarea']) _c_like_cdata = set(['script', 'style']) def __init__(self, dialect): self._dialect = dialect def __call__(self, s): return escape(s) def __getattr__(self, tag): if tag[:2] == '__': raise AttributeError(tag) def proxy(*children, **arguments): buffer = '<' + tag for key, value in iteritems(arguments): if value is None: continue if key[-1] == '_': key = key[:-1] if key in self._boolean_attributes: if not value: continue if self._dialect == 'xhtml': value = '="' + key + '"' else: value = '' else: value = '="' + escape(value) + '"' buffer += ' ' + key + value if not children and tag in self._empty_elements: if self._dialect == 'xhtml': buffer += ' />' else: buffer += '>' return buffer buffer += '>' children_as_string = ''.join([text_type(x) for x in children if x is not None]) if children_as_string: if tag in self._plaintext_elements: children_as_string = escape(children_as_string) elif tag in self._c_like_cdata and self._dialect == 'xhtml': children_as_string = '/*<![CDATA[*/' + \ children_as_string + '/*]]>*/' buffer += children_as_string + '</' + tag + '>' return buffer return proxy def __repr__(self): return '<%s for %r>' % ( 
self.__class__.__name__, self._dialect ) html = HTMLBuilder('html') xhtml = HTMLBuilder('xhtml') def get_content_type(mimetype, charset): """Return the full content type string with charset for a mimetype. If the mimetype represents text the charset will be appended as charset parameter, otherwise the mimetype is returned unchanged. :param mimetype: the mimetype to be used as content type. :param charset: the charset to be appended in case it was a text mimetype. :return: the content type. """ if mimetype.startswith('text/') or \ mimetype == 'application/xml' or \ (mimetype.startswith('application/') and mimetype.endswith('+xml')): mimetype += '; charset=' + charset return mimetype def format_string(string, context): """String-template format a string: >>> format_string('$foo and ${foo}s', dict(foo=42)) '42 and 42s' This does not do any attribute lookup etc. For more advanced string formattings have a look at the `werkzeug.template` module. :param string: the format string. :param context: a dict with the variables to insert. """ def lookup_arg(match): x = context[match.group(1) or match.group(2)] if not isinstance(x, string_types): x = type(string)(x) return x return _format_re.sub(lookup_arg, string) def secure_filename(filename): r"""Pass it a filename and it will return a secure version of it. This filename can then safely be stored on a regular file system and passed to :func:`os.path.join`. The filename returned is an ASCII only string for maximum portability. On windows system the function also makes sure that the file is not named after one of the special device files. >>> secure_filename("My cool movie.mov") 'My_cool_movie.mov' >>> secure_filename("../../../etc/passwd") 'etc_passwd' >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt') 'i_contain_cool_umlauts.txt' The function might return an empty filename. It's your responsibility to ensure that the filename is unique and that you generate random filename if the function returned an empty one. .. 
versionadded:: 0.5 :param filename: the filename to secure """ if isinstance(filename, text_type): from unicodedata import normalize filename = normalize('NFKD', filename).encode('ascii', 'ignore') if not PY2: filename = filename.decode('ascii') for sep in os.path.sep, os.path.altsep: if sep: filename = filename.replace(sep, ' ') filename = str(_filename_ascii_strip_re.sub('', '_'.join( filename.split()))).strip('._') # on nt a couple of special files are present in each folder. We # have to ensure that the target file is not such a filename. In # this case we prepend an underline if os.name == 'nt' and filename and \ filename.split('.')[0].upper() in _windows_device_files: filename = '_' + filename return filename def escape(s, quote=None): """Replace special characters "&", "<", ">" and (") to HTML-safe sequences. There is a special handling for `None` which escapes to an empty string. .. versionchanged:: 0.9 `quote` is now implicitly on. :param s: the string to escape. :param quote: ignored. """ if s is None: return '' elif hasattr(s, '__html__'): return text_type(s.__html__()) elif not isinstance(s, string_types): s = text_type(s) if quote is not None: from warnings import warn warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2) s = s.replace('&', '&amp;').replace('<', '&lt;') \ .replace('>', '&gt;').replace('"', "&quot;") return s def unescape(s): """The reverse function of `escape`. This unescapes all the HTML entities, not only the XML entities inserted by `escape`. :param s: the string to unescape. 
""" def handle_match(m): name = m.group(1) if name in HTMLBuilder._entities: return unichr(HTMLBuilder._entities[name]) try: if name[:2] in ('#x', '#X'): return unichr(int(name[2:], 16)) elif name.startswith('#'): return unichr(int(name[1:])) except ValueError: pass return u'' return _entity_re.sub(handle_match, s) def redirect(location, code=302): """Return a response object (a WSGI application) that, if called, redirects the client to the target location. Supported codes are 301, 302, 303, 305, and 307. 300 is not supported because it's not a real redirect and 304 because it's the answer for a request with a request with defined If-Modified-Since headers. .. versionadded:: 0.6 The location can now be a unicode string that is encoded using the :func:`iri_to_uri` function. :param location: the location the response should redirect to. :param code: the redirect status code. defaults to 302. """ from werkzeug.wrappers import Response display_location = escape(location) if isinstance(location, text_type): # Safe conversion is necessary here as we might redirect # to a broken URI scheme (for instance itms-services). from werkzeug.urls import iri_to_uri location = iri_to_uri(location, safe_conversion=True) response = Response( '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n' '<title>Redirecting...</title>\n' '<h1>Redirecting...</h1>\n' '<p>You should be redirected automatically to target URL: ' '<a href="%s">%s</a>. If not click the link.' % (escape(location), display_location), code, mimetype='text/html') response.headers['Location'] = location return response def append_slash_redirect(environ, code=301): """Redirect to the same URL but with a slash appended. The behavior of this function is undefined if the path ends with a slash already. :param environ: the WSGI environment for the request that triggers the redirect. :param code: the status code for the redirect. 
""" new_path = environ['PATH_INFO'].strip('/') + '/' query_string = environ.get('QUERY_STRING') if query_string: new_path += '?' + query_string return redirect(new_path, code) def import_string(import_name, silent=False): """Imports an object based on a string. This is useful if you want to use import paths as endpoints or something similar. An import path can be specified either in dotted notation (``xml.sax.saxutils.escape``) or with a colon as object delimiter (``xml.sax.saxutils:escape``). If `silent` is True the return value will be `None` if the import fails. :param import_name: the dotted name for the object to import. :param silent: if set to `True` import errors are ignored and `None` is returned instead. :return: imported object """ #XXX: py3 review needed assert isinstance(import_name, string_types) # force the import name to automatically convert to strings import_name = str(import_name) try: if ':' in import_name: module, obj = import_name.split(':', 1) elif '.' in import_name: module, obj = import_name.rsplit('.', 1) else: return __import__(import_name) # __import__ is not able to handle unicode strings in the fromlist # if the module is a package if PY2 and isinstance(obj, unicode): obj = obj.encode('utf-8') try: return getattr(__import__(module, None, None, [obj]), obj) except (ImportError, AttributeError): # support importing modules not yet set up by the parent module # (or package for that matter) modname = module + '.' + obj __import__(modname) return sys.modules[modname] except ImportError as e: if not silent: reraise( ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2]) def find_modules(import_path, include_packages=False, recursive=False): """Find all the modules below a package. This can be useful to automatically import all views / controllers so that their metaclasses / function decorators have a chance to register themselves on the application. Packages are not returned unless `include_packages` is `True`. 
This can also recursively list modules but in that case it will import all the packages to get the correct load path of that module. :param import_name: the dotted name for the package to find child modules. :param include_packages: set to `True` if packages should be returned, too. :param recursive: set to `True` if recursion should happen. :return: generator """ module = import_string(import_path) path = getattr(module, '__path__', None) if path is None: raise ValueError('%r is not a package' % import_path) basename = module.__name__ + '.' for importer, modname, ispkg in pkgutil.iter_modules(path): modname = basename + modname if ispkg: if include_packages: yield modname if recursive: for item in find_modules(modname, include_packages, True): yield item else: yield modname def validate_arguments(func, args, kwargs, drop_extra=True): """Check if the function accepts the arguments and keyword arguments. Returns a new ``(args, kwargs)`` tuple that can safely be passed to the function without causing a `TypeError` because the function signature is incompatible. If `drop_extra` is set to `True` (which is the default) any extra positional or keyword arguments are dropped automatically. The exception raised provides three attributes: `missing` A set of argument names that the function expected but where missing. `extra` A dict of keyword arguments that the function can not handle but where provided. `extra_positional` A list of values that where given by positional argument but the function cannot accept. 
This can be useful for decorators that forward user submitted data to a view function:: from werkzeug.utils import ArgumentValidationError, validate_arguments def sanitize(f): def proxy(request): data = request.values.to_dict() try: args, kwargs = validate_arguments(f, (request,), data) except ArgumentValidationError: raise BadRequest('The browser failed to transmit all ' 'the data expected.') return f(*args, **kwargs) return proxy :param func: the function the validation is performed against. :param args: a tuple of positional arguments. :param kwargs: a dict of keyword arguments. :param drop_extra: set to `False` if you don't want extra arguments to be silently dropped. :return: tuple in the form ``(args, kwargs)``. """ parser = _parse_signature(func) args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5] if missing: raise ArgumentValidationError(tuple(missing)) elif (extra or extra_positional) and not drop_extra: raise ArgumentValidationError(None, extra, extra_positional) return tuple(args), kwargs def bind_arguments(func, args, kwargs): """Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments. 
""" args, kwargs, missing, extra, extra_positional, \ arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs) values = {} for (name, has_default, default), value in zip(arg_spec, args): values[name] = value if vararg_var is not None: values[vararg_var] = tuple(extra_positional) elif extra_positional: raise TypeError('too many positional arguments') if kwarg_var is not None: multikw = set(extra) & set([x[0] for x in arg_spec]) if multikw: raise TypeError('got multiple values for keyword argument ' + repr(next(iter(multikw)))) values[kwarg_var] = extra elif extra: raise TypeError('got unexpected keyword argument ' + repr(next(iter(extra)))) return values class ArgumentValidationError(ValueError): """Raised if :func:`validate_arguments` fails to validate""" def __init__(self, missing=None, extra=None, extra_positional=None): self.missing = set(missing or ()) self.extra = extra or {} self.extra_positional = extra_positional or [] ValueError.__init__(self, 'function arguments invalid. (' '%d missing, %d additional)' % ( len(self.missing), len(self.extra) + len(self.extra_positional) )) class ImportStringError(ImportError): """Provides information about a failed :func:`import_string` attempt.""" #: String in dotted notation that failed to be imported. import_name = None #: Wrapped exception. exception = None def __init__(self, import_name, exception): self.import_name = import_name self.exception = exception msg = ( 'import_string() failed for %r. 
Possible reasons are:\n\n' '- missing __init__.py in a package;\n' '- package or module path not included in sys.path;\n' '- duplicated package or module name taking precedence in ' 'sys.path;\n' '- missing module, class, function or variable;\n\n' 'Debugged import:\n\n%s\n\n' 'Original exception:\n\n%s: %s') name = '' tracked = [] for part in import_name.replace(':', '.').split('.'): name += (name and '.') + part imported = import_string(name, silent=True) if imported: tracked.append((name, getattr(imported, '__file__', None))) else: track = ['- %r found in %r.' % (n, i) for n, i in tracked] track.append('- %r not found.' % name) msg = msg % (import_name, '\n'.join(track), exception.__class__.__name__, str(exception)) break ImportError.__init__(self, msg) def __repr__(self): return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name, self.exception) # circular dependencies from werkzeug.http import quote_header_value, unquote_header_value, \ cookie_date # DEPRECATED # these objects were previously in this module as well. we import # them here for backwards compatibility with old pickles. from werkzeug.datastructures import MultiDict, CombinedMultiDict, \ Headers, EnvironHeaders from werkzeug.http import parse_cookie, dump_cookie
agpl-3.0
imsparsh/python-for-android
python3-alpha/python3-src/Lib/lib2to3/fixes/fix_raise.py
203
2926
"""Fixer for 'raise E, V, T'

raise         -> raise
raise E       -> raise E
raise E, V    -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)

raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T               -> warns about string exceptions

CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
   instance. The correct Python 3 idiom is

        raise E from V

   but since we can't detect instance-hood by syntax alone and since
   any client code would have to be changed as well, we don't automate
   this.
"""
# Author: Collin Winter

# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple


class FixRaise(fixer_base.BaseFix):
    # Fixer that rewrites Python 2 multi-argument raise statements into
    # their Python 3 equivalents (see the module docstring for the mapping).

    BM_compatible = True
    PATTERN = """
    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
    """

    def transform(self, node, results):
        """Rewrite one matched ``raise`` statement.

        ``results`` holds the captures from PATTERN: ``exc`` (required),
        and optionally ``val`` and ``tb``.  Returns a replacement node,
        or None when the statement cannot be converted (string exception).
        """
        syms = self.syms

        exc = results["exc"].clone()
        if exc.type == token.STRING:
            # String exceptions were removed in Python 3; flag instead of fix.
            msg = "Python 3 does not support string exceptions"
            self.cannot_convert(node, msg)
            return

        # Python 2 supports
        #  raise ((((E1, E2), E3), E4), E5), V
        # as a synonym for
        #  raise E1, V
        # Since Python 3 will not support this, we recurse down any tuple
        # literals, always taking the first element.
        if is_tuple(exc):
            while is_tuple(exc):
                # exc.children[1:-1] is the unparenthesized tuple
                # exc.children[1].children[0] is the first element of the tuple
                exc = exc.children[1].children[0].clone()
            exc.prefix = " "

        if "val" not in results:
            # One-argument raise
            new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
            new.prefix = node.prefix
            return new

        val = results["val"].clone()
        if is_tuple(val):
            # raise E, (a, b) -> raise E(a, b): splat the tuple into args.
            args = [c.clone() for c in val.children[1:-1]]
        else:
            val.prefix = ""
            args = [val]

        if "tb" in results:
            tb = results["tb"].clone()
            tb.prefix = ""

            e = exc
            # If there's a traceback and None is passed as the value, then don't
            # add a call, since the user probably just wants to add a
            # traceback. See issue #9661.
            if val.type != token.NAME or val.value != "None":
                e = Call(exc, args)
            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
            new.prefix = node.prefix
            return new
        else:
            return pytree.Node(syms.raise_stmt,
                               [Name("raise"), Call(exc, args)],
                               prefix=node.prefix)
apache-2.0
fujita-shintaro/readthedocs.org
readthedocs/builds/views.py
18
2745
import logging

from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from django.http import HttpResponsePermanentRedirect
from django.conf import settings
from django.core.urlresolvers import reverse

from rest_framework.renderers import JSONRenderer

from readthedocs.builds.models import Build, Version
from readthedocs.builds.filters import BuildFilter
from readthedocs.projects.models import Project
from readthedocs.restapi.serializers import BuildSerializerFull

from redis import Redis, ConnectionError

log = logging.getLogger(__name__)


class BuildList(ListView):

    """List the builds of one project.

    The project is looked up from the ``project_slug`` URL kwarg among the
    projects visible (``protected``) to the requesting user; an unknown or
    hidden project yields a 404.
    """

    model = Build

    def get_queryset(self):
        """Return the builds belonging to the project named in the URL."""
        self.project_slug = self.kwargs.get('project_slug', None)
        self.project = get_object_or_404(
            Project.objects.protected(self.request.user),
            slug=self.project_slug,
        )
        return Build.objects.filter(project=self.project)

    def get_context_data(self, **kwargs):
        """Add project, filter, active builds, versions and queue length."""
        context = super(BuildList, self).get_context_data(**kwargs)
        build_filter = BuildFilter(self.request.GET,
                                   queryset=self.get_queryset())
        running = self.get_queryset().exclude(state="finished").values('id')
        context['project'] = self.project
        context['filter'] = build_filter
        context['active_builds'] = running
        context['versions'] = Version.objects.public(
            user=self.request.user, project=self.project)
        # Queue length is best-effort: a down Redis must not break the page.
        try:
            context['queue_length'] = Redis(**settings.REDIS).llen('celery')
        except ConnectionError:
            context['queue_length'] = None
        return context


class BuildDetail(DetailView):

    """Show a single build, scoped to a user-visible project."""

    model = Build

    def get_queryset(self):
        """Return the builds belonging to the project named in the URL."""
        self.project_slug = self.kwargs.get('project_slug', None)
        self.project = get_object_or_404(
            Project.objects.protected(self.request.user),
            slug=self.project_slug,
        )
        return Build.objects.filter(project=self.project)

    def get_context_data(self, **kwargs):
        """Add the project and a JSON rendering of the build to the context."""
        context = super(BuildDetail, self).get_context_data(**kwargs)
        context['project'] = self.project
        serialized = BuildSerializerFull(self.get_object()).data
        context['build_json'] = JSONRenderer().render(serialized)
        return context


def builds_redirect_list(request, project_slug):
    """Permanently redirect an old build-list URL to the current one."""
    target = reverse('builds_project_list', args=[project_slug])
    return HttpResponsePermanentRedirect(target)


def builds_redirect_detail(request, project_slug, pk):
    """Permanently redirect an old build-detail URL to the current one."""
    target = reverse('builds_detail', args=[project_slug, pk])
    return HttpResponsePermanentRedirect(target)
mit
noyeitan/cubes
tests/test_cells.py
7
9739
"""Tests for cubes cell/cut construction, serialization and slicing.

Fixes:
- ``test_suite()`` referenced undefined names ``AggregationBrowserTestCase``
  and ``CellsAndCutsTestCase`` and raised NameError when called; it now
  collects the three test cases actually defined in this module.
- Non-raw string literals containing ``\\-`` (an unrecognized escape,
  a DeprecationWarning on Python 3.6+) were changed to raw strings; the
  string values are unchanged.
"""

import unittest

from cubes.cells import Cell, PointCut, SetCut, RangeCut
from cubes.cells import string_from_path, cut_from_string, path_from_string
from cubes.cells import cut_from_dict
from cubes.errors import CubesError, ArgumentError
from cubes.errors import HierarchyError, NoSuchDimensionError

from .common import CubesTestCaseBase, create_provider


class CutsTestCase(CubesTestCaseBase):
    """Cut depth computation and dict (de)serialization round trips."""

    def setUp(self):
        super(CutsTestCase, self).setUp()
        self.provider = create_provider("browser_test.json")
        self.cube = self.provider.cube("transactions")
        self.dim_date = self.cube.dimension("date")

    def test_cut_depth(self):
        dim = self.cube.dimension("date")
        self.assertEqual(1, PointCut(dim, [1]).level_depth())
        self.assertEqual(3, PointCut(dim, [1, 1, 1]).level_depth())
        self.assertEqual(1, RangeCut(dim, [1], [1]).level_depth())
        self.assertEqual(3, RangeCut(dim, [1, 1, 1], [1]).level_depth())
        self.assertEqual(1, SetCut(dim, [[1], [1]]).level_depth())
        self.assertEqual(3, SetCut(dim, [[1], [1], [1, 1, 1]]).level_depth())

    def test_cut_from_dict(self):
        # d = {"type":"point", "path":[2010]}
        # self.assertRaises(Exception, cubes.cut_from_dict, d)

        d = {"type": "point", "path": [2010], "dimension": "date",
             "level_depth": 1, "hierarchy": None, "invert": False,
             "hidden": False}

        cut = cut_from_dict(d)
        tcut = PointCut("date", [2010])
        self.assertEqual(tcut, cut)
        self.assertEqual(dict(d), tcut.to_dict())
        self._assert_invert(d, cut, tcut)

        d = {"type": "range", "from": [2010], "to": [2012, 10],
             "dimension": "date", "level_depth": 2, "hierarchy": None,
             "invert": False, "hidden": False}
        cut = cut_from_dict(d)
        tcut = RangeCut("date", [2010], [2012, 10])
        self.assertEqual(tcut, cut)
        self.assertEqual(dict(d), tcut.to_dict())
        self._assert_invert(d, cut, tcut)

        d = {"type": "set", "paths": [[2010], [2012, 10]],
             "dimension": "date", "level_depth": 2, "hierarchy": None,
             "invert": False, "hidden": False}
        cut = cut_from_dict(d)
        tcut = SetCut("date", [[2010], [2012, 10]])
        self.assertEqual(tcut, cut)
        self.assertEqual(dict(d), tcut.to_dict())
        self._assert_invert(d, cut, tcut)

        self.assertRaises(ArgumentError, cut_from_dict, {"type": "xxx"})

    def _assert_invert(self, d, cut, tcut):
        # Inverting both the parsed and the expected cut must keep them
        # equal and round-trippable through to_dict().
        cut.invert = True
        tcut.invert = True
        d["invert"] = True
        self.assertEqual(tcut, cut)
        self.assertEqual(dict(d), tcut.to_dict())


class StringConversionsTestCase(unittest.TestCase):
    """String <-> cut/path conversions, including escaping of '-', ':', ';'."""

    def test_cut_string_conversions(self):
        cut = PointCut("foo", ["10"])
        self.assertEqual("foo:10", str(cut))
        self.assertEqual(cut, cut_from_string("foo:10"))

        cut = PointCut("foo", ["123_abc_", "10", "_"])
        self.assertEqual("foo:123_abc_,10,_", str(cut))
        self.assertEqual(cut, cut_from_string("foo:123_abc_,10,_"))

        cut = PointCut("foo", ["123_ abc_"])
        self.assertEqual(r"foo:123_ abc_", str(cut))
        self.assertEqual(cut, cut_from_string("foo:123_ abc_"))

        cut = PointCut("foo", ["a-b"])
        self.assertEqual(r"foo:a\-b", str(cut))
        self.assertEqual(cut, cut_from_string(r"foo:a\-b"))

        cut = PointCut("foo", ["a+b"])
        self.assertEqual("foo:a+b", str(cut))
        self.assertEqual(cut, cut_from_string("foo:a+b"))

    def test_special_characters(self):
        self.assertEqual('\\:q\\-we,a\\\\sd\\;,100',
                         string_from_path([":q-we", "a\\sd;", 100]))

    def test_string_from_path(self):
        self.assertEqual('qwe,asd,100',
                         string_from_path(["qwe", "asd", 100]))
        self.assertEqual('', string_from_path([]))
        self.assertEqual('', string_from_path(None))

    def test_path_from_string(self):
        self.assertEqual(["qwe", "asd", "100"],
                         path_from_string('qwe,asd,100'))
        self.assertEqual([], path_from_string(''))
        self.assertEqual([], path_from_string(None))

    def test_set_cut_string(self):
        cut = SetCut("foo", [["1"], ["2", "3"], ["qwe", "asd", "100"]])
        self.assertEqual("foo:1;2,3;qwe,asd,100", str(cut))
        self.assertEqual(cut, cut_from_string("foo:1;2,3;qwe,asd,100"))

        # single-element SetCuts cannot go round trip, they become point cuts
        cut = SetCut("foo", [["a+b"]])
        self.assertEqual("foo:a+b", str(cut))
        self.assertEqual(PointCut("foo", ["a+b"]),
                         cut_from_string("foo:a+b"))

        cut = SetCut("foo", [["a-b"]])
        self.assertEqual(r"foo:a\-b", str(cut))
        self.assertEqual(PointCut("foo", ["a-b"]),
                         cut_from_string(r"foo:a\-b"))

    def test_range_cut_string(self):
        cut = RangeCut("date", ["2010"], ["2011"])
        self.assertEqual("date:2010-2011", str(cut))
        self.assertEqual(cut, cut_from_string("date:2010-2011"))

        cut = RangeCut("date", ["2010"], None)
        self.assertEqual("date:2010-", str(cut))
        cut = cut_from_string("date:2010-")
        if cut.to_path:
            self.fail('there should be no to path, is: %s' % (cut.to_path, ))

        cut = RangeCut("date", None, ["2010"])
        self.assertEqual("date:-2010", str(cut))
        cut = cut_from_string("date:-2010")
        if cut.from_path:
            self.fail('there should be no from path is: %s'
                      % (cut.from_path, ))

        cut = RangeCut("date", ["2010", "11", "12"], ["2011", "2", "3"])
        self.assertEqual("date:2010,11,12-2011,2,3", str(cut))
        self.assertEqual(cut, cut_from_string("date:2010,11,12-2011,2,3"))

        cut = RangeCut("foo", ["a+b"], ["1"])
        self.assertEqual("foo:a+b-1", str(cut))
        self.assertEqual(cut, cut_from_string("foo:a+b-1"))

        cut = RangeCut("foo", ["a-b"], ["1"])
        self.assertEqual(r"foo:a\-b-1", str(cut))
        self.assertEqual(cut, cut_from_string(r"foo:a\-b-1"))

    def test_hierarchy_cut(self):
        cut = PointCut("date", ["10"], "dqmy")
        self.assertEqual("date@dqmy:10", str(cut))
        self.assertEqual(cut, cut_from_string("date@dqmy:10"))


class CellInteractiveSlicingTestCase(CubesTestCaseBase):
    """Interactive slicing/drilldown behavior of Cell objects."""

    def setUp(self):
        super(CellInteractiveSlicingTestCase, self).setUp()
        self.provider = create_provider("model.json")
        self.cube = self.provider.cube("contracts")

    def test_cutting(self):
        full_cube = Cell(self.cube)
        self.assertEqual(self.cube, full_cube.cube)
        self.assertEqual(0, len(full_cube.cuts))

        cell = full_cube.slice(PointCut("date", [2010]))
        self.assertEqual(1, len(cell.cuts))

        cell = cell.slice(PointCut("supplier", [1234]))
        cell = cell.slice(PointCut("cpv", [50, 20]))
        self.assertEqual(3, len(cell.cuts))
        self.assertEqual(self.cube, cell.cube)

        # Adding existing slice should result in changing the slice
        # properties
        cell = cell.slice(PointCut("date", [2011]))
        self.assertEqual(3, len(cell.cuts))

    def test_multi_slice(self):
        full_cube = Cell(self.cube)

        cuts_list = (
            PointCut("date", [2010]),
            PointCut("cpv", [50, 20]),
            PointCut("supplier", [1234]))

        cell_list = full_cube.multi_slice(cuts_list)
        self.assertEqual(3, len(cell_list.cuts))

        self.assertRaises(CubesError, full_cube.multi_slice, {})

    def test_get_cell_dimension_cut(self):
        full_cube = Cell(self.cube)
        cell = full_cube.slice(PointCut("date", [2010]))
        cell = cell.slice(PointCut("supplier", [1234]))

        cut = cell.cut_for_dimension("date")
        self.assertEqual(str(cut.dimension), "date")

        self.assertRaises(NoSuchDimensionError, cell.cut_for_dimension,
                          "someunknown")

        cut = cell.cut_for_dimension("cpv")
        self.assertEqual(cut, None)

    def test_hierarchy_path(self):
        dim = self.cube.dimension("cpv")
        hier = dim.hierarchy()

        levels = hier.levels_for_path([])
        self.assertEqual(len(levels), 0)
        levels = hier.levels_for_path(None)
        self.assertEqual(len(levels), 0)

        levels = hier.levels_for_path([1, 2, 3, 4])
        self.assertEqual(len(levels), 4)
        names = [level.name for level in levels]
        self.assertEqual(names, ['division', 'group', 'class', 'category'])

        self.assertRaises(HierarchyError, hier.levels_for_path,
                          [1, 2, 3, 4, 5, 6, 7, 8])

    def test_hierarchy_drilldown_levels(self):
        dim = self.cube.dimension("cpv")
        hier = dim.hierarchy()

        levels = hier.levels_for_path([], drilldown=True)
        self.assertEqual(len(levels), 1)
        self.assertEqual(levels[0].name, 'division')
        levels = hier.levels_for_path(None, drilldown=True)
        self.assertEqual(len(levels), 1)
        self.assertEqual(levels[0].name, 'division')

    def test_slice_drilldown(self):
        cut = PointCut("date", [])
        original_cell = Cell(self.cube, [cut])

        cell = original_cell.drilldown("date", 2010)
        self.assertEqual([2010], cell.cut_for_dimension("date").path)

        cell = cell.drilldown("date", 1)
        self.assertEqual([2010, 1], cell.cut_for_dimension("date").path)

        cell = cell.drilldown("date", 2)
        self.assertEqual([2010, 1, 2],
                         cell.cut_for_dimension("date").path)


def test_suite():
    """Collect the test cases defined in this module.

    The original referenced AggregationBrowserTestCase and
    CellsAndCutsTestCase, neither of which exists here, so calling this
    raised NameError; it now uses the classes actually defined above.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(CutsTestCase))
    suite.addTest(unittest.makeSuite(StringConversionsTestCase))
    suite.addTest(unittest.makeSuite(CellInteractiveSlicingTestCase))
    return suite
mit
aknackiron/testdroid-samples
appium/sample-scripts/python/testdroid_android_hybrid.py
2
7414
#
# For help on setting up your machine and configuring this TestScript go to
# http://docs.bitbar.com/testing/appium/
#

import os
import time
import unittest
import subprocess

from appium import webdriver
from device_finder import DeviceFinder
from selenium.common.exceptions import WebDriverException
from testdroid_utils import TDUtils


class TestdroidAndroid(unittest.TestCase):
    """Appium smoke test for the Testdroid Android hybrid sample app.

    setUp provisions a cloud (or local) device and an Appium session,
    testSample drives the hybrid (native + webview) UI, tearDown quits
    the driver.
    """

    def setUp(self):
        #
        # IMPORTANT: Set the following parameters.
        # You can set the parameters outside the script with environment
        # variables.
        # If env var is not set the string after or is used.
        #
        testdroid_url = os.environ.get('TESTDROID_URL') or \
            "https://cloud.bitbar.com"
        appium_url = os.environ.get('TESTDROID_APPIUM_URL') or \
            'https://appium.bitbar.com/wd/hub'
        testdroid_apiKey = os.environ.get('TESTDROID_APIKEY') or ""
        testdroid_project = os.environ.get('TESTDROID_PROJECT') or \
            'Android hybrid sample project'
        testdroid_testrun = os.environ.get('TESTDROID_TESTRUN') or \
            'My testrun'
        testdroid_app = os.environ.get('TESTDROID_APP') or ""
        app_package = os.environ.get('TESTDROID_APP_PACKAGE') or \
            'com.testdroid.sample.android'
        app_activity = os.environ.get('TESTDROID_ACTIVITY') or \
            '.MM_MainMenu'
        # Timeouts are passed through as strings; the Appium server parses
        # them.
        new_command_timeout = os.environ.get('TESTDROID_CMD_TIMEOUT') or '60'
        testdroid_test_timeout = os.environ.get('TESTDROID_TEST_TIMEOUT') or '600'
        self.screenshot_dir = os.environ.get('TESTDROID_SCREENSHOTS') or \
            os.getcwd() + "/screenshots"
        self.screenshot_count = 1

        # Options to select device
        # 1) Set environment variable TESTDROID_DEVICE
        # 2) Set device name to this python script
        # 3) Do not set #1 and #2 and let DeviceFinder to find free device for
        #    you
        testdroid_device = os.environ.get('TESTDROID_DEVICE') or ""

        deviceFinder = DeviceFinder(url=testdroid_url)
        if testdroid_device == "":
            # Loop will not exit until free device is found
            while testdroid_device == "":
                testdroid_device = deviceFinder.available_android_device()

        self.utils = TDUtils(self.screenshot_dir)
        self.utils.log("Will save screenshots at: " + self.screenshot_dir)

        # NOTE(review): on Python 3, subprocess.check_output returns bytes,
        # so the "self.api_level > 16" comparison below would raise
        # TypeError for the localhost branch. This presumably targets
        # Python 2, or device_API_level returns an int — TODO confirm.
        if "localhost" in appium_url:
            self.api_level = subprocess.check_output(["adb", "shell",
                                                      "getprop ro.build.version.sdk"])
        else:
            self.api_level = deviceFinder.device_API_level(testdroid_device)
        self.utils.log("Device API level is %s" % self.api_level)
        self.utils.log("Starting Appium test using device '%s'" % testdroid_device)

        # Capabilities: choose the automation backend by API level
        # (Selendroid for very old devices, Appium otherwise).
        desired_capabilities_cloud = {}
        desired_capabilities_cloud['testdroid_apiKey'] = testdroid_apiKey
        if self.api_level > 16:
            desired_capabilities_cloud['testdroid_target'] = 'android'
            desired_capabilities_cloud['automationName'] = 'Appium'
        else:
            desired_capabilities_cloud['testdroid_target'] = 'selendroid'
            desired_capabilities_cloud['automationName'] = 'Selendroid'

        desired_capabilities_cloud['testdroid_apiKey'] = testdroid_apiKey
        desired_capabilities_cloud['testdroid_project'] = testdroid_project
        desired_capabilities_cloud['testdroid_testrun'] = testdroid_testrun
        desired_capabilities_cloud['testdroid_device'] = testdroid_device
        desired_capabilities_cloud['testdroid_app'] = testdroid_app
        desired_capabilities_cloud['appPackage'] = app_package
        desired_capabilities_cloud['appActivity'] = app_activity
        desired_capabilities_cloud['platformName'] = 'Android'
        desired_capabilities_cloud['deviceName'] = 'Android Phone'
        desired_capabilities_cloud['newCommandTimeout'] = new_command_timeout
        desired_capabilities_cloud['testdroid_testTimeout'] = testdroid_test_timeout

        # set up webdriver
        self.utils.log("WebDriver request initiated. Waiting for response, \
this typically takes 2-3 mins")
        self.driver = webdriver.Remote(appium_url, desired_capabilities_cloud)
        self.utils.update_driver(self.driver)
        self.utils.log("WebDriver response received")

    def tearDown(self):
        # Always quit the session so the cloud device is released.
        self.utils.log("Quitting")
        self.driver.quit()

    def testSample(self):
        """Drive the hybrid sample: open a webview, load a page, click a
        button in web context, verify text and style, return to native."""
        self.utils.log(" Getting device screen size")
        self.utils.log(" " + str(self.driver.get_window_size()))
        self.utils.screenshot("app_launch")

        self.utils.log("Checking API level. \
This test works only on API 19 \
and above.")
        self.utils.log("API level: " + str(self.api_level))
        if self.api_level < 19:
            raise Exception("The chosen device has API level under 19. \
Hybrid view will crash.")

        self.utils.log('Clicking button "hybrid app"')
        element = self.driver.find_element_by_id('com.testdroid.sample.android:id/mm_b_hybrid')
        element.click()
        self.utils.screenshot('hybrid_activity')

        url = "http://bitbar.github.io/testdroid-samples/"
        self.utils.log('Typing in the url ' + url)
        element = self.driver.find_element_by_id('com.testdroid.sample.android:id/hy_et_url')
        element.send_keys(url)
        self.utils.screenshot('url_typed')

        try:
            self.utils.log("Hiding keyboard")
            self.driver.hide_keyboard()
        except WebDriverException:
            pass  # pass exception, if keyboard isn't visible already
        self.utils.screenshot('keyboard_hidden')

        self.utils.log('Clicking Load url button')
        element = self.driver.find_element_by_id('com.testdroid.sample.android:id/hy_ib_loadUrl')
        element.click()
        self.utils.screenshot('webpage_loaded')

        # Poll up to 30s for the WEBVIEW context to appear; until then the
        # last context reads "undefined".
        context = "undefined"
        end_time = time.time() + 30
        while "undefined" in context and time.time() < end_time:
            contexts = self.driver.contexts
            context = str(contexts[-1])
            self.utils.log("Available contexts: {}, picking: {}".format(contexts, context))

        self.utils.log("Context will be " + context)
        self.driver.switch_to.context(context)
        self.utils.log("Context is " + self.driver.current_context)

        self.utils.log("Finding button with text 'Click for answer'")
        button = self.utils.wait_until_xpath_matches('//button[contains(., "Click for answer")]')
        self.utils.log("Clicking on button")
        button.click()
        self.utils.screenshot("answer")

        self.utils.log("Check answer text")
        self.driver.find_element_by_xpath('//p[@id="result_element" and contains(., "Bitbar")]')

        self.utils.log("Verify button changed color")
        # The page turns the button green on click; only the rgb prefix is
        # checked because the full style string varies by browser.
        style = str(button.get_attribute('style'))
        expected_style = "rgb(127, 255, 0"
        self.assertTrue(expected_style in style)

        # Back to the native side before navigating away.
        self.driver.switch_to.context("NATIVE_APP")
        self.utils.log('Going back')
        self.driver.back()
        self.utils.screenshot('launch_screen')


def initialize():
    # Entry point used by external test runners to obtain the test class.
    return TestdroidAndroid


if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestdroidAndroid)
    unittest.TextTestRunner(verbosity=2).run(suite)
apache-2.0
espenhgn/nest-simulator
pynest/examples/balancedneuron.py
5
7350
# -*- coding: utf-8 -*- # # balancedneuron.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """Balanced neuron example ----------------------------- This script simulates a neuron driven by an excitatory and an inhibitory population of neurons firing Poisson spike trains. The aim is to find a firing rate for the inhibitory population that will make the neuron fire at the same rate as the excitatory population. Optimization is performed using the ``bisection`` method from Scipy, simulating the network repeatedly. This example is also shown in the article [1]_ References ~~~~~~~~~~~~~ .. [1] Eppler JM, Helias M, Mulller E, Diesmann M, Gewaltig MO (2009). PyNEST: A convenient interface to the NEST simulator, Front. Neuroinform. http://dx.doi.org/10.3389/neuro.11.012.2008 """ ############################################################################### # First, we import all necessary modules for simulation, analysis and # plotting. Scipy should be imported before nest. from scipy.optimize import bisect import nest import nest.voltage_trace ############################################################################### # Additionally, we set the verbosity using ``set_verbosity`` to # suppress info messages. 
nest.set_verbosity("M_WARNING")
nest.ResetKernel()

###############################################################################
# Second, the simulation parameters are assigned to variables.

t_sim = 25000.0  # how long we simulate
n_ex = 16000     # size of the excitatory population
n_in = 4000      # size of the inhibitory population
r_ex = 5.0       # mean rate of the excitatory population
r_in = 20.5      # initial rate of the inhibitory population
epsc = 45.0      # peak amplitude of excitatory synaptic currents
ipsc = -45.0     # peak amplitude of inhibitory synaptic currents
d = 1.0          # synaptic delay
lower = 15.0     # lower bound of the search interval
upper = 25.0     # upper bound of the search interval
prec = 0.01      # how close need the excitatory rates be

###############################################################################
# Third, the nodes are created using ``Create``. We store the returned
# handles in variables for later reference.

neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
spikedetector = nest.Create("spike_detector")

###############################################################################
# Fourth, the ``poisson_generator`` (`noise`) is configured using ``set``.
# Note that we need not set parameters for the neuron, the spike detector,
# and the voltmeter, since they have satisfactory defaults.

noise.rate = [n_ex * r_ex, n_in * r_in]

###############################################################################
# Fifth, the ``iaf_psc_alpha`` is connected to the ``spike_detector`` and the
# ``voltmeter``, as are the two Poisson generators to the neuron. The command
# ``Connect`` has different variants. Plain `Connect` just takes the handles
# of pre- and post-synaptic nodes and uses the default values for weight and
# delay. It can also be called with a list of weights, as in the connection
# of the noise below.
# Note that the connection direction for the ``voltmeter`` is reversed
# compared to the ``spike_detector``, because it observes the neuron instead
# of receiving events from it. Thus, ``Connect`` reflects the direction of
# signal flow in the simulation kernel rather than the physical process of
# inserting an electrode into the neuron. The latter semantics is presently
# not available in NEST.

nest.Connect(neuron, spikedetector)
nest.Connect(voltmeter, neuron)
# Use the declared synaptic delay constant `d` (previously hard-coded as
# 1.0, leaving `d` unused); the value is identical.
nest.Connect(noise, neuron, syn_spec={'weight': [[epsc, ipsc]], 'delay': d})

###############################################################################
# To determine the optimal rate of the neurons in the inhibitory population,
# the network is simulated several times for different values of the
# inhibitory rate while measuring the rate of the target neuron. This is done
# by calling ``Simulate`` until the rate of the target neuron matches the
# rate of the neurons in the excitatory population with a certain accuracy.
# The algorithm is implemented in two steps:
#
# First, the function ``output_rate`` is defined to measure the firing rate
# of the target neuron for a given rate of the inhibitory neurons.


def output_rate(guess):
    """Simulate the network and return the target neuron's rate in Hz for
    the given per-neuron inhibitory rate `guess` (Hz)."""
    print("Inhibitory rate estimate: %5.2f Hz" % guess)
    rate = float(abs(n_in * guess))
    noise[1].rate = rate
    # Reset the spike counter so each simulation run is counted afresh.
    spikedetector.n_events = 0
    nest.Simulate(t_sim)
    # Convert the spike count over t_sim (ms) into a rate in Hz.
    out = spikedetector.n_events * 1000.0 / t_sim
    print("  -> Neuron rate: %6.2f Hz (goal: %4.2f Hz)" % (out, r_ex))
    return out


###############################################################################
# The function takes the firing rate of the inhibitory neurons as an
# argument. It scales the rate with the size of the inhibitory population and
# configures the inhibitory Poisson generator (`noise[1]`) accordingly.
# Then, the spike counter of the ``spike_detector`` is reset to zero. The
# network is simulated using ``Simulate``, which takes the desired simulation
# time in milliseconds and advances the network state by this amount of time.
# During simulation, the ``spike_detector`` counts the spikes of the target
# neuron and the total number is read out at the end of the simulation
# period. The return value of ``output_rate()`` is the firing rate of the
# target neuron in Hz.
#
# Second, the scipy function ``bisect`` is used to determine the optimal
# firing rate of the neurons of the inhibitory population.

in_rate = bisect(lambda x: output_rate(x) - r_ex, lower, upper, xtol=prec)
print("Optimal rate for the inhibitory population: %.2f Hz" % in_rate)

###############################################################################
# The function ``bisect`` takes four arguments: first a function whose
# zero crossing is to be determined. Here, the firing rate of the target
# neuron should equal the firing rate of the neurons of the excitatory
# population. Thus we define an anonymous function (using `lambda`) that
# returns the difference between the actual rate of the target neuron and the
# rate of the excitatory Poisson generator, given a rate for the inhibitory
# neurons. The next two arguments are the lower and upper bound of the
# interval in which to search for the zero crossing. The fourth argument of
# ``bisect`` is the desired relative precision of the zero crossing.
#
# Finally, we plot the target neuron's membrane potential as a function of
# time.

nest.voltage_trace.from_device(voltmeter)
nest.voltage_trace.show()
gpl-2.0
ff94315/hiwifi-openwrt-HC5661-HC5761
staging_dir/host/lib/scons-2.1.0/SCons/compat/_scons_dbm.py
21
1801
# # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __doc__ = """ dbm compatibility module for Python versions that don't have dbm. This does not not NOT (repeat, *NOT*) provide complete dbm functionality. It's just a stub on which to hang just enough pieces of dbm functionality that the whichdb.whichdb() implementstation in the various 2.X versions of Python won't blow up even if dbm wasn't compiled in. """ __revision__ = "src/engine/SCons/compat/_scons_dbm.py 5357 2011/09/09 21:31:03 bdeegan" class error(Exception): pass def open(*args, **kw): raise error() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
gpl-2.0
houzhenggang/hiwifi-openwrt-HC5661-HC5761
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/email/quoprimime.py
246
10848
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org

"""Quoted-printable content transfer encoding per RFCs 2045-2047.

This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'.  It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.

Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64mime module for that instead.

This module provides an interface to encode and decode both headers and
bodies with quoted-printable encoding.

RFC 2045 defines a method for including character set information in an
`encoded-word' in a header.  This method is commonly used for 8-bit real
names in To:/From:/Cc: etc. fields, as well as Subject: lines.

This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding.  To deal with the various line
wrapping issues, use the email.header module.
"""

__all__ = [
    'body_decode',
    'body_encode',
    'body_quopri_check',
    'body_quopri_len',
    'decode',
    'decodestring',
    'encode',
    'encodestring',
    'header_decode',
    'header_encode',
    'header_quopri_check',
    'header_quopri_len',
    'quote',
    'unquote',
    ]

import re

from string import hexdigits
from email.utils import fix_eols

CRLF = '\r\n'
NL = '\n'

# See also Charset.py
# Overhead of the "=?charset?q?...?=" chrome around each encoded chunk.
MISC_LEN = 7

# Characters that must be escaped in headers (everything outside the
# RFC 2047 'Q'-safe set) and in bodies (outside printable ASCII sans '=').
hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
bqre = re.compile(r'[^ !-<>-~\t]')



# Helpers


def header_quopri_check(c):
    """Return True if the character should be escaped with header quopri."""
    return bool(hqre.match(c))


def body_quopri_check(c):
    """Return True if the character should be escaped with body quopri."""
    return bool(bqre.match(c))


def header_quopri_len(s):
    """Return the length of str when it is encoded with header quopri."""
    # Escaped characters become "=XX" (3 chars); safe ones stay 1 char.
    count = 0
    for c in s:
        if hqre.match(c):
            count += 3
        else:
            count += 1
    return count


def body_quopri_len(str):
    """Return the length of str when it is encoded with body quopri."""
    # NOTE: the parameter shadows the builtin ``str``; kept for
    # byte-compatibility with the original interface.
    count = 0
    for c in str:
        if bqre.match(c):
            count += 3
        else:
            count += 1
    return count


def _max_append(L, s, maxlen, extra=''):
    # Append ``s`` to the last chunk in L if it still fits within maxlen
    # (joined by ``extra``); otherwise start a new chunk.
    if not L:
        L.append(s.lstrip())
    elif len(L[-1]) + len(s) <= maxlen:
        L[-1] += extra + s
    else:
        L.append(s.lstrip())


def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    return chr(int(s[1:3], 16))


def quote(c):
    # "=XX" with XX the uppercase hex value of the character.
    return "=%02X" % ord(c)



def header_encode(header, charset="iso-8859-1", keep_eols=False,
                  maxlinelen=76, eol=NL):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.

    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.

    The resulting string will be in the form:

    "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
      =?charset?q?Silly_=C8nglish_Kn=EEghts?="

    with each line wrapped safely at, at most, maxlinelen characters (defaults
    to 76 characters).  If maxlinelen is None, the entire string is encoded in
    one chunk with no splitting.

    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
    to the canonical email line separator \\r\\n unless the keep_eols
    parameter is True (the default is False).

    Each line of the header will be terminated in the value of eol, which
    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
    this function directly in email.
    """
    # Return empty headers unchanged
    if not header:
        return header

    if not keep_eols:
        header = fix_eols(header)

    # Quopri encode each line, in encoded chunks no greater than maxlinelen in
    # length, after the RFC chrome is added in.
    quoted = []
    if maxlinelen is None:
        # An obnoxiously large number that's good enough
        max_encoded = 100000
    else:
        max_encoded = maxlinelen - len(charset) - MISC_LEN - 1

    for c in header:
        # Space may be represented as _ instead of =20 for readability
        if c == ' ':
            _max_append(quoted, '_', max_encoded)
        # These characters can be included verbatim
        elif not hqre.match(c):
            _max_append(quoted, c, max_encoded)
        # Otherwise, replace with hex value like =E2
        else:
            _max_append(quoted, "=%02X" % ord(c), max_encoded)

    # Now add the RFC chrome to each encoded chunk and glue the chunks
    # together.  BAW: should we be able to specify the leading whitespace in
    # the joiner?
    joiner = eol + ' '
    return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])



def encode(body, binary=False, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    If binary is False (the default), end-of-line characters will be converted
    to the canonical email end-of-line sequence \\r\\n.  Otherwise they will
    be left verbatim.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).  Long lines will have the `soft linefeed' quoted-printable
    character "=" appended to them, so the decoded text will be identical to
    the original text.
    """
    if not body:
        return body

    if not binary:
        body = fix_eols(body)

    # BAW: We're accumulating the body text by string concatenation.  That
    # can't be very efficient, but I don't have time now to rewrite it.  It
    # just feels like this algorithm could be more efficient.
    encoded_body = ''
    lineno = -1
    # Preserve line endings here so we can check later to see an eol needs to
    # be added to the output later.
    lines = body.splitlines(1)
    for line in lines:
        # But strip off line-endings for processing this line.
        if line.endswith(CRLF):
            line = line[:-2]
        elif line[-1] in CRLF:
            line = line[:-1]

        lineno += 1
        encoded_line = ''
        prev = None
        linelen = len(line)
        # Now we need to examine every character to see if it needs to be
        # quopri encoded.  BAW: again, string concatenation is inefficient.
        for j in range(linelen):
            c = line[j]
            prev = c
            if bqre.match(c):
                c = quote(c)
            elif j+1 == linelen:
                # Check for whitespace at end of line; special case
                if c not in ' \t':
                    encoded_line += c
                prev = c
                continue
            # Check to see to see if the line has reached its maximum length
            if len(encoded_line) + len(c) >= maxlinelen:
                encoded_body += encoded_line + '=' + eol
                encoded_line = ''
            encoded_line += c
        # Now at end of line..
        if prev and prev in ' \t':
            # Special case for whitespace at end of file
            if lineno + 1 == len(lines):
                prev = quote(prev)
                if len(encoded_line) + len(prev) > maxlinelen:
                    encoded_body += encoded_line + '=' + eol + prev
                else:
                    encoded_body += encoded_line + prev
            # Just normal whitespace at end of line
            else:
                encoded_body += encoded_line + prev + '=' + eol
            encoded_line = ''
        # Now look at the line we just finished and it has a line ending, we
        # need to add eol to the end of the line.
        if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
            encoded_body += encoded_line + eol
        else:
            encoded_body += encoded_line
        encoded_line = ''
    return encoded_body


# For convenience and backwards compatibility w/ standard base64 module
body_encode = encode
encodestring = encode



# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''

    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            decoded += eol
            continue

        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1

            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    if not encoded.endswith(eol) and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded


# For convenience and backwards compatibility w/ standard base64 module
body_decode = decode
decodestring = decode



def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab"""
    s = match.group(0)
    return unquote(s)


# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    # In 'Q' encoding an underscore stands for a space.
    s = s.replace('_', ' ')
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s)
gpl-2.0
krishnaku/ec2x
ec2x/name_binding.py
1
2088
"""Bind EC2 instance Name tags to their public DNS names and ids.

Bindings can be exported to a shell-sourceable file
(``~/.ec2x/name_bindings``) and/or injected into the current process
environment.
"""
import datetime
import os

import boto3

__all__ = [
    'ec2_instance_attributes',
    'export_name_bindings_to_file',
    'export_name_bindings_to_environment',
    'rebind_all',
    'get_ec2_name_bindings',
    'AWS_CONFIG_FILE'
]

# Constants

# Locations
AWS_CONFIG_DIR = os.path.expanduser('~/.ec2x')
AWS_CONFIG_FILE = os.path.expanduser(AWS_CONFIG_DIR + '/name_bindings')


# Public interface

def ec2_instance_attributes():
    """Return one dict per EC2 instance visible to the default session.

    Each dict has keys 'name' (value of the 'Name' tag, or None if the
    instance is untagged), 'public_dns_name' and 'id'.
    """
    ec2 = boto3.resource('ec2')
    return [
        {
            'name': _get_tag_value(instance, 'Name'),
            'public_dns_name': instance.public_dns_name,
            'id': instance.id
        }
        for instance in ec2.instances.all()
    ]


def get_ec2_name_bindings():
    """Return a dict mapping Name tag -> public DNS and Name_id -> id.

    Instances that have no 'Name' tag are skipped.
    """
    bindings = {}
    for instance in ec2_instance_attributes():
        name = instance['name']
        if not name:
            # Bug fix: an instance without a 'Name' tag yields name=None,
            # which previously raised TypeError on ``None + '_id'`` (and
            # silently used None as a dict key). Skip unnamed instances.
            continue
        bindings[name] = instance['public_dns_name']
        bindings[name + '_id'] = instance['id']
    return bindings


def export_name_bindings_to_file():
    """Write the current bindings to AWS_CONFIG_FILE as shell exports."""
    with _open_aws_env_file('w') as name_binding:
        _write_file_header(name_binding)
        for key, value in sorted(iter(get_ec2_name_bindings().items())):
            # '-' is not legal in shell identifiers, so normalize to '_'.
            name_binding.write(
                'export ' + key.replace('-', '_') + '=' + value + '\n')


def export_name_bindings_to_environment():
    """Inject the current bindings into this process's environment."""
    # NOTE(review): unlike the file export, keys are NOT '-'->'_'
    # normalized here, so the two exports can disagree for tags
    # containing '-' — confirm whether callers rely on the raw names.
    for key, value in iter(get_ec2_name_bindings().items()):
        os.environ[key] = value


def rebind_all():
    """Refresh both the bindings file and the process environment."""
    export_name_bindings_to_file()
    export_name_bindings_to_environment()


# Private implementation

def _get_tag_value(ec2_instance, tag_name):
    """Return the value of tag ``tag_name``, or None if absent/untagged.

    If the same key appears more than once the last occurrence wins.
    """
    if ec2_instance.tags:
        value = None
        for kv_pair in ec2_instance.tags:
            if kv_pair['Key'] == tag_name:
                value = kv_pair['Value']
        return value


def _open_aws_env_file(mode):
    """Open the bindings file, creating the config directory if needed."""
    # makedirs(exist_ok=True) instead of the previous exists()+mkdir:
    # atomic w.r.t. a concurrent process creating the same directory.
    os.makedirs(AWS_CONFIG_DIR, exist_ok=True)
    return open(AWS_CONFIG_FILE, mode)


def _write_file_header(f):
    """Write the generated-file banner, including a timestamp."""
    f.write("####\n# AWS Name Binding File\n# Generated by "
            "aws_utils.name_binding.py at: " +
            str(datetime.datetime.now()) + "\n####\n\n")
mit
bhargav2408/python-for-android
python3-alpha/python3-src/Lib/distutils/msvc9compiler.py
46
28856
"""distutils.msvc9compiler Contains MSVCCompiler, an implementation of the abstract CCompiler class for the Microsoft Visual Studio 2008. The module is compatible with VS 2005 and VS 2008. You can find legacy support for older versions of VS in distutils.msvccompiler. """ # Written by Perry Stoll # hacked by Robin Becker and Thomas Heller to do a better job of # finding DevStudio (through the registry) # ported to VS2005 and VS 2008 by Christian Heimes import os import subprocess import sys import re from distutils.errors import DistutilsExecError, DistutilsPlatformError, \ CompileError, LibError, LinkError from distutils.ccompiler import CCompiler, gen_preprocess_options, \ gen_lib_options from distutils import log from distutils.util import get_platform import winreg RegOpenKeyEx = winreg.OpenKeyEx RegEnumKey = winreg.EnumKey RegEnumValue = winreg.EnumValue RegError = winreg.error HKEYS = (winreg.HKEY_USERS, winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CLASSES_ROOT) NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32) if NATIVE_WIN64: # Visual C++ is a 32-bit application, so we need to look in # the corresponding registry branch, if we're running a # 64-bit Python on Win64 VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f" WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows" NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework" else: VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f" WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows" NET_BASE = r"Software\Microsoft\.NETFramework" # A map keyed by get_platform() return values to values accepted by # 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is # the param to cross-compile on x86 targetting amd64.) 
PLAT_TO_VCVARS = { 'win32' : 'x86', 'win-amd64' : 'amd64', 'win-ia64' : 'ia64', } class Reg: """Helper class to read values from the registry """ def get_value(cls, path, key): for base in HKEYS: d = cls.read_values(base, path) if d and key in d: return d[key] raise KeyError(key) get_value = classmethod(get_value) def read_keys(cls, base, key): """Return list of registry keys.""" try: handle = RegOpenKeyEx(base, key) except RegError: return None L = [] i = 0 while True: try: k = RegEnumKey(handle, i) except RegError: break L.append(k) i += 1 return L read_keys = classmethod(read_keys) def read_values(cls, base, key): """Return dict of registry keys and values. All names are converted to lowercase. """ try: handle = RegOpenKeyEx(base, key) except RegError: return None d = {} i = 0 while True: try: name, value, type = RegEnumValue(handle, i) except RegError: break name = name.lower() d[cls.convert_mbcs(name)] = cls.convert_mbcs(value) i += 1 return d read_values = classmethod(read_values) def convert_mbcs(s): dec = getattr(s, "decode", None) if dec is not None: try: s = dec("mbcs") except UnicodeError: pass return s convert_mbcs = staticmethod(convert_mbcs) class MacroExpander: def __init__(self, version): self.macros = {} self.vsbase = VS_BASE % version self.load_macros(version) def set_macro(self, macro, path, key): self.macros["$(%s)" % macro] = Reg.get_value(path, key) def load_macros(self, version): self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir") self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir") self.set_macro("FrameworkDir", NET_BASE, "installroot") try: if version >= 8.0: self.set_macro("FrameworkSDKDir", NET_BASE, "sdkinstallrootv2.0") else: raise KeyError("sdkinstallrootv2.0") except KeyError: raise DistutilsPlatformError( """Python was built with Visual Studio 2008; extensions must be built with a compiler than can generate compatible binaries. Visual Studio 2008 was not found on this system. 
If you have Cygwin installed, you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") if version >= 9.0: self.set_macro("FrameworkVersion", self.vsbase, "clr version") self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder") else: p = r"Software\Microsoft\NET Framework Setup\Product" for base in HKEYS: try: h = RegOpenKeyEx(base, p) except RegError: continue key = RegEnumKey(h, 0) d = Reg.get_value(base, r"%s\%s" % (p, key)) self.macros["$(FrameworkVersion)"] = d["version"] def sub(self, s): for k, v in self.macros.items(): s = s.replace(k, v) return s def get_build_version(): """Return the version of MSVC that was used to build Python. For Python 2.3 and up, the version number is included in sys.version. For earlier versions, assume the compiler is MSVC 6. """ prefix = "MSC v." i = sys.version.find(prefix) if i == -1: return 6 i = i + len(prefix) s, rest = sys.version[i:].split(" ", 1) majorVersion = int(s[:-2]) - 6 minorVersion = int(s[2:3]) / 10.0 # I don't think paths are affected by minor version in version 6 if majorVersion == 6: minorVersion = 0 if majorVersion >= 6: return majorVersion + minorVersion # else we don't know what version of the compiler this is return None def normalize_and_reduce_paths(paths): """Return a list of normalized paths with duplicates removed. The current order of paths is maintained. """ # Paths are normalized so things like: /a and /a/ aren't both preserved. reduced_paths = [] for p in paths: np = os.path.normpath(p) # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. if np not in reduced_paths: reduced_paths.append(np) return reduced_paths def removeDuplicates(variable): """Remove duplicate values of an environment variable. 
""" oldList = variable.split(os.pathsep) newList = [] for i in oldList: if i not in newList: newList.append(i) newVariable = os.pathsep.join(newList) return newVariable def find_vcvarsall(version): """Find the vcvarsall.bat file At first it tries to find the productdir of VS 2008 in the registry. If that fails it falls back to the VS90COMNTOOLS env var. """ vsbase = VS_BASE % version try: productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, "productdir") except KeyError: log.debug("Unable to find productdir in registry") productdir = None if not productdir or not os.path.isdir(productdir): toolskey = "VS%0.f0COMNTOOLS" % version toolsdir = os.environ.get(toolskey, None) if toolsdir and os.path.isdir(toolsdir): productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC") productdir = os.path.abspath(productdir) if not os.path.isdir(productdir): log.debug("%s is not a valid directory" % productdir) return None else: log.debug("Env var %s is not set or invalid" % toolskey) if not productdir: log.debug("No productdir found") return None vcvarsall = os.path.join(productdir, "vcvarsall.bat") if os.path.isfile(vcvarsall): return vcvarsall log.debug("Unable to find vcvarsall.bat") return None def query_vcvarsall(version, arch="x86"): """Launch vcvarsall.bat and read the settings from its environment """ vcvarsall = find_vcvarsall(version) interesting = set(("include", "lib", "libpath", "path")) result = {} if vcvarsall is None: raise DistutilsPlatformError("Unable to find vcvarsall.bat") log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version) popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch), stdout=subprocess.PIPE, stderr=subprocess.PIPE) try: stdout, stderr = popen.communicate() if popen.wait() != 0: raise DistutilsPlatformError(stderr.decode("mbcs")) stdout = stdout.decode("mbcs") for line in stdout.split("\n"): line = Reg.convert_mbcs(line) if '=' not in line: continue line = line.strip() key, value = line.split('=', 1) key = key.lower() if key 
in interesting: if value.endswith(os.pathsep): value = value[:-1] result[key] = removeDuplicates(value) finally: popen.stdout.close() popen.stderr.close() if len(result) != len(interesting): raise ValueError(str(list(result.keys()))) return result # More globals VERSION = get_build_version() if VERSION < 8.0: raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION) # MACROS = MacroExpander(VERSION) class MSVCCompiler(CCompiler) : """Concrete class that implements an interface to Microsoft Visual C++, as defined by the CCompiler abstract class.""" compiler_type = 'msvc' # Just set this so CCompiler's constructor doesn't barf. We currently # don't use the 'set_executables()' bureaucracy provided by CCompiler, # as it really isn't necessary for this sort of single-compiler class. # Would be nice to have a consistent interface with UnixCCompiler, # though, so it's worth thinking about. executables = {} # Private class data (need to distinguish C from C++ source for compiler) _c_extensions = ['.c'] _cpp_extensions = ['.cc', '.cpp', '.cxx'] _rc_extensions = ['.rc'] _mc_extensions = ['.mc'] # Needed for the filename generation methods provided by the # base class, CCompiler. src_extensions = (_c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions) res_extension = '.res' obj_extension = '.obj' static_lib_extension = '.lib' shared_lib_extension = '.dll' static_lib_format = shared_lib_format = '%s%s' exe_extension = '.exe' def __init__(self, verbose=0, dry_run=0, force=0): CCompiler.__init__ (self, verbose, dry_run, force) self.__version = VERSION self.__root = r"Software\Microsoft\VisualStudio" # self.__macros = MACROS self.__paths = [] # target platform (.plat_name is consistent with 'bdist') self.plat_name = None self.__arch = None # deprecated name self.initialized = False def initialize(self, plat_name=None): # multi-init means we would need to check platform same each time... 
assert not self.initialized, "don't init multiple times" if plat_name is None: plat_name = get_platform() # sanity check for platforms to prevent obscure errors later. ok_plats = 'win32', 'win-amd64', 'win-ia64' if plat_name not in ok_plats: raise DistutilsPlatformError("--plat-name must be one of %s" % (ok_plats,)) if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): # Assume that the SDK set up everything alright; don't try to be # smarter self.cc = "cl.exe" self.linker = "link.exe" self.lib = "lib.exe" self.rc = "rc.exe" self.mc = "mc.exe" else: # On x86, 'vcvars32.bat amd64' creates an env that doesn't work; # to cross compile, you use 'x86_amd64'. # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross # compile use 'x86' (ie, it runs the x86 compiler directly) # No idea how itanium handles this, if at all. if plat_name == get_platform() or plat_name == 'win32': # native build or cross-compile to win32 plat_spec = PLAT_TO_VCVARS[plat_name] else: # cross compile from win32 -> some 64bit plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \ PLAT_TO_VCVARS[plat_name] vc_env = query_vcvarsall(VERSION, plat_spec) self.__paths = vc_env['path'].split(os.pathsep) os.environ['lib'] = vc_env['lib'] os.environ['include'] = vc_env['include'] if len(self.__paths) == 0: raise DistutilsPlatformError("Python was built with %s, " "and extensions need to be built with the same " "version of the compiler, but it isn't installed." 
% self.__product) self.cc = self.find_exe("cl.exe") self.linker = self.find_exe("link.exe") self.lib = self.find_exe("lib.exe") self.rc = self.find_exe("rc.exe") # resource compiler self.mc = self.find_exe("mc.exe") # message compiler #self.set_path_env_var('lib') #self.set_path_env_var('include') # extend the MSVC path with the current path try: for p in os.environ['path'].split(';'): self.__paths.append(p) except KeyError: pass self.__paths = normalize_and_reduce_paths(self.__paths) os.environ['path'] = ";".join(self.__paths) self.preprocess_options = None if self.__arch == "x86": self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/Z7', '/D_DEBUG'] else: # Win64 self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' , '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', '/Z7', '/D_DEBUG'] self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: self.ldflags_shared_debug = [ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None' ] self.ldflags_static = [ '/nologo'] self.initialized = True # -- Worker methods ------------------------------------------------ def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): # Copied from ccompiler.py, extended to return .res as 'object'-file # for .rc input file if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: (base, ext) = os.path.splitext (src_name) base = os.path.splitdrive(base)[1] # Chop off the drive base = base[os.path.isabs(base):] # If abs, chop off leading / if ext not in self.src_extensions: # Better to raise an exception instead of silently continuing # and later complain about sources and targets having # different lengths raise CompileError ("Don't know how to compile %s" % src_name) if strip_dir: base = os.path.basename (base) if ext in self._rc_extensions: obj_names.append (os.path.join (output_dir, base 
+ self.res_extension)) elif ext in self._mc_extensions: obj_names.append (os.path.join (output_dir, base + self.res_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): if not self.initialized: self.initialize() compile_info = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) macros, objects, extra_postargs, pp_opts, build = compile_info compile_opts = extra_preargs or [] compile_opts.append ('/c') if debug: compile_opts.extend(self.compile_options_debug) else: compile_opts.extend(self.compile_options) for obj in objects: try: src, ext = build[obj] except KeyError: continue if debug: # pass the full pathname to MSVC in debug mode, # this allows the debugger to find the source file # without asking the user to browse for it src = os.path.abspath(src) if ext in self._c_extensions: input_opt = "/Tc" + src elif ext in self._cpp_extensions: input_opt = "/Tp" + src elif ext in self._rc_extensions: # compile .RC to .RES file input_opt = src output_opt = "/fo" + obj try: self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt]) except DistutilsExecError as msg: raise CompileError(msg) continue elif ext in self._mc_extensions: # Compile .MC to .RC file to .RES file. # * '-h dir' specifies the directory for the # generated include file # * '-r dir' specifies the target directory of the # generated RC file and the binary message resource # it includes # # For now (since there are no options to change this), # we use the source-directory for the include file and # the build directory for the RC file and message # resources. This works at least for win32all. 
h_dir = os.path.dirname(src) rc_dir = os.path.dirname(obj) try: # first compile .MC to .RC and .H file self.spawn([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src]) base, _ = os.path.splitext (os.path.basename (src)) rc_file = os.path.join (rc_dir, base + '.rc') # then compile .RC to .RES file self.spawn([self.rc] + ["/fo" + obj] + [rc_file]) except DistutilsExecError as msg: raise CompileError(msg) continue else: # how to handle this file? raise CompileError("Don't know how to compile %s to %s" % (src, obj)) output_opt = "/Fo" + obj try: self.spawn([self.cc] + compile_opts + pp_opts + [input_opt, output_opt] + extra_postargs) except DistutilsExecError as msg: raise CompileError(msg) return objects def create_static_lib(self, objects, output_libname, output_dir=None, debug=0, target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) output_filename = self.library_filename(output_libname, output_dir=output_dir) if self._need_link(objects, output_filename): lib_args = objects + ['/OUT:' + output_filename] if debug: pass # XXX what goes here? 
try: self.spawn([self.lib] + lib_args) except DistutilsExecError as msg: raise LibError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) (libraries, library_dirs, runtime_library_dirs) = fixed_args if runtime_library_dirs: self.warn ("I don't know what to do with 'runtime_library_dirs': " + str (runtime_library_dirs)) lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) if output_dir is not None: output_filename = os.path.join(output_dir, output_filename) if self._need_link(objects, output_filename): if target_desc == CCompiler.EXECUTABLE: if debug: ldflags = self.ldflags_shared_debug[1:] else: ldflags = self.ldflags_shared[1:] else: if debug: ldflags = self.ldflags_shared_debug else: ldflags = self.ldflags_shared export_opts = [] for sym in (export_symbols or []): export_opts.append("/EXPORT:" + sym) ld_args = (ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]) # The MSVC linker generates .lib and .exp files, which cannot be # suppressed by any linker switches. The .lib files may even be # needed! Make sure they are generated in the temporary build # directory. Since they have different names for debug and release # builds, they can go into the same directory. 
build_temp = os.path.dirname(objects[0]) if export_symbols is not None: (dll_name, dll_ext) = os.path.splitext( os.path.basename(output_filename)) implib_file = os.path.join( build_temp, self.library_filename(dll_name)) ld_args.append ('/IMPLIB:' + implib_file) # Embedded manifests are recommended - see MSDN article titled # "How to: Embed a Manifest Inside a C/C++ Application" # (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx) # Ask the linker to generate the manifest in the temp dir, so # we can embed it later. temp_manifest = os.path.join( build_temp, os.path.basename(output_filename) + ".manifest") ld_args.append('/MANIFESTFILE:' + temp_manifest) if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath(os.path.dirname(output_filename)) try: self.spawn([self.linker] + ld_args) except DistutilsExecError as msg: raise LinkError(msg) # embed the manifest # XXX - this is somewhat fragile - if mt.exe fails, distutils # will still consider the DLL up-to-date, but it will not have a # manifest. Maybe we should link to a temp file? OTOH, that # implies a build environment error that shouldn't go undetected. if target_desc == CCompiler.EXECUTABLE: mfid = 1 else: mfid = 2 # Remove references to the Visual C runtime self._remove_visual_c_ref(temp_manifest) out_arg = '-outputresource:%s;%s' % (output_filename, mfid) try: self.spawn(['mt.exe', '-nologo', '-manifest', temp_manifest, out_arg]) except DistutilsExecError as msg: raise LinkError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) def _remove_visual_c_ref(self, manifest_file): try: # Remove references to the Visual C runtime, so they will # fall through to the Visual C dependency of Python.exe. # This way, when installed for a restricted user (e.g. # runtimes are not in WinSxS folder, but in Python's own # folder), the runtimes do not need to be in every folder # with .pyd's. 
manifest_f = open(manifest_file) try: manifest_buf = manifest_f.read() finally: manifest_f.close() pattern = re.compile( r"""<assemblyIdentity.*?name=("|')Microsoft\."""\ r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""", re.DOTALL) manifest_buf = re.sub(pattern, "", manifest_buf) pattern = "<dependentAssembly>\s*</dependentAssembly>" manifest_buf = re.sub(pattern, "", manifest_buf) manifest_f = open(manifest_file, 'w') try: manifest_f.write(manifest_buf) finally: manifest_f.close() except IOError: pass # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function, in # ccompiler.py. def library_dir_option(self, dir): return "/LIBPATH:" + dir def runtime_library_dir_option(self, dir): raise DistutilsPlatformError( "don't know how to set runtime library search path for MSVC++") def library_option(self, lib): return self.library_filename(lib) def find_library_file(self, dirs, lib, debug=0): # Prefer a debugging library if found (and requested), but deal # with it if we don't have one. if debug: try_names = [lib + "_d", lib] else: try_names = [lib] for dir in dirs: for name in try_names: libfile = os.path.join(dir, self.library_filename (name)) if os.path.exists(libfile): return libfile else: # Oops, didn't find it in *any* of 'dirs' return None # Helper methods for using the MSVC registry settings def find_exe(self, exe): """Return path to an MSVC executable program. Tries to find the program in several places: first, one of the MSVC program search paths from the registry; next, the directories in the PATH environment variable. If any of those work, return an absolute path that is known to exist. If none of them work, just return the original program name, 'exe'. 
""" for p in self.__paths: fn = os.path.join(os.path.abspath(p), exe) if os.path.isfile(fn): return fn # didn't find it; try existing path for p in os.environ['Path'].split(';'): fn = os.path.join(os.path.abspath(p),exe) if os.path.isfile(fn): return fn return exe
apache-2.0
eneabio/nova
nova/network/quantum/melange_ipam_lib.py
5
10628
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.network.quantum import melange_connection

LOG = logging.getLogger(__name__)

FLAGS = flags.FLAGS


def get_ipam_lib(net_man):
    # Factory entry point; `net_man` is accepted for interface
    # compatibility but unused here.
    return QuantumMelangeIPAMLib()


class QuantumMelangeIPAMLib(object):
    """Implements Quantum IP Address Management (IPAM) interface
       using the Melange service, which is access using the Melange
       web services API.
    """

    def __init__(self):
        """Initialize class used to connect to Melange server"""
        self.m_conn = melange_connection.MelangeConnection()

    def create_subnet(self, context, label, project_id,
                      quantum_net_id, priority, cidr=None,
                      gateway=None, gateway_v6=None, cidr_v6=None,
                      dns1=None, dns2=None):
        """Contact Melange and create a subnet for any non-NULL
           IPv4 or IPv6 subnets.

           Also create a entry in the Nova networks DB, but only
           to store values not represented in Melange or to
           temporarily provide compatibility with Nova code that
           accesses IPAM data directly via the DB (e.g., nova-api)
        """
        # A null project maps to the shared/default Melange tenant.
        tenant_id = project_id or FLAGS.quantum_default_tenant_id
        if cidr:
            self.m_conn.create_block(quantum_net_id, cidr,
                                     project_id=tenant_id,
                                     gateway=gateway,
                                     dns1=dns1, dns2=dns2)
        if cidr_v6:
            self.m_conn.create_block(quantum_net_id, cidr_v6,
                                     project_id=tenant_id,
                                     gateway=gateway_v6,
                                     dns1=dns1, dns2=dns2)

        net = {"uuid": quantum_net_id,
               "project_id": tenant_id,
               "priority": priority,
               "label": label}
        # NOTE(review): indentation below was reconstructed from a
        # whitespace-flattened copy of this file; the `else` is paired
        # with the quantum_use_dhcp check (matching upstream nova) —
        # verify against the canonical source.
        if FLAGS.quantum_use_dhcp:
            if cidr:
                # Reserve .1 for the gateway; DHCP range starts at .2.
                n = netaddr.IPNetwork(cidr)
                net['dhcp_start'] = netaddr.IPAddress(n.first + 2)
        else:
            net['dhcp_start'] = None
        admin_context = context.elevated()
        # Return value intentionally unused; the DB row exists only for
        # compatibility (see docstring).
        network = db.network_create_safe(admin_context, net)

    def allocate_fixed_ips(self, context, project_id, quantum_net_id,
                           network_tenant_id, vif_ref):
        """Pass call to allocate fixed IP on to Melange"""
        ips = self.m_conn.allocate_ip(quantum_net_id, network_tenant_id,
                                      vif_ref['uuid'], project_id,
                                      vif_ref['address'])
        return [ip['address'] for ip in ips]

    def delete_subnets_by_net_id(self, context, net_id, project_id):
        """Find Melange block associated with the Quantum UUID,
           then tell Melange to delete that block.
        """
        admin_context = context.elevated()
        tenant_id = project_id or FLAGS.quantum_default_tenant_id
        all_blocks = self.m_conn.get_blocks(tenant_id)
        for b in all_blocks['ip_blocks']:
            if b['network_id'] == net_id:
                self.m_conn.delete_block(b['id'], tenant_id)

        # Also drop the compatibility row created in create_subnet.
        network = db.network_get_by_uuid(admin_context, net_id)
        db.network_delete_safe(context, network['id'])

    def get_networks_by_tenant(self, admin_context, tenant_id):
        # Returns the network rows for every Melange block owned by the
        # tenant; keyed by network_id so a network with multiple blocks
        # appears only once.
        nets = {}
        blocks = self.m_conn.get_blocks(tenant_id)
        for ip_block in blocks['ip_blocks']:
            network_id = ip_block['network_id']
            network = db.network_get_by_uuid(admin_context, network_id)
            nets[network_id] = network
        return nets.values()

    def get_global_networks(self, admin_context):
        # "Global" networks are those owned by the default tenant.
        return self.get_networks_by_tenant(admin_context,
            FLAGS.quantum_default_tenant_id)

    def get_project_networks(self, admin_context):
        # All networks that have a project assigned, from the Nova DB.
        try:
            nets = db.network_get_all(admin_context.elevated())
        except exception.NoNetworksFound:
            return []
        # only return networks with a project_id set
        return [net for net in nets if net['project_id']]

    def get_project_and_global_net_ids(self, context, project_id):
        """Fetches all networks associated with this project, or
           that are "global" (i.e., have no project set).
           Returns list sorted by 'priority' (lowest integer value
           is highest priority).
        """
        if project_id is None:
            raise Exception(_("get_project_and_global_net_ids must be called"
                              " with a non-null project_id"))

        admin_context = context.elevated()

        # Decorate with priority
        priority_nets = []
        for tenant_id in (project_id, FLAGS.quantum_default_tenant_id):
            nets = self.get_networks_by_tenant(admin_context, tenant_id)
            for network in nets:
                priority = network['priority']
                priority_nets.append((priority, network['uuid'], tenant_id))

        # Sort by priority
        priority_nets.sort()

        # Undecorate
        return [(network_id, tenant_id) for priority, network_id, tenant_id
                in priority_nets]

    def get_tenant_id_by_net_id(self, context, net_id, vif_id, project_id):
        # Probe candidate tenants in order; the first one for which
        # Melange reports allocated IPs for (net, vif) wins.
        ipam_tenant_id = None
        tenant_ids = [FLAGS.quantum_default_tenant_id, project_id, None]
        # This is confusing, if there are IPs for the given net, vif,
        # tenant trifecta we assume that is the tenant for that network
        for tid in tenant_ids:
            try:
                self.m_conn.get_allocated_ips(net_id, vif_id, tid)
            except KeyError:
                continue
            ipam_tenant_id = tid
            break
        return ipam_tenant_id

    # TODO(bgh): Rename this method .. it's now more of a
    # "get_subnets_by_net_id_and_vif_id" method, but we could probably just
    # call it "get_subnets".
    def get_subnets_by_net_id(self, context, tenant_id, net_id, vif_id):
        """Returns information about the IPv4 and IPv6 subnets
           associated with a Quantum Network UUID.
        """
        subnets = []
        ips = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)

        for ip_address in ips:
            block = ip_address['ip_block']
            subnet = {'network_id': block['network_id'],
                      'id': block['id'],
                      'cidr': block['cidr'],
                      'gateway': block['gateway'],
                      'broadcast': block['broadcast'],
                      'netmask': block['netmask'],
                      'dns1': block['dns1'],
                      'dns2': block['dns2']}
            if ip_address['version'] == 4:
                subnet['version'] = 4
            else:
                subnet['version'] = 6
            subnets.append(subnet)
        return subnets

    def get_routes_by_ip_block(self, context, block_id, project_id):
        """Returns the list of routes for the IP block"""
        return self.m_conn.get_routes(block_id, project_id)

    def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
        """Returns a list of IPv4 address strings associated with
           the specified virtual interface.
        """
        return self._get_ips_by_interface(context, net_id, vif_id,
                                          project_id, 4)

    def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
        """Returns a list of IPv6 address strings associated with
           the specified virtual interface.
        """
        return self._get_ips_by_interface(context, net_id, vif_id,
                                          project_id, 6)

    def _get_ips_by_interface(self, context, net_id, vif_id, project_id,
                              ip_version):
        """Helper method to fetch v4 or v6 addresses for a particular
           virtual interface.
        """
        tenant_id = project_id or FLAGS.quantum_default_tenant_id
        ip_list = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)
        # Filter by address family using netaddr's version detection.
        return [ip['address'] for ip in ip_list
                if netaddr.IPNetwork(ip['address']).version == ip_version]

    def verify_subnet_exists(self, context, project_id, quantum_net_id):
        """Confirms that a subnet exists that is associated with the
           specified Quantum Network UUID.
        """
        # TODO(bgh): Would be nice if we could just do something like:
        # GET /ipam/tenants/{tenant_id}/networks/{network_id}/ instead
        # of searching through all the blocks.  Checking for a 404
        # will then determine whether it exists.
        tenant_id = project_id or FLAGS.quantum_default_tenant_id
        all_blocks = self.m_conn.get_blocks(tenant_id)
        for b in all_blocks['ip_blocks']:
            if b['network_id'] == quantum_net_id:
                return True
        return False

    def deallocate_ips_by_vif(self, context, project_id, net_id, vif_ref):
        """Deallocate all fixed IPs associated with the specified
           virtual interface.
        """
        tenant_id = project_id or FLAGS.quantum_default_tenant_id
        self.m_conn.deallocate_ips(net_id, vif_ref['uuid'], tenant_id)

    def get_allocated_ips(self, context, subnet_id, project_id):
        # (address, interface_id) pairs for every IP in the subnet.
        ips = self.m_conn.get_allocated_ips_for_network(subnet_id, project_id)
        return [(ip['address'], ip['interface_id']) for ip in ips]

    def create_vif(self, vif_id, instance_id, project_id=None):
        """Create a new vif with the specified information.
        """
        tenant_id = project_id or FLAGS.quantum_default_tenant_id
        return self.m_conn.create_vif(vif_id, instance_id, tenant_id)

    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        """This call is not supported in quantum yet"""
        return []
apache-2.0
datsfosure/ansible
v1/ansible/utils/su_prompts.py
113
1576
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import re

# Localized spellings of "Password" that `su` may print as its prompt.
# Runtime data matched against remote command output — entries must be
# kept byte-for-byte.
SU_PROMPT_LOCALIZATIONS = [
    'Password',
    '암호',
    'パスワード',
    'Adgangskode',
    'Contraseña',
    'Contrasenya',
    'Hasło',
    'Heslo',
    'Jelszó',
    'Lösenord',
    'Mật khẩu',
    'Mot de passe',
    'Parola',
    'Parool',
    'Pasahitza',
    'Passord',
    'Passwort',
    'Salasana',
    'Sandi',
    'Senha',
    'Wachtwoord',
    'ססמה',
    'Лозинка',
    'Парола',
    'Пароль',
    'गुप्तशब्द',
    'शब्दकूट',
    'సంకేతపదము',
    'හස්පදය',
    '密码',
    '密碼',
]

# One alternation matching any localized prompt at the start of the
# string, optionally preceded by a "<user>'s " prefix and followed by a
# colon with optional surrounding spaces (e.g. "Password:",
# "root's Password: ").
#
# Fix: the prefix/suffix fragments are now raw strings. The previous
# plain-string '(\w+\'s )?' relied on the invalid escape '\w' passing
# through unchanged, which raises DeprecationWarning (and eventually
# SyntaxError) on modern CPython. The compiled pattern is identical.
SU_PROMPT_LOCALIZATIONS_RE = re.compile(
    "|".join(r"(\w+'s )?" + x + r" ?: ?" for x in SU_PROMPT_LOCALIZATIONS),
    flags=re.IGNORECASE
)


def check_su_prompt(data):
    """Return True if `data` starts with a recognized su password prompt."""
    return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
gpl-3.0
jinxuan/googletest
test/gtest_throw_on_failure_test.py
2917
5766
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests Google Test's throw-on-failure mode with exceptions disabled. This script invokes gtest_throw_on_failure_test_ (a program written with Google Test) with different environments and command line flags. """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils # Constants. # The command line flag for enabling/disabling the throw-on-failure mode. 
THROW_ON_FAILURE = 'gtest_throw_on_failure'

# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_throw_on_failure_test_')


# Utilities.


def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.
  """

  env_var = env_var.upper()
  if value is not None:
    os.environ[env_var] = value
  elif env_var in os.environ:
    del os.environ[env_var]


def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""

  # Parenthesized form is valid on both Python 2 and Python 3; the original
  # bare print statement is a SyntaxError under any Python 3 interpreter.
  print('Running "%s". . .' % ' '.join(command))
  p = gtest_test_utils.Subprocess(command)
  return p.exited and p.exit_code == 0


# The tests.  TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      should_fail:      True iff the program is expected to fail.
    """

    SetEnvVar(THROW_ON_FAILURE, env_var_value)

    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    failed = not Run(command)

    # Restore the environment so one case cannot leak state into the next.
    SetEnvVar(THROW_ON_FAILURE, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    # assertTrue replaces the deprecated assert_ alias, which was removed
    # in Python 3.12; behavior is identical.
    self.assertTrue(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)


if __name__ == '__main__':
  gtest_test_utils.Main()
bsd-3-clause
liangjiaxing/sympy
sympy/printing/tests/test_conventions.py
97
3780
from sympy import symbols, Derivative, Integral, exp, cos, oo, Function
from sympy.functions.special.bessel import besselj
from sympy.functions.special.polynomials import legendre
from sympy.functions.combinatorial.numbers import bell
from sympy.printing.conventions import split_super_sub, requires_partial


def test_super_sub():
    """Check that split_super_sub splits a symbol name into
    (base, superscripts, subscripts).

    Conventions exercised below: ``_x`` marks a subscript, ``^x`` or ``__x``
    marks a superscript, and markers may be chained in any order.
    """
    # plain subscripts, single and chained
    assert split_super_sub("beta_13_2") == ("beta", [], ["13", "2"])
    assert split_super_sub("beta_132_20") == ("beta", [], ["132", "20"])
    assert split_super_sub("beta_13") == ("beta", [], ["13"])
    assert split_super_sub("x_a_b") == ("x", [], ["a", "b"])
    assert split_super_sub("x_1_2_3") == ("x", [], ["1", "2", "3"])
    assert split_super_sub("x_a_b1") == ("x", [], ["a", "b1"])
    assert split_super_sub("x_a_1") == ("x", [], ["a", "1"])
    assert split_super_sub("x_1_a") == ("x", [], ["1", "a"])
    # superscripts: "^" and "__" are equivalent markers
    assert split_super_sub("x_1^aa") == ("x", ["aa"], ["1"])
    assert split_super_sub("x_1__aa") == ("x", ["aa"], ["1"])
    assert split_super_sub("x_11^a") == ("x", ["a"], ["11"])
    assert split_super_sub("x_11__a") == ("x", ["a"], ["11"])
    # mixed and interleaved markers
    assert split_super_sub("x_a_b_c_d") == ("x", [], ["a", "b", "c", "d"])
    assert split_super_sub("x_a_b^c^d") == ("x", ["c", "d"], ["a", "b"])
    assert split_super_sub("x_a_b__c__d") == ("x", ["c", "d"], ["a", "b"])
    assert split_super_sub("x_a^b_c^d") == ("x", ["b", "d"], ["a", "c"])
    assert split_super_sub("x_a__b_c__d") == ("x", ["b", "d"], ["a", "c"])
    assert split_super_sub("x^a^b_c_d") == ("x", ["a", "b"], ["c", "d"])
    assert split_super_sub("x__a__b_c_d") == ("x", ["a", "b"], ["c", "d"])
    assert split_super_sub("x^a^b^c^d") == ("x", ["a", "b", "c", "d"], [])
    assert split_super_sub("x__a__b__c__d") == ("x", ["a", "b", "c", "d"], [])
    # multi-character base names
    assert split_super_sub("alpha_11") == ("alpha", [], ["11"])
    assert split_super_sub("alpha_11_11") == ("alpha", [], ["11", "11"])


def test_requires_partial():
    """Check that requires_partial reports whether a Derivative should be
    printed with the partial-derivative symbol, i.e. whether the expression
    genuinely depends on more than one continuous variable.
    """
    x, y, z, t, nu = symbols('x y z t nu')
    n = symbols('n', integer=True)

    f = x * y
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, y)) is True

    ## integrating out one of the variables
    assert requires_partial(Derivative(Integral(exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False

    ## bessel function with smooth parameter
    f = besselj(nu, x)
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, nu)) is True

    ## bessel function with integer parameter
    f = besselj(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # this is not really valid (differentiating with respect to an integer)
    # but there's no reason to use the partial derivative symbol there. make
    # sure we don't throw an exception here, though
    assert requires_partial(Derivative(f, n)) is False

    ## bell polynomial
    f = bell(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False

    ## legendre polynomial
    f = legendre(0, x)
    assert requires_partial(Derivative(f, x)) is False
    f = legendre(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False

    f = x ** n
    assert requires_partial(Derivative(f, x)) is False
    assert requires_partial(Derivative(Integral((x*y) ** n * exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False

    # parametric equation
    f = (exp(t), cos(t))
    g = sum(f)
    assert requires_partial(Derivative(g, t)) is False

    # function of unspecified variables
    f = symbols('f', cls=Function)
    assert requires_partial(Derivative(f, x)) is False
    assert requires_partial(Derivative(f, x, y)) is True
bsd-3-clause
nnethercote/servo
tests/wpt/web-platform-tests/webdriver/tests/element_send_keys/file_upload.py
24
7387
# WPT tests for the WebDriver "Element Send Keys" command applied to
# <input type=file> elements: file selection, the `multiple` attribute,
# error handling for bad paths, and interactability rules.
import pytest

from tests.support.asserts import assert_error, assert_files_uploaded, assert_success
from tests.support.inline import inline

from . import map_files_to_multiline_text


def element_send_keys(session, element, text):
    """Invoke the raw Element Send Keys endpoint with *text* as payload.

    Uses the transport directly (instead of a client wrapper) so each test
    controls exactly what reaches the remote end.
    """
    return session.transport.send(
        "POST", "/session/{session_id}/element/{element_id}/value".format(
            session_id=session.session_id,
            element_id=element.id),
        {"text": text})


def test_empty_text(session):
    """An empty string must be rejected as an invalid argument."""
    session.url = inline("<input type=file>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, "")
    assert_error(response, "invalid argument")


def test_multiple_files(session, create_files):
    """Several paths (newline-separated) upload to an input with `multiple`."""
    files = create_files(["foo", "bar"])

    session.url = inline("<input type=file multiple>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element,
                                 map_files_to_multiline_text(files))
    assert_success(response)

    assert_files_uploaded(session, element, files)


def test_multiple_files_last_path_not_found(session, create_files):
    """If any listed path does not exist, nothing is uploaded at all."""
    files = create_files(["foo", "bar"])
    # "foo bar" was never created on disk, so the whole command must fail.
    files.append("foo bar")

    session.url = inline("<input type=file multiple>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element,
                                 map_files_to_multiline_text(files))
    assert_error(response, "invalid argument")

    assert_files_uploaded(session, element, [])


def test_multiple_files_without_multiple_attribute(session, create_files):
    """Multiple paths are invalid when the input lacks `multiple`."""
    files = create_files(["foo", "bar"])

    session.url = inline("<input type=file>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element,
                                 map_files_to_multiline_text(files))
    assert_error(response, "invalid argument")

    assert_files_uploaded(session, element, [])


def test_multiple_files_send_twice(session, create_files):
    """A second send appends to (not replaces) a `multiple` input's files."""
    first_files = create_files(["foo", "bar"])
    second_files = create_files(["john", "doe"])

    session.url = inline("<input type=file multiple>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element,
                                 map_files_to_multiline_text(first_files))
    assert_success(response)

    response = element_send_keys(session, element,
                                 map_files_to_multiline_text(second_files))
    assert_success(response)

    assert_files_uploaded(session, element, first_files + second_files)


def test_multiple_files_reset_with_element_clear(session, create_files):
    """Element Clear empties the selection; a later send starts fresh."""
    first_files = create_files(["foo", "bar"])
    second_files = create_files(["john", "doe"])

    session.url = inline("<input type=file multiple>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element,
                                 map_files_to_multiline_text(first_files))
    assert_success(response)

    # Reset already uploaded files
    element.clear()

    assert_files_uploaded(session, element, [])

    response = element_send_keys(session, element,
                                 map_files_to_multiline_text(second_files))
    assert_success(response)

    assert_files_uploaded(session, element, second_files)


def test_single_file(session, create_files):
    """A single existing path uploads successfully."""
    files = create_files(["foo"])

    session.url = inline("<input type=file>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    assert_files_uploaded(session, element, files)


def test_single_file_replaces_without_multiple_attribute(session, create_files):
    """Without `multiple`, a second send replaces the previous selection."""
    files = create_files(["foo", "bar"])

    session.url = inline("<input type=file>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    response = element_send_keys(session, element, str(files[1]))
    assert_success(response)

    assert_files_uploaded(session, element, [files[1]])


def test_single_file_appends_with_multiple_attribute(session, create_files):
    """With `multiple`, a second send appends to the previous selection."""
    files = create_files(["foo", "bar"])

    session.url = inline("<input type=file multiple>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    response = element_send_keys(session, element, str(files[1]))
    assert_success(response)

    assert_files_uploaded(session, element, files)


def test_transparent(session, create_files):
    """A fully transparent file input still accepts uploads."""
    files = create_files(["foo"])
    session.url = inline("""<input type=file style="opacity: 0">""")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    assert_files_uploaded(session, element, files)


def test_obscured(session, create_files):
    """A file input covered by another element still accepts uploads."""
    files = create_files(["foo"])
    session.url = inline("""
      <style>
        div {
          position: absolute;
          width: 100vh;
          height: 100vh;
          background: blue;
          top: 0;
          left: 0;
        }
      </style>

      <input type=file>
      <div></div>
      """)
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    assert_files_uploaded(session, element, files)


def test_outside_viewport(session, create_files):
    """A file input scrolled out of the viewport still accepts uploads."""
    files = create_files(["foo"])
    session.url = inline("""<input type=file style="margin-left: -100vh">""")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    assert_files_uploaded(session, element, files)


def test_hidden(session, create_files):
    """A hidden file input accepts uploads under default interactability."""
    files = create_files(["foo"])
    session.url = inline("<input type=file hidden>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    assert_files_uploaded(session, element, files)


def test_display_none(session, create_files):
    """A display:none file input accepts uploads under default interactability."""
    files = create_files(["foo"])
    session.url = inline("""<input type=file style="display: none">""")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_success(response)

    assert_files_uploaded(session, element, files)


@pytest.mark.capabilities({"strictFileInteractability": True})
def test_strict_hidden(session, create_files):
    """With strict file interactability, a hidden input is not interactable."""
    files = create_files(["foo"])
    session.url = inline("<input type=file hidden>")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_error(response, "element not interactable")


@pytest.mark.capabilities({"strictFileInteractability": True})
def test_strict_display_none(session, create_files):
    """With strict file interactability, display:none is not interactable."""
    files = create_files(["foo"])
    session.url = inline("""<input type=file style="display: none">""")
    element = session.find.css("input", all=False)

    response = element_send_keys(session, element, str(files[0]))
    assert_error(response, "element not interactable")
mpl-2.0
thaim/ansible
lib/ansible/modules/cloud/spotinst/spotinst_aws_elastigroup.py
49
49425
#!/usr/bin/python # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: spotinst_aws_elastigroup version_added: 2.5 short_description: Create, update or delete Spotinst AWS Elastigroups author: Spotinst (@talzur) description: - Can create, update, or delete Spotinst AWS Elastigroups Launch configuration is part of the elastigroup configuration, so no additional modules are necessary for handling the launch configuration. You will have to have a credentials file in this location - <home>/.spotinst/credentials The credentials file must contain a row that looks like this token = <YOUR TOKEN> Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible- requirements: - python >= 2.7 - spotinst_sdk >= 1.0.38 options: credentials_path: description: - (String) Optional parameter that allows to set a non-default credentials path. Default is ~/.spotinst/credentials account_id: description: - (String) Optional parameter that allows to set an account-id inside the module configuration By default this is retrieved from the credentials path availability_vs_cost: choices: - availabilityOriented - costOriented - balanced description: - (String) The strategy orientation. 
required: true availability_zones: description: - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are name (String), subnet_id (String), placement_group_name (String), required: true block_device_mappings: description: - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String), no_device (String), ebs (Object, expects the following keys- delete_on_termination(Boolean), encrypted(Boolean), iops (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)) chef: description: - (Object) The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user (String), pem_key (String), chef_version (String) draining_timeout: description: - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination. ebs_optimized: description: - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied. 
type: bool ebs_volume_pool: description: - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; keys allowed are - volume_ids (List of Strings), device_name (String) ecs: description: - (Object) The ECS integration configuration.; Expects the following key - cluster_name (String) elastic_ips: description: - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances fallback_to_od: description: - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead type: bool health_check_grace_period: description: - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health. default: 300 health_check_unhealthy_duration_before_replacement: description: - (Integer) Minimal mount of time instance should be unhealthy for us to consider it unhealthy. health_check_type: choices: - ELB - HCS - TARGET_GROUP - MLB - EC2 description: - (String) The service to use for the health check. iam_role_name: description: - (String) The instance profile iamRole name - Only use iam_role_arn, or iam_role_name iam_role_arn: description: - (String) The instance profile iamRole arn - Only use iam_role_arn, or iam_role_name id: description: - (String) The group id if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by field is set to id. When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. 
ignore_changes: choices: - image_id - target description: - (List of Strings) list of fields on which changes should be ignored when updating image_id: description: - (String) The image Id used to launch the instance.; In case of conflict between Instance type and image type, an error will be returned required: true key_pair: description: - (String) Specify a Key Pair to attach to the instances required: true kubernetes: description: - (Object) The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String) lifetime_period: description: - (String) lifetime period load_balancers: description: - (List of Strings) List of classic ELB names max_size: description: - (Integer) The upper limit number of instances that you can scale up to required: true mesosphere: description: - (Object) The Mesosphere integration configuration. Expects the following key - api_server (String) min_size: description: - (Integer) The lower limit number of instances that you can scale down to required: true monitoring: description: - (Boolean) Describes whether instance Enhanced Monitoring is enabled required: true name: description: - (String) Unique name for elastigroup to be created, updated or deleted required: true network_interfaces: description: - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - description (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address (Boolean), delete_on_termination (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address (String), subnet_id (String), associate_ipv6_address (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) on_demand_count: description: - (Integer) Required if risk is not set - Number of on demand instances to launch. 
All other instances will be spot instances.; Either set this parameter or the risk parameter on_demand_instance_type: description: - (String) On-demand instance type that will be provisioned required: true opsworks: description: - (Object) The elastigroup OpsWorks integration configration.; Expects the following key - layer_id (String) persistence: description: - (Object) The Stateful elastigroup configration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices (Boolean), should_persist_private_ip (Boolean) product: choices: - Linux/UNIX - SUSE Linux - Windows - Linux/UNIX (Amazon VPC) - SUSE Linux (Amazon VPC) - Windows description: - (String) Operation system type._ required: true rancher: description: - (Object) The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key (String), master_host (String) right_scale: description: - (Object) The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String) risk: description: - (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100). roll_config: description: - (Object) Roll configuration.; If you would like the group to roll after updating, please use this feature. 
Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, Optional) scheduled_tasks: description: - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity (Integer), adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String), grace_period (Integer), task_type (String, required), is_enabled (Boolean) security_group_ids: description: - (List of Strings) One or more security group IDs. ; In case of update it will override the existing Security Group with the new given array required: true shutdown_script: description: - (String) The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. signals: description: - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), timeout (Integer) spin_up_time: description: - (Integer) spin up time, in seconds, for the instance spot_instance_types: description: - (List of Strings) Spot instance type that will be provisioned. required: true state: choices: - present - absent description: - (String) create or delete the elastigroup tags: description: - (List of tagKey:tagValue paris) a list of tags to configure in the elastigroup. 
Please specify list of keys and values (key colon value); target: description: - (Integer) The number of instances to launch required: true target_group_arns: description: - (List of Strings) List of target group arns instances should be registered to tenancy: choices: - default - dedicated description: - (String) dedicated vs shared tenancy terminate_at_end_of_billing_hour: description: - (Boolean) terminate at the end of billing hour type: bool unit: choices: - instance - weight description: - (String) The capacity unit to launch instances by. required: true up_scaling_policies: description: - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions (List of Objects, Keys allowed are name (String, required) and value (String)), statistic (String, required) evaluation_periods (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type (String, required), adjustment (String), min_target_capacity (String), target (String), maximum (String), minimum (String) down_scaling_policies: description: - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)), statistic (String, required), evaluation_periods (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, required), operator (String, required), action_type (String, required), adjustment (String), max_target_capacity (String), target (String), maximum (String), minimum 
(String) target_tracking_policies: description: - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), namespace (String, required), source (String, required), metric_name (String, required), statistic (String, required), unit (String, required), cooldown (String, required), target (String, required) uniqueness_by: choices: - id - name description: - (String) If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. user_data: description: - (String) Base64-encoded MIME user data. Encode before setting the value. utilize_reserved_instances: description: - (Boolean) In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances. type: bool wait_for_instances: description: - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin type: bool wait_timeout: description: - (Integer) How long the module should wait for instances before failing the action.; Only works if wait_for_instances is True. 
""" EXAMPLES = ''' # Basic configuration YAML example - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: state: present risk: 100 availability_vs_cost: balanced availability_zones: - name: us-west-2a subnet_id: subnet-2b68a15c image_id: ami-f173cc91 key_pair: spotinst-oregon max_size: 15 min_size: 0 target: 0 unit: instance monitoring: True name: ansible-group on_demand_instance_type: c3.large product: Linux/UNIX load_balancers: - test-lb-1 security_group_ids: - sg-8f4b8fe9 spot_instance_types: - c3.large do_not_update: - image_id - target register: result - debug: var=result # In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: state: present account_id: act-1a9dd2b risk: 100 availability_vs_cost: balanced availability_zones: - name: us-west-2a subnet_id: subnet-2b68a15c tags: - Environment: someEnvValue - OtherTagKey: otherValue image_id: ami-f173cc91 key_pair: spotinst-oregon max_size: 5 min_size: 0 target: 0 unit: instance monitoring: True name: ansible-group-tal on_demand_instance_type: c3.large product: Linux/UNIX security_group_ids: - sg-8f4b8fe9 block_device_mappings: - device_name: '/dev/sda1' ebs: volume_size: 100 volume_type: gp2 spot_instance_types: - c3.large do_not_update: - image_id wait_for_instances: True wait_timeout: 600 register: result - name: Store private ips to file shell: echo {{ item.private_ip }}\\n >> list-of-private-ips with_items: "{{ result.instances }}" - debug: var=result # In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id # In organizations with more than one account, it is required to specify an account_id - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: state: present account_id: act-1a9dd2b risk: 100 availability_vs_cost: balanced availability_zones: - name: us-west-2a 
subnet_id: subnet-2b68a15c tags: - Environment: someEnvValue - OtherTagKey: otherValue image_id: ami-f173cc91 key_pair: spotinst-oregon max_size: 5 min_size: 0 target: 0 unit: instance monitoring: True name: ansible-group-tal on_demand_instance_type: c3.large product: Linux/UNIX security_group_ids: - sg-8f4b8fe9 block_device_mappings: - device_name: '/dev/xvda' ebs: volume_size: 60 volume_type: gp2 - device_name: '/dev/xvdb' ebs: volume_size: 120 volume_type: gp2 spot_instance_types: - c3.large do_not_update: - image_id wait_for_instances: True wait_timeout: 600 register: result - name: Store private ips to file shell: echo {{ item.private_ip }}\\n >> list-of-private-ips with_items: "{{ result.instances }}" - debug: var=result # In this example we have set up block device mapping with ephemeral devices - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: state: present risk: 100 availability_vs_cost: balanced availability_zones: - name: us-west-2a subnet_id: subnet-2b68a15c image_id: ami-f173cc91 key_pair: spotinst-oregon max_size: 15 min_size: 0 target: 0 unit: instance block_device_mappings: - device_name: '/dev/xvda' virtual_name: ephemeral0 - device_name: '/dev/xvdb/' virtual_name: ephemeral1 monitoring: True name: ansible-group on_demand_instance_type: c3.large product: Linux/UNIX load_balancers: - test-lb-1 security_group_ids: - sg-8f4b8fe9 spot_instance_types: - c3.large do_not_update: - image_id - target register: result - debug: var=result # In this example we create a basic group configuration with a network interface defined. 
# Each network interface must have a device index - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: state: present risk: 100 availability_vs_cost: balanced network_interfaces: - associate_public_ip_address: true device_index: 0 availability_zones: - name: us-west-2a subnet_id: subnet-2b68a15c image_id: ami-f173cc91 key_pair: spotinst-oregon max_size: 15 min_size: 0 target: 0 unit: instance monitoring: True name: ansible-group on_demand_instance_type: c3.large product: Linux/UNIX load_balancers: - test-lb-1 security_group_ids: - sg-8f4b8fe9 spot_instance_types: - c3.large do_not_update: - image_id - target register: result - debug: var=result # In this example we create a basic group configuration with a target tracking scaling policy defined - hosts: localhost tasks: - name: create elastigroup spotinst_aws_elastigroup: account_id: act-92d45673 state: present risk: 100 availability_vs_cost: balanced availability_zones: - name: us-west-2a subnet_id: subnet-79da021e image_id: ami-f173cc91 fallback_to_od: true tags: - Creator: ValueOfCreatorTag - Environment: ValueOfEnvironmentTag key_pair: spotinst-labs-oregon max_size: 10 min_size: 0 target: 2 unit: instance monitoring: True name: ansible-group-1 on_demand_instance_type: c3.large product: Linux/UNIX security_group_ids: - sg-46cdc13d spot_instance_types: - c3.large target_tracking_policies: - policy_name: target-tracking-1 namespace: AWS/EC2 metric_name: CPUUtilization statistic: average unit: percent target: 50 cooldown: 120 do_not_update: - image_id register: result - debug: var=result ''' RETURN = ''' --- instances: description: List of active elastigroup instances and their details. 
returned: success type: dict sample: [ { "spotInstanceRequestId": "sir-regs25zp", "instanceId": "i-09640ad8678234c", "instanceType": "m4.large", "product": "Linux/UNIX", "availabilityZone": "us-west-2b", "privateIp": "180.0.2.244", "createdAt": "2017-07-17T12:46:18.000Z", "status": "fulfilled" } ] group_id: description: Created / Updated group's ID. returned: success type: str sample: "sig-12345" ''' HAS_SPOTINST_SDK = False __metaclass__ = type import os import time from ansible.module_utils.basic import AnsibleModule try: import spotinst_sdk as spotinst from spotinst_sdk import SpotinstClientException HAS_SPOTINST_SDK = True except ImportError: pass eni_fields = ('description', 'device_index', 'secondary_private_ip_address_count', 'associate_public_ip_address', 'delete_on_termination', 'groups', 'network_interface_id', 'private_ip_address', 'subnet_id', 'associate_ipv6_address') private_ip_fields = ('private_ip_address', 'primary') capacity_fields = (dict(ansible_field_name='min_size', spotinst_field_name='minimum'), dict(ansible_field_name='max_size', spotinst_field_name='maximum'), 'target', 'unit') lspec_fields = ('user_data', 'key_pair', 'tenancy', 'shutdown_script', 'monitoring', 'ebs_optimized', 'image_id', 'health_check_type', 'health_check_grace_period', 'health_check_unhealthy_duration_before_replacement', 'security_group_ids') iam_fields = (dict(ansible_field_name='iam_role_name', spotinst_field_name='name'), dict(ansible_field_name='iam_role_arn', spotinst_field_name='arn')) scheduled_task_fields = ('adjustment', 'adjustment_percentage', 'batch_size_percentage', 'cron_expression', 'frequency', 'grace_period', 'task_type', 'is_enabled', 'scale_target_capacity', 'scale_min_capacity', 'scale_max_capacity') scaling_policy_fields = ('policy_name', 'namespace', 'metric_name', 'dimensions', 'statistic', 'evaluation_periods', 'period', 'threshold', 'cooldown', 'unit', 'operator') tracking_policy_fields = ('policy_name', 'namespace', 'source', 'metric_name', 
'statistic', 'unit', 'cooldown', 'target', 'threshold') action_fields = (dict(ansible_field_name='action_type', spotinst_field_name='type'), 'adjustment', 'min_target_capacity', 'max_target_capacity', 'target', 'minimum', 'maximum') signal_fields = ('name', 'timeout') multai_lb_fields = ('balancer_id', 'project_id', 'target_set_id', 'az_awareness', 'auto_weight') persistence_fields = ('should_persist_root_device', 'should_persist_block_devices', 'should_persist_private_ip') strategy_fields = ('risk', 'utilize_reserved_instances', 'fallback_to_od', 'on_demand_count', 'availability_vs_cost', 'draining_timeout', 'spin_up_time', 'lifetime_period') ebs_fields = ('delete_on_termination', 'encrypted', 'iops', 'snapshot_id', 'volume_type', 'volume_size') bdm_fields = ('device_name', 'virtual_name', 'no_device') kubernetes_fields = ('api_server', 'token') right_scale_fields = ('account_id', 'refresh_token') rancher_fields = ('access_key', 'secret_key', 'master_host', 'version') chef_fields = ('chef_server', 'organization', 'user', 'pem_key', 'chef_version') az_fields = ('name', 'subnet_id', 'placement_group_name') opsworks_fields = ('layer_id',) scaling_strategy_fields = ('terminate_at_end_of_billing_hour',) mesosphere_fields = ('api_server',) ecs_fields = ('cluster_name',) multai_fields = ('multai_token',) def handle_elastigroup(client, module): has_changed = False should_create = False group_id = None message = 'None' name = module.params.get('name') state = module.params.get('state') uniqueness_by = module.params.get('uniqueness_by') external_group_id = module.params.get('id') if uniqueness_by == 'id': if external_group_id is None: should_create = True else: should_create = False group_id = external_group_id else: groups = client.get_elastigroups() should_create, group_id = find_group_with_same_name(groups, name) if should_create is True: if state == 'present': eg = expand_elastigroup(module, is_update=False) module.debug(str(" [INFO] " + message + "\n")) group = 
client.create_elastigroup(group=eg) group_id = group['id'] message = 'Created group Successfully.' has_changed = True elif state == 'absent': message = 'Cannot delete non-existent group.' has_changed = False else: eg = expand_elastigroup(module, is_update=True) if state == 'present': group = client.update_elastigroup(group_update=eg, group_id=group_id) message = 'Updated group successfully.' try: roll_config = module.params.get('roll_config') if roll_config: eg_roll = spotinst.aws_elastigroup.Roll( batch_size_percentage=roll_config.get('batch_size_percentage'), grace_period=roll_config.get('grace_period'), health_check_type=roll_config.get('health_check_type') ) roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id) message = 'Updated and started rolling the group successfully.' except SpotinstClientException as exc: message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc) has_changed = True elif state == 'absent': try: client.delete_elastigroup(group_id=group_id) except SpotinstClientException as exc: if "GROUP_DOESNT_EXIST" in exc.message: pass else: module.fail_json(msg="Error while attempting to delete group : " + exc.message) message = 'Deleted group successfully.' 
has_changed = True return group_id, message, has_changed def retrieve_group_instances(client, module, group_id): wait_timeout = module.params.get('wait_timeout') wait_for_instances = module.params.get('wait_for_instances') health_check_type = module.params.get('health_check_type') if wait_timeout is None: wait_timeout = 300 wait_timeout = time.time() + wait_timeout target = module.params.get('target') state = module.params.get('state') instances = list() if state == 'present' and group_id is not None and wait_for_instances is True: is_amount_fulfilled = False while is_amount_fulfilled is False and wait_timeout > time.time(): instances = list() amount_of_fulfilled_instances = 0 if health_check_type is not None: healthy_instances = client.get_instance_healthiness(group_id=group_id) for healthy_instance in healthy_instances: if(healthy_instance.get('healthStatus') == 'HEALTHY'): amount_of_fulfilled_instances += 1 instances.append(healthy_instance) else: active_instances = client.get_elastigroup_active_instances(group_id=group_id) for active_instance in active_instances: if active_instance.get('private_ip') is not None: amount_of_fulfilled_instances += 1 instances.append(active_instance) if amount_of_fulfilled_instances >= target: is_amount_fulfilled = True time.sleep(10) return instances def find_group_with_same_name(groups, name): for group in groups: if group['name'] == name: return False, group.get('id') return True, None def expand_elastigroup(module, is_update): do_not_update = module.params['do_not_update'] name = module.params.get('name') eg = spotinst.aws_elastigroup.Elastigroup() description = module.params.get('description') if name is not None: eg.name = name if description is not None: eg.description = description # Capacity expand_capacity(eg, module, is_update, do_not_update) # Strategy expand_strategy(eg, module) # Scaling expand_scaling(eg, module) # Third party integrations expand_integrations(eg, module) # Compute expand_compute(eg, module, 
is_update, do_not_update) # Multai expand_multai(eg, module) # Scheduling expand_scheduled_tasks(eg, module) return eg def expand_compute(eg, module, is_update, do_not_update): elastic_ips = module.params['elastic_ips'] on_demand_instance_type = module.params.get('on_demand_instance_type') spot_instance_types = module.params['spot_instance_types'] ebs_volume_pool = module.params['ebs_volume_pool'] availability_zones_list = module.params['availability_zones'] product = module.params.get('product') eg_compute = spotinst.aws_elastigroup.Compute() if product is not None: # Only put product on group creation if is_update is not True: eg_compute.product = product if elastic_ips is not None: eg_compute.elastic_ips = elastic_ips if on_demand_instance_type or spot_instance_types is not None: eg_instance_types = spotinst.aws_elastigroup.InstanceTypes() if on_demand_instance_type is not None: eg_instance_types.spot = spot_instance_types if spot_instance_types is not None: eg_instance_types.ondemand = on_demand_instance_type if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None: eg_compute.instance_types = eg_instance_types expand_ebs_volume_pool(eg_compute, ebs_volume_pool) eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone') expand_launch_spec(eg_compute, module, is_update, do_not_update) eg.compute = eg_compute def expand_ebs_volume_pool(eg_compute, ebs_volumes_list): if ebs_volumes_list is not None: eg_volumes = [] for volume in ebs_volumes_list: eg_volume = spotinst.aws_elastigroup.EbsVolume() if volume.get('device_name') is not None: eg_volume.device_name = volume.get('device_name') if volume.get('volume_ids') is not None: eg_volume.volume_ids = volume.get('volume_ids') if eg_volume.device_name is not None: eg_volumes.append(eg_volume) if len(eg_volumes) > 0: eg_compute.ebs_volume_pool = eg_volumes def expand_launch_spec(eg_compute, module, is_update, do_not_update): eg_launch_spec = 
expand_fields(lspec_fields, module.params, 'LaunchSpecification') if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None: eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole') tags = module.params['tags'] load_balancers = module.params['load_balancers'] target_group_arns = module.params['target_group_arns'] block_device_mappings = module.params['block_device_mappings'] network_interfaces = module.params['network_interfaces'] if is_update is True: if 'image_id' in do_not_update: delattr(eg_launch_spec, 'image_id') expand_tags(eg_launch_spec, tags) expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns) expand_block_device_mappings(eg_launch_spec, block_device_mappings) expand_network_interfaces(eg_launch_spec, network_interfaces) eg_compute.launch_specification = eg_launch_spec def expand_integrations(eg, module): rancher = module.params.get('rancher') mesosphere = module.params.get('mesosphere') ecs = module.params.get('ecs') kubernetes = module.params.get('kubernetes') right_scale = module.params.get('right_scale') opsworks = module.params.get('opsworks') chef = module.params.get('chef') integration_exists = False eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations() if mesosphere is not None: eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere') integration_exists = True if ecs is not None: eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration') integration_exists = True if kubernetes is not None: eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration') integration_exists = True if right_scale is not None: eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration') integration_exists = True if opsworks is not None: eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration') integration_exists = True if 
rancher is not None: eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher') integration_exists = True if chef is not None: eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration') integration_exists = True if integration_exists: eg.third_parties_integration = eg_integrations def expand_capacity(eg, module, is_update, do_not_update): eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity') if is_update is True: delattr(eg_capacity, 'unit') if 'target' in do_not_update: delattr(eg_capacity, 'target') eg.capacity = eg_capacity def expand_strategy(eg, module): persistence = module.params.get('persistence') signals = module.params.get('signals') eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy') terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour') if terminate_at_end_of_billing_hour is not None: eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, module.params, 'ScalingStrategy') if persistence is not None: eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence') if signals is not None: eg_signals = expand_list(signals, signal_fields, 'Signal') if len(eg_signals) > 0: eg_strategy.signals = eg_signals eg.strategy = eg_strategy def expand_multai(eg, module): multai_load_balancers = module.params.get('multai_load_balancers') eg_multai = expand_fields(multai_fields, module.params, 'Multai') if multai_load_balancers is not None: eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer') if len(eg_multai_load_balancers) > 0: eg_multai.balancers = eg_multai_load_balancers eg.multai = eg_multai def expand_scheduled_tasks(eg, module): scheduled_tasks = module.params.get('scheduled_tasks') if scheduled_tasks is not None: eg_scheduling = spotinst.aws_elastigroup.Scheduling() eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask') if len(eg_tasks) > 0: 
eg_scheduling.tasks = eg_tasks eg.scheduling = eg_scheduling def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): if load_balancers is not None or target_group_arns is not None: eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig() eg_total_lbs = [] if load_balancers is not None: for elb_name in load_balancers: eg_elb = spotinst.aws_elastigroup.LoadBalancer() if elb_name is not None: eg_elb.name = elb_name eg_elb.type = 'CLASSIC' eg_total_lbs.append(eg_elb) if target_group_arns is not None: for target_arn in target_group_arns: eg_elb = spotinst.aws_elastigroup.LoadBalancer() if target_arn is not None: eg_elb.arn = target_arn eg_elb.type = 'TARGET_GROUP' eg_total_lbs.append(eg_elb) if len(eg_total_lbs) > 0: eg_load_balancers_config.load_balancers = eg_total_lbs eg_launchspec.load_balancers_config = eg_load_balancers_config def expand_tags(eg_launchspec, tags): if tags is not None: eg_tags = [] for tag in tags: eg_tag = spotinst.aws_elastigroup.Tag() if tag.keys(): eg_tag.tag_key = tag.keys()[0] if tag.values(): eg_tag.tag_value = tag.values()[0] eg_tags.append(eg_tag) if len(eg_tags) > 0: eg_launchspec.tags = eg_tags def expand_block_device_mappings(eg_launchspec, bdms): if bdms is not None: eg_bdms = [] for bdm in bdms: eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping') if bdm.get('ebs') is not None: eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS') eg_bdms.append(eg_bdm) if len(eg_bdms) > 0: eg_launchspec.block_device_mappings = eg_bdms def expand_network_interfaces(eg_launchspec, enis): if enis is not None: eg_enis = [] for eni in enis: eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface') eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress') if eg_pias is not None: eg_eni.private_ip_addresses = eg_pias eg_enis.append(eg_eni) if len(eg_enis) > 0: eg_launchspec.network_interfaces = eg_enis def expand_scaling(eg, module): up_scaling_policies = 
module.params['up_scaling_policies'] down_scaling_policies = module.params['down_scaling_policies'] target_tracking_policies = module.params['target_tracking_policies'] eg_scaling = spotinst.aws_elastigroup.Scaling() if up_scaling_policies is not None: eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies) if len(eg_up_scaling_policies) > 0: eg_scaling.up = eg_up_scaling_policies if down_scaling_policies is not None: eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies) if len(eg_down_scaling_policies) > 0: eg_scaling.down = eg_down_scaling_policies if target_tracking_policies is not None: eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies) if len(eg_target_tracking_policies) > 0: eg_scaling.target = eg_target_tracking_policies if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None: eg.scaling = eg_scaling def expand_list(items, fields, class_name): if items is not None: new_objects_list = [] for item in items: new_obj = expand_fields(fields, item, class_name) new_objects_list.append(new_obj) return new_objects_list def expand_fields(fields, item, class_name): class_ = getattr(spotinst.aws_elastigroup, class_name) new_obj = class_() # Handle primitive fields if item is not None: for field in fields: if isinstance(field, dict): ansible_field_name = field['ansible_field_name'] spotinst_field_name = field['spotinst_field_name'] else: ansible_field_name = field spotinst_field_name = field if item.get(ansible_field_name) is not None: setattr(new_obj, spotinst_field_name, item.get(ansible_field_name)) return new_obj def expand_scaling_policies(scaling_policies): eg_scaling_policies = [] for policy in scaling_policies: eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy') eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction') eg_scaling_policies.append(eg_policy) return eg_scaling_policies def 
expand_target_tracking_policies(tracking_policies): eg_tracking_policies = [] for policy in tracking_policies: eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy') eg_tracking_policies.append(eg_policy) return eg_tracking_policies def main(): fields = dict( account_id=dict(type='str'), availability_vs_cost=dict(type='str', required=True), availability_zones=dict(type='list', required=True), block_device_mappings=dict(type='list'), chef=dict(type='dict'), credentials_path=dict(type='path', default="~/.spotinst/credentials"), do_not_update=dict(default=[], type='list'), down_scaling_policies=dict(type='list'), draining_timeout=dict(type='int'), ebs_optimized=dict(type='bool'), ebs_volume_pool=dict(type='list'), ecs=dict(type='dict'), elastic_beanstalk=dict(type='dict'), elastic_ips=dict(type='list'), fallback_to_od=dict(type='bool'), id=dict(type='str'), health_check_grace_period=dict(type='int'), health_check_type=dict(type='str'), health_check_unhealthy_duration_before_replacement=dict(type='int'), iam_role_arn=dict(type='str'), iam_role_name=dict(type='str'), image_id=dict(type='str', required=True), key_pair=dict(type='str'), kubernetes=dict(type='dict'), lifetime_period=dict(type='int'), load_balancers=dict(type='list'), max_size=dict(type='int', required=True), mesosphere=dict(type='dict'), min_size=dict(type='int', required=True), monitoring=dict(type='str'), multai_load_balancers=dict(type='list'), multai_token=dict(type='str'), name=dict(type='str', required=True), network_interfaces=dict(type='list'), on_demand_count=dict(type='int'), on_demand_instance_type=dict(type='str'), opsworks=dict(type='dict'), persistence=dict(type='dict'), product=dict(type='str', required=True), rancher=dict(type='dict'), right_scale=dict(type='dict'), risk=dict(type='int'), roll_config=dict(type='dict'), scheduled_tasks=dict(type='list'), security_group_ids=dict(type='list', required=True), shutdown_script=dict(type='str'), 
signals=dict(type='list'), spin_up_time=dict(type='int'), spot_instance_types=dict(type='list', required=True), state=dict(default='present', choices=['present', 'absent']), tags=dict(type='list'), target=dict(type='int', required=True), target_group_arns=dict(type='list'), tenancy=dict(type='str'), terminate_at_end_of_billing_hour=dict(type='bool'), token=dict(type='str'), unit=dict(type='str'), user_data=dict(type='str'), utilize_reserved_instances=dict(type='bool'), uniqueness_by=dict(default='name', choices=['name', 'id']), up_scaling_policies=dict(type='list'), target_tracking_policies=dict(type='list'), wait_for_instances=dict(type='bool', default=False), wait_timeout=dict(type='int') ) module = AnsibleModule(argument_spec=fields) if not HAS_SPOTINST_SDK: module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)") # Retrieve creds file variables creds_file_loaded_vars = dict() credentials_path = module.params.get('credentials_path') try: with open(credentials_path, "r") as creds: for line in creds: eq_index = line.find('=') var_name = line[:eq_index].strip() string_value = line[eq_index + 1:].strip() creds_file_loaded_vars[var_name] = string_value except IOError: pass # End of creds file retrieval token = module.params.get('token') if not token: token = os.environ.get('SPOTINST_TOKEN') if not token: token = creds_file_loaded_vars.get("token") account = module.params.get('account_id') if not account: account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT') if not account: account = creds_file_loaded_vars.get("account") client = spotinst.SpotinstClient(auth_token=token, print_output=False) if account is not None: client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account) group_id, message, has_changed = handle_elastigroup(client=client, module=module) instances = retrieve_group_instances(client=client, module=module, group_id=group_id) module.exit_json(changed=has_changed, 
group_id=group_id, message=message, instances=instances) if __name__ == '__main__': main()
mit
HSAnet/glimpse_client
3rdparty/breakpad/src/third_party/protobuf/protobuf/gtest/test/gtest_break_on_failure_unittest.py
1050
7214
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test for Google Test's break-on-failure mode.

A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag.  This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import gtest_test_utils
import os
import sys


# Constants.

# True iff we are running on Windows (affects which tests apply).
IS_WINDOWS = os.name == 'nt'

# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'

# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'

# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'

# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'

# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')


# Utilities.


# A private copy of the environment; mutated by SetEnvVar and passed to
# every child process so the real os.environ is never touched.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.
  """

  if value is None:
    # Remove the variable if present; a no-op otherwise.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value


def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  process = gtest_test_utils.Subprocess(command, env=environ)
  if process.terminated_by_signal:
    return 1
  return 0


# The tests.


class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    # Human-readable description of the env var state, used in the
    # failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Build the child command line, appending the flag only when requested.
    command = [EXE_PATH]
    if flag_value is not None:
      if flag_value == '0':
        command.append('--%s=0' % BREAK_ON_FAILURE_FLAG)
      else:
        command.append('--%s' % BREAK_ON_FAILURE_FLAG)

    if expect_seg_fault:
      expectation = 'should'
    else:
      expectation = 'should not'

    has_seg_fault = Run(command)

    # Restore the environment before asserting, so a failure here does
    # not leak state into the next test.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            expectation))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0', flag_value=None, expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1', flag_value=None, expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None, flag_value='0', expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None, flag_value='1', expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    # The flag value alone decides the outcome, whatever the env var says.
    for env_value, flag_val, expect in (('0', '0', 0),
                                        ('0', '1', 1),
                                        ('1', '0', 0),
                                        ('1', '1', 1)):
      self.RunAndVerify(env_var_value=env_value,
                        flag_value=flag_val,
                        expect_seg_fault=expect)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None, flag_value='1', expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1', flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)


if __name__ == '__main__':
  gtest_test_utils.Main()
bsd-3-clause
sjwaterco/qgis_ui_plugin
geopy/geocoders/wiki_semantic.py
52
3960
import xml.dom.minidom

from geopy.geocoders.base import Geocoder
from geopy.point import Point
from geopy.location import Location
from geopy import util

# BUG FIX: ``urlopen`` was used in geocode_url() but never imported.
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

try:
    from BeautifulSoup import BeautifulSoup
except ImportError:
    util.logger.warn("BeautifulSoup was not found. " \
        "The SemanticMediaWiki geocoder will not work.")

try:
    set
except NameError:
    from sets import Set as set


class SemanticMediaWiki(Geocoder):
    """Geocoder backed by a Semantic MediaWiki instance.

    Looks up a wiki page for the query string, follows the page's RDF
    export, and extracts coordinates from the configured semantic
    attributes; when none are found it recursively follows the
    configured semantic relations.
    """

    def __init__(self, format_url, attributes=None, relations=None,
                 prefer_semantic=False, transform_string=None):
        """Store the wiki endpoint and the semantic lookup configuration.

        :param format_url: URL template with a single ``%s`` placeholder
            for the (transformed) query string.
        :param attributes: semantic attribute names to search for
            coordinate values.
        :param relations: semantic relation names to follow when no
            attribute yields coordinates.
        :param prefer_semantic: stored as-is; not consulted by the code
            in this class (kept for interface compatibility).
        :param transform_string: optional callable applied to the query
            string before substitution into ``format_url``.
        """
        self.format_url = format_url
        self.attributes = attributes
        self.relations = relations
        self.prefer_semantic = prefer_semantic
        self.transform_string = transform_string

    def get_url(self, string):
        """Return the wiki page URL for ``string``."""
        # BUG FIX: the default transform_string=None used to raise a
        # TypeError here; fall back to the identity transform.
        transform = self.transform_string or (lambda s: s)
        return self.format_url % transform(string)

    def parse_rdf_link(self, page, mime_type='application/rdf+xml'):
        """Parse the URL of the RDF link from the <head> of ``page``."""
        soup = BeautifulSoup(page)
        link = soup.head.find('link', rel='alternate', type=mime_type)
        return link and link['href'] or None

    def parse_rdf_things(self, data):
        """Parse an RDF document into a mapping of smw:Thing URIs.

        Returns a ``(thing_map, thing)`` tuple where ``thing_map`` maps
        each smw:Thing's ``rdf:about`` URI to its article URL, and
        ``thing`` is the first smw:Thing in document order (the node
        list is iterated in reverse, so the loop ends on the first
        element), or None when the document contains no things.
        """
        dom = xml.dom.minidom.parseString(data)
        thing_map = {}
        things = dom.getElementsByTagName('smw:Thing')
        things.reverse()
        thing = None  # BUG FIX: avoid UnboundLocalError on empty documents.
        for thing in things:
            name = thing.attributes['rdf:about'].value
            articles = thing.getElementsByTagName('smw:hasArticle')
            # BUG FIX: the mapping must be stored in thing_map; the
            # original assigned into the ``things`` list with a string
            # key, which raises a TypeError.
            thing_map[name] = articles[0].attributes['rdf:resource'].value
        return (thing_map, thing)

    def transform_semantic(self, string):
        """Normalize semantic attribute and relation names by replacing
        spaces with underscores and capitalizing the result."""
        return string.replace(' ', '_').capitalize()

    def get_relations(self, thing, relations=None):
        """Yield ``(relation, resource_uri)`` pairs found on ``thing``."""
        if relations is None:
            relations = self.relations
        for relation in relations:
            relation = self.transform_semantic(relation)
            for node in thing.getElementsByTagName('relation:' + relation):
                resource = node.attributes['rdf:resource'].value
                yield (relation, resource)

    def get_attributes(self, thing, attributes=None):
        """Yield ``(attribute, text_value)`` pairs found on ``thing``."""
        if attributes is None:
            attributes = self.attributes
        for attribute in attributes:
            attribute = self.transform_semantic(attribute)
            for node in thing.getElementsByTagName('attribute:' + attribute):
                value = node.firstChild.nodeValue.strip()
                yield (attribute, value)

    def get_thing_label(self, thing):
        """Return the human-readable rdfs:label of ``thing``."""
        return util.get_first_text(thing, 'rdfs:label')

    def geocode_url(self, url, attempted=None):
        """Geocode a wiki page URL.

        Returns ``(name, (latitude, longitude))``; the coordinates are
        None when nothing could be extracted.  ``attempted`` tracks
        already-visited URLs so cyclic relations do not recurse forever.
        """
        if attempted is None:
            attempted = set()
        util.logger.debug("Fetching %s..." % url)
        page = urlopen(url)
        soup = BeautifulSoup(page)
        rdf_url = self.parse_rdf_link(soup)
        util.logger.debug("Fetching %s..." % rdf_url)
        page = urlopen(rdf_url)
        # BUG FIX: call the methods actually defined on this class
        # (parse_rdf_things / get_thing_label); the original called the
        # nonexistent self.parse_rdf and self.get_label.
        things, thing = self.parse_rdf_things(page)
        name = self.get_thing_label(thing)

        # BUG FIX: pre-initialize so an empty attribute iterator cannot
        # leave these names unbound below.
        latitude = longitude = None
        for attribute, value in self.get_attributes(thing):
            latitude, longitude = util.parse_geo(value)
            if None not in (latitude, longitude):
                break

        if None in (latitude, longitude):
            for relation, resource in self.get_relations(thing):
                url = things.get(resource, resource)
                if url in attempted:  # Avoid cyclic relationships.
                    # BUG FIX: the original referenced an undefined name
                    # ``tried``; the parameter is ``attempted``.
                    continue
                attempted.add(url)
                name, (latitude, longitude) = self.geocode_url(url, attempted)
                if None not in (name, latitude, longitude):
                    break

        return (name, (latitude, longitude))
mit
ybellavance/python-for-android
python-build/python-libs/gdata/build/lib/gdata/exif/__init__.py
253
6981
# -*-*- encoding: utf-8 -*-*- # # This is gdata.photos.exif, implementing the exif namespace in gdata # # $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ # # Copyright 2007 Håvard Gulldahl # Portions copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module maps elements from the {EXIF} namespace[1] to GData objects. These elements describe image data, using exif attributes[2]. Picasa Web Albums uses the exif namespace to represent Exif data encoded in a photo [3]. Picasa Web Albums uses the following exif elements: exif:distance exif:exposure exif:flash exif:focallength exif:fstop exif:imageUniqueID exif:iso exif:make exif:model exif:tags exif:time [1]: http://schemas.google.com/photos/exif/2007. [2]: http://en.wikipedia.org/wiki/Exif [3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference """ __author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ __license__ = 'Apache License v2' import atom import gdata EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007' class ExifBaseElement(atom.AtomBase): """Base class for elements in the EXIF_NAMESPACE (%s). 
To add new elements, you only need to add the element tag name to self._tag """ % EXIF_NAMESPACE _tag = '' _namespace = EXIF_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() def __init__(self, name=None, extension_elements=None, extension_attributes=None, text=None): self.name = name self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} class Distance(ExifBaseElement): "(float) The distance to the subject, e.g. 0.0" _tag = 'distance' def DistanceFromString(xml_string): return atom.CreateClassFromXMLString(Distance, xml_string) class Exposure(ExifBaseElement): "(float) The exposure time used, e.g. 0.025 or 8.0E4" _tag = 'exposure' def ExposureFromString(xml_string): return atom.CreateClassFromXMLString(Exposure, xml_string) class Flash(ExifBaseElement): """(string) Boolean value indicating whether the flash was used. The .text attribute will either be `true' or `false' As a convenience, this object's .bool method will return what you want, so you can say: flash_used = bool(Flash) """ _tag = 'flash' def __bool__(self): if self.text.lower() in ('true','false'): return self.text.lower() == 'true' def FlashFromString(xml_string): return atom.CreateClassFromXMLString(Flash, xml_string) class Focallength(ExifBaseElement): "(float) The focal length used, e.g. 23.7" _tag = 'focallength' def FocallengthFromString(xml_string): return atom.CreateClassFromXMLString(Focallength, xml_string) class Fstop(ExifBaseElement): "(float) The fstop value used, e.g. 5.0" _tag = 'fstop' def FstopFromString(xml_string): return atom.CreateClassFromXMLString(Fstop, xml_string) class ImageUniqueID(ExifBaseElement): "(string) The unique image ID for the photo. 
Generated by Google Photo servers" _tag = 'imageUniqueID' def ImageUniqueIDFromString(xml_string): return atom.CreateClassFromXMLString(ImageUniqueID, xml_string) class Iso(ExifBaseElement): "(int) The iso equivalent value used, e.g. 200" _tag = 'iso' def IsoFromString(xml_string): return atom.CreateClassFromXMLString(Iso, xml_string) class Make(ExifBaseElement): "(string) The make of the camera used, e.g. Fictitious Camera Company" _tag = 'make' def MakeFromString(xml_string): return atom.CreateClassFromXMLString(Make, xml_string) class Model(ExifBaseElement): "(string) The model of the camera used,e.g AMAZING-100D" _tag = 'model' def ModelFromString(xml_string): return atom.CreateClassFromXMLString(Model, xml_string) class Time(ExifBaseElement): """(int) The date/time the photo was taken, e.g. 1180294337000. Represented as the number of milliseconds since January 1st, 1970. The value of this element will always be identical to the value of the <gphoto:timestamp>. Look at this object's .isoformat() for a human friendly datetime string: photo_epoch = Time.text # 1180294337000 photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' Alternatively: photo_datetime = Time.datetime() # (requires python >= 2.3) """ _tag = 'time' def isoformat(self): """(string) Return the timestamp as a ISO 8601 formatted string, e.g. '2007-05-27T19:32:17.000Z' """ import time epoch = float(self.text)/1000 return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) def datetime(self): """(datetime.datetime) Return the timestamp as a datetime.datetime object Requires python 2.3 """ import datetime epoch = float(self.text)/1000 return datetime.datetime.fromtimestamp(epoch) def TimeFromString(xml_string): return atom.CreateClassFromXMLString(Time, xml_string) class Tags(ExifBaseElement): """The container for all exif elements. The <exif:tags> element can appear as a child of a photo entry. 
""" _tag = 'tags' _children = atom.AtomBase._children.copy() _children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop) _children['{%s}make' % EXIF_NAMESPACE] = ('make', Make) _children['{%s}model' % EXIF_NAMESPACE] = ('model', Model) _children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance) _children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure) _children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash) _children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength) _children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso) _children['{%s}time' % EXIF_NAMESPACE] = ('time', Time) _children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID) def __init__(self, extension_elements=None, extension_attributes=None, text=None): ExifBaseElement.__init__(self, extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) self.fstop=None self.make=None self.model=None self.distance=None self.exposure=None self.flash=None self.focallength=None self.iso=None self.time=None self.imageUniqueID=None def TagsFromString(xml_string): return atom.CreateClassFromXMLString(Tags, xml_string)
apache-2.0
tinganho/linux-kernel
tools/perf/scripts/python/netdev-times.py
11271
15048
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), 
diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): 
print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print 
"debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = 
(name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) 
irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
gpl-2.0
FlorianLudwig/odoo
addons/crm/report/crm_lead_report.py
309
5123
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.crm import crm from openerp.osv import fields, osv from openerp import tools class crm_lead_report(osv.Model): """ CRM Lead Analysis """ _name = "crm.lead.report" _auto = False _description = "CRM Lead Analysis" _rec_name = 'date_deadline' _inherit = ["crm.tracking.mixin"] _columns = { 'date_deadline': fields.date('Exp. 
Closing', readonly=True, help="Expected Closing"), 'create_date': fields.datetime('Creation Date', readonly=True), 'opening_date': fields.datetime('Assignation Date', readonly=True), 'date_closed': fields.datetime('Close Date', readonly=True), 'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True), 'nbr_cases': fields.integer("# of Cases", readonly=True), # durations 'delay_open': fields.float('Delay to Assign',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"), 'delay_close': fields.float('Delay to Close',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"), 'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"), 'user_id':fields.many2one('res.users', 'User', readonly=True), 'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True), 'country_id':fields.many2one('res.country', 'Country', readonly=True), 'company_id': fields.many2one('res.company', 'Company', readonly=True), 'probability': fields.float('Probability',digits=(16,2),readonly=True, group_operator="avg"), 'planned_revenue': fields.float('Total Revenue',digits=(16,2),readonly=True), # TDE FIXME master: rename into total_revenue 'probable_revenue': fields.float('Expected Revenue', digits=(16,2),readonly=True), # TDE FIXME master: rename into expected_revenue 'stage_id': fields.many2one ('crm.case.stage', 'Stage', readonly=True, domain="[('section_ids', '=', section_id)]"), 'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True), 'company_id': fields.many2one('res.company', 'Company', readonly=True), 'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'), 'type':fields.selection([ ('lead','Lead'), ('opportunity','Opportunity'), ],'Type', help="Type is used to separate Leads and Opportunities"), } def init(self, cr): """ CRM Lead Report @param cr: the current row, from the database cursor """ 
tools.drop_view_if_exists(cr, 'crm_lead_report') cr.execute(""" CREATE OR REPLACE VIEW crm_lead_report AS ( SELECT id, c.date_deadline, count(id) as nbr_cases, c.date_open as opening_date, c.date_closed as date_closed, c.date_last_stage_update as date_last_stage_update, c.user_id, c.probability, c.stage_id, c.type, c.company_id, c.priority, c.section_id, c.campaign_id, c.source_id, c.medium_id, c.partner_id, c.country_id, c.planned_revenue as planned_revenue, c.planned_revenue*(c.probability/100) as probable_revenue, c.create_date as create_date, extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close, abs(extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24)) as delay_expected, extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open FROM crm_lead c WHERE c.active = 'true' GROUP BY c.id )""") # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
shuishoudage/CloudappCLI
env/lib/python2.7/site-packages/pip/__init__.py
75
10604
#!/usr/bin/env python from __future__ import absolute_import import locale import logging import os import optparse import warnings import sys import re from pip.exceptions import InstallationError, CommandError, PipError from pip.utils import get_installed_distributions, get_prog from pip.utils import deprecation, dist_is_editable from pip.vcs import git, mercurial, subversion, bazaar # noqa from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter from pip.commands import get_summaries, get_similar_commands from pip.commands import commands_dict from pip._vendor.requests.packages.urllib3.exceptions import ( InsecureRequestWarning, ) # assignment for flake8 to be happy # This fixes a peculiarity when importing via __import__ - as we are # initialising the pip module, "from pip import cmdoptions" is recursive # and appears not to work properly in that situation. import pip.cmdoptions cmdoptions = pip.cmdoptions # The version as used in the setup.py and the docs conf.py __version__ = "8.1.2" logger = logging.getLogger(__name__) # Hide the InsecureRequestWArning from urllib3 warnings.filterwarnings("ignore", category=InsecureRequestWarning) def autocomplete(): """Command and option completion for the main option parser (and options) and its subcommands (and options). Enable by sourcing one of the completion shell scripts (bash or zsh). """ # Don't complete if user hasn't sourced bash_completion file. 
if 'PIP_AUTO_COMPLETE' not in os.environ: return cwords = os.environ['COMP_WORDS'].split()[1:] cword = int(os.environ['COMP_CWORD']) try: current = cwords[cword - 1] except IndexError: current = '' subcommands = [cmd for cmd, summary in get_summaries()] options = [] # subcommand try: subcommand_name = [w for w in cwords if w in subcommands][0] except IndexError: subcommand_name = None parser = create_main_parser() # subcommand options if subcommand_name: # special case: 'help' subcommand has no options if subcommand_name == 'help': sys.exit(1) # special case: list locally installed dists for uninstall command if subcommand_name == 'uninstall' and not current.startswith('-'): installed = [] lc = current.lower() for dist in get_installed_distributions(local_only=True): if dist.key.startswith(lc) and dist.key not in cwords[1:]: installed.append(dist.key) # if there are no dists installed, fall back to option completion if installed: for dist in installed: print(dist) sys.exit(1) subcommand = commands_dict[subcommand_name]() options += [(opt.get_opt_string(), opt.nargs) for opt in subcommand.parser.option_list_all if opt.help != optparse.SUPPRESS_HELP] # filter out previously specified options from available options prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] options = [(x, v) for (x, v) in options if x not in prev_opts] # filter options by current input options = [(k, v) for k, v in options if k.startswith(current)] for option in options: opt_label = option[0] # append '=' to options which require args if option[1]: opt_label += '=' print(opt_label) else: # show main parser options only when necessary if current.startswith('-') or current.startswith('--'): opts = [i.option_list for i in parser.option_groups] opts.append(parser.option_list) opts = (o for it in opts for o in it) subcommands += [i.get_opt_string() for i in opts if i.help != optparse.SUPPRESS_HELP] print(' '.join([x for x in subcommands if x.startswith(current)])) sys.exit(1) def 
create_main_parser(): parser_kw = { 'usage': '\n%prog <command> [options]', 'add_help_option': False, 'formatter': UpdatingDefaultsHelpFormatter(), 'name': 'global', 'prog': get_prog(), } parser = ConfigOptionParser(**parser_kw) parser.disable_interspersed_args() pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) parser.version = 'pip %s from %s (python %s)' % ( __version__, pip_pkg_dir, sys.version[:3]) # add the general options gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) parser.add_option_group(gen_opts) parser.main = True # so the help formatter knows # create command listing for description command_summaries = get_summaries() description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] parser.description = '\n'.join(description) return parser def parseopts(args): parser = create_main_parser() # Note: parser calls disable_interspersed_args(), so the result of this # call is to split the initial args into the general options before the # subcommand and everything else. 
# For example: # args: ['--timeout=5', 'install', '--user', 'INITools'] # general_options: ['--timeout==5'] # args_else: ['install', '--user', 'INITools'] general_options, args_else = parser.parse_args(args) # --version if general_options.version: sys.stdout.write(parser.version) sys.stdout.write(os.linesep) sys.exit() # pip || pip help -> print_help() if not args_else or (args_else[0] == 'help' and len(args_else) == 1): parser.print_help() sys.exit() # the subcommand name cmd_name = args_else[0] if cmd_name not in commands_dict: guess = get_similar_commands(cmd_name) msg = ['unknown command "%s"' % cmd_name] if guess: msg.append('maybe you meant "%s"' % guess) raise CommandError(' - '.join(msg)) # all the args without the subcommand cmd_args = args[:] cmd_args.remove(cmd_name) return cmd_name, cmd_args def check_isolated(args): isolated = False if "--isolated" in args: isolated = True return isolated def main(args=None): if args is None: args = sys.argv[1:] # Configure our deprecation warnings to be sent through loggers deprecation.install_warning_logger() autocomplete() try: cmd_name, cmd_args = parseopts(args) except PipError as exc: sys.stderr.write("ERROR: %s" % exc) sys.stderr.write(os.linesep) sys.exit(1) # Needed for locale.getpreferredencoding(False) to work # in pip.utils.encoding.auto_decode try: locale.setlocale(locale.LC_ALL, '') except locale.Error as e: # setlocale can apparently crash if locale are uninitialized logger.debug("Ignoring error %s when setting locale", e) command = commands_dict[cmd_name](isolated=check_isolated(cmd_args)) return command.main(cmd_args) # ########################################################### # # Writing freeze files class FrozenRequirement(object): def __init__(self, name, req, editable, comments=()): self.name = name self.req = req self.editable = editable self.comments = comments _rev_re = re.compile(r'-r(\d+)$') _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') @classmethod def from_dist(cls, dist, dependency_links): 
location = os.path.normcase(os.path.abspath(dist.location)) comments = [] from pip.vcs import vcs, get_src_requirement if dist_is_editable(dist) and vcs.get_backend_name(location): editable = True try: req = get_src_requirement(dist, location) except InstallationError as exc: logger.warning( "Error when trying to get requirement for VCS system %s, " "falling back to uneditable format", exc ) req = None if req is None: logger.warning( 'Could not determine repository location of %s', location ) comments.append( '## !! Could not determine repository location' ) req = dist.as_requirement() editable = False else: editable = False req = dist.as_requirement() specs = req.specs assert len(specs) == 1 and specs[0][0] in ["==", "==="], \ 'Expected 1 spec with == or ===; specs = %r; dist = %r' % \ (specs, dist) version = specs[0][1] ver_match = cls._rev_re.search(version) date_match = cls._date_re.search(version) if ver_match or date_match: svn_backend = vcs.get_backend('svn') if svn_backend: svn_location = svn_backend().get_location( dist, dependency_links, ) if not svn_location: logger.warning( 'Warning: cannot find svn location for %s', req) comments.append( '## FIXME: could not find svn URL in dependency_links ' 'for this package:' ) else: comments.append( '# Installing as editable to satisfy requirement %s:' % req ) if ver_match: rev = ver_match.group(1) else: rev = '{%s}' % date_match.group(1) editable = True req = '%s@%s#egg=%s' % ( svn_location, rev, cls.egg_name(dist) ) return cls(dist.project_name, req, editable, comments) @staticmethod def egg_name(dist): name = dist.egg_name() match = re.search(r'-py\d\.\d$', name) if match: name = name[:match.start()] return name def __str__(self): req = self.req if self.editable: req = '-e %s' % req return '\n'.join(list(self.comments) + [str(req)]) + '\n' if __name__ == '__main__': sys.exit(main())
gpl-3.0
petrutlucian94/nova_dev
nova/virt/libvirt/driver.py
1
217063
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright (c) 2011 Piston Cloud Computing, Inc # Copyright (c) 2012 University Of Minho # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A connection to a hypervisor through libvirt. Supports KVM, LXC, QEMU, UML, and XEN. **Related Flags** :driver_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm). :connection_uri: Override for the default libvirt URI (depends on driver_type). :disk_prefix: Override the default disk prefix for the devices attached to a server. :rescue_image_id: Rescue ami image (None = original image). :rescue_kernel_id: Rescue aki image (None = original image). :rescue_ramdisk_id: Rescue ari image (None = original image). 
:injected_network_template: Template file for injected network :allow_same_net_traffic: Whether to allow in project network traffic """ import errno import eventlet import functools import glob import mmap import os import shutil import socket import sys import tempfile import threading import time import uuid from eventlet import greenio from eventlet import greenthread from eventlet import patcher from eventlet import tpool from eventlet import util as eventlet_util from lxml import etree from oslo.config import cfg from nova.api.metadata import base as instance_metadata from nova import block_device from nova.compute import flavors from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_mode from nova import context as nova_context from nova import exception from nova.image import glance from nova.objects import flavor as flavor_obj from nova.objects import instance as instance_obj from nova.objects import service as service_obj from nova.openstack.common import excutils from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import processutils from nova.openstack.common import units from nova.openstack.common import xmlutils from nova.pci import pci_manager from nova.pci import pci_utils from nova.pci import pci_whitelist from nova import rpc from nova import utils from nova import version from nova.virt import configdrive from nova.virt import cpu from nova.virt.disk import api as disk from nova.virt import driver from nova.virt import event as virtevent from nova.virt import firewall from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import firewall as 
libvirt_firewall from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache from nova.virt.libvirt import utils as libvirt_utils from nova.virt import netutils from nova import volume from nova.volume import encryptors native_threading = patcher.original("threading") native_Queue = patcher.original("Queue") libvirt = None LOG = logging.getLogger(__name__) libvirt_opts = [ cfg.StrOpt('rescue_image_id', help='Rescue ami image', deprecated_group='DEFAULT'), cfg.StrOpt('rescue_kernel_id', help='Rescue aki image', deprecated_group='DEFAULT'), cfg.StrOpt('rescue_ramdisk_id', help='Rescue ari image', deprecated_group='DEFAULT'), cfg.StrOpt('virt_type', default='kvm', help='Libvirt domain type (valid options are: ' 'kvm, lxc, qemu, uml, xen)', deprecated_group='DEFAULT', deprecated_name='libvirt_type'), cfg.StrOpt('connection_uri', default='', help='Override the default libvirt URI ' '(which is dependent on virt_type)', deprecated_group='DEFAULT', deprecated_name='libvirt_uri'), cfg.BoolOpt('inject_password', default=False, help='Inject the admin password at boot time, ' 'without an agent.', deprecated_name='libvirt_inject_password', deprecated_group='DEFAULT'), cfg.BoolOpt('inject_key', default=False, help='Inject the ssh public key at boot time', deprecated_name='libvirt_inject_key', deprecated_group='DEFAULT'), cfg.IntOpt('inject_partition', default=-2, help='The partition to inject to : ' '-2 => disable, -1 => inspect (libguestfs only), ' '0 => not partitioned, >0 => partition number', deprecated_name='libvirt_inject_partition', deprecated_group='DEFAULT'), cfg.BoolOpt('use_usb_tablet', default=True, help='Sync virtual and real mouse cursors in Windows VMs', deprecated_group='DEFAULT'), cfg.StrOpt('live_migration_uri', default="qemu+tcp://%s/system", help='Migration target URI ' '(any included "%s" is replaced with ' 'the migration target hostname)', deprecated_group='DEFAULT'), cfg.StrOpt('live_migration_flag', 
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER', help='Migration flags to be set for live migration', deprecated_group='DEFAULT'), cfg.StrOpt('block_migration_flag', default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_NON_SHARED_INC', help='Migration flags to be set for block migration', deprecated_group='DEFAULT'), cfg.IntOpt('live_migration_bandwidth', default=0, help='Maximum bandwidth to be used during migration, in Mbps', deprecated_group='DEFAULT'), cfg.StrOpt('snapshot_image_format', help='Snapshot image format (valid options are : ' 'raw, qcow2, vmdk, vdi). ' 'Defaults to same as source image', deprecated_group='DEFAULT'), cfg.StrOpt('vif_driver', default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver', help='The libvirt VIF driver to configure the VIFs.', deprecated_name='libvirt_vif_driver', deprecated_group='DEFAULT'), cfg.ListOpt('volume_drivers', default=[ 'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver', 'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver', 'local=nova.virt.libvirt.volume.LibvirtVolumeDriver', 'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver', 'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver', 'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver', 'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver', 'smbfs=nova.virt.libvirt.volume.LibvirtSMBFSVolumeDriver', 'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver', 'glusterfs=' 'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver', 'fibre_channel=nova.virt.libvirt.volume.' 'LibvirtFibreChannelVolumeDriver', 'scality=' 'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver', ], help='Libvirt handlers for remote volumes.', deprecated_name='libvirt_volume_drivers', deprecated_group='DEFAULT'), cfg.StrOpt('disk_prefix', help='Override the default disk prefix for the devices attached' ' to a server, which is dependent on virt_type. 
' '(valid options are: sd, xvd, uvd, vd)', deprecated_name='libvirt_disk_prefix', deprecated_group='DEFAULT'), cfg.IntOpt('wait_soft_reboot_seconds', default=120, help='Number of seconds to wait for instance to shut down after' ' soft reboot request is made. We fall back to hard reboot' ' if instance does not shutdown within this window.', deprecated_name='libvirt_wait_soft_reboot_seconds', deprecated_group='DEFAULT'), cfg.StrOpt('cpu_mode', help='Set to "host-model" to clone the host CPU feature flags; ' 'to "host-passthrough" to use the host CPU model exactly; ' 'to "custom" to use a named CPU model; ' 'to "none" to not set any CPU model. ' 'If virt_type="kvm|qemu", it will default to ' '"host-model", otherwise it will default to "none"', deprecated_name='libvirt_cpu_mode', deprecated_group='DEFAULT'), cfg.StrOpt('cpu_model', help='Set to a named libvirt CPU model (see names listed ' 'in /usr/share/libvirt/cpu_map.xml). Only has effect if ' 'cpu_mode="custom" and virt_type="kvm|qemu"', deprecated_name='libvirt_cpu_model', deprecated_group='DEFAULT'), cfg.StrOpt('snapshots_directory', default='$instances_path/snapshots', help='Location where libvirt driver will store snapshots ' 'before uploading them to image service', deprecated_name='libvirt_snapshots_directory', deprecated_group='DEFAULT'), cfg.StrOpt('xen_hvmloader_path', default='/usr/lib/xen/boot/hvmloader', help='Location where the Xen hvmloader is kept', deprecated_group='DEFAULT'), cfg.ListOpt('disk_cachemodes', default=[], help='Specific cachemodes to use for different disk types ' 'e.g: file=directsync,block=none', deprecated_group='DEFAULT'), ] CONF = cfg.CONF CONF.register_opts(libvirt_opts, 'libvirt') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('live_migration_retry_count', 'nova.compute.manager') 
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc') CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice') CONF.import_opt('vcpu_pin_set', 'nova.virt.cpu') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( libvirt_firewall.__name__, libvirt_firewall.IptablesFirewallDriver.__name__) MAX_CONSOLE_BYTES = 100 * units.Ki # The libvirt driver will prefix any disable reason codes with this string. DISABLE_PREFIX = 'AUTO: ' # Disable reason for the service which was enabled or disabled without reason DISABLE_REASON_UNDEFINED = 'None' def patch_tpool_proxy(): """eventlet.tpool.Proxy doesn't work with old-style class in __str__() or __repr__() calls. See bug #962840 for details. We perform a monkey patch to replace those two instance methods. """ def str_method(self): return str(self._obj) def repr_method(self): return repr(self._obj) tpool.Proxy.__str__ = str_method tpool.Proxy.__repr__ = repr_method patch_tpool_proxy() VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 VIR_DOMAIN_PMSUSPENDED = 7 LIBVIRT_POWER_STATE = { VIR_DOMAIN_NOSTATE: power_state.NOSTATE, VIR_DOMAIN_RUNNING: power_state.RUNNING, # NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen. # It means that the VM is running and the vCPU is idle. So, # we map it to RUNNING VIR_DOMAIN_BLOCKED: power_state.RUNNING, VIR_DOMAIN_PAUSED: power_state.PAUSED, # NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN # means the domain is being shut down. So technically the domain # is still running. SHUTOFF is the real powered off state. # But we will map both to SHUTDOWN anyway. 
# http://libvirt.org/html/libvirt-libvirt.html VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN, VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN, VIR_DOMAIN_CRASHED: power_state.CRASHED, VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED, } MIN_LIBVIRT_VERSION = (0, 9, 6) # When the above version matches/exceeds this version # delete it & corresponding code using it MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10) MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1) # Live snapshot requirements REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU" MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0) MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0) # block size tuning requirements MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2) # BlockJobInfo management requirement MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1) def libvirt_error_handler(context, err): # Just ignore instead of default outputting to stderr. pass class LibvirtDriver(driver.ComputeDriver): capabilities = { "has_imagecache": True, "supports_recreate": True, } def __init__(self, virtapi, read_only=False): super(LibvirtDriver, self).__init__(virtapi) global libvirt if libvirt is None: libvirt = __import__('libvirt') self._host_state = None self._initiator = None self._fc_wwnns = None self._fc_wwpns = None self._wrapped_conn = None self._wrapped_conn_lock = threading.Lock() self._caps = None self._vcpu_total = 0 self.read_only = read_only self.firewall_driver = firewall.load_driver( DEFAULT_FIREWALL_DRIVER, self.virtapi, get_connection=self._get_connection) vif_class = importutils.import_class(CONF.libvirt.vif_driver) self.vif_driver = vif_class(self._get_connection) self.volume_drivers = driver.driver_dict_from_config( CONF.libvirt.volume_drivers, self) self.dev_filter = pci_whitelist.get_pci_devices_filter() self._event_queue = None self._disk_cachemode = None self.image_cache_manager = imagecache.ImageCacheManager() self.image_backend = imagebackend.Backend(CONF.use_cow_images) self.disk_cachemodes = {} self.valid_cachemodes = ["default", "none", "writethrough", "writeback", 
"directsync", "unsafe", ] for mode_str in CONF.libvirt.disk_cachemodes: disk_type, sep, cache_mode = mode_str.partition('=') if cache_mode not in self.valid_cachemodes: LOG.warn(_('Invalid cachemode %(cache_mode)s specified ' 'for disk type %(disk_type)s.'), {'cache_mode': cache_mode, 'disk_type': disk_type}) continue self.disk_cachemodes[disk_type] = cache_mode self._volume_api = volume.API() @property def disk_cachemode(self): if self._disk_cachemode is None: # We prefer 'none' for consistent performance, host crash # safety & migration correctness by avoiding host page cache. # Some filesystems (eg GlusterFS via FUSE) don't support # O_DIRECT though. For those we fallback to 'writethrough' # which gives host crash safety, and is safe for migration # provided the filesystem is cache coherent (cluster filesystems # typically are, but things like NFS are not). self._disk_cachemode = "none" if not self._supports_direct_io(CONF.instances_path): self._disk_cachemode = "writethrough" return self._disk_cachemode @property def host_state(self): if not self._host_state: self._host_state = HostState(self) return self._host_state def set_cache_mode(self, conf): """Set cache mode on LibvirtConfigGuestDisk object.""" try: source_type = conf.source_type driver_cache = conf.driver_cache except AttributeError: return cache_mode = self.disk_cachemodes.get(source_type, driver_cache) conf.driver_cache = cache_mode @staticmethod def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None): try: if lv_ver is not None: libvirt_version = conn.getLibVersion() if libvirt_version < utils.convert_version_to_int(lv_ver): return False if hv_ver is not None: hypervisor_version = conn.getVersion() if hypervisor_version < utils.convert_version_to_int(hv_ver): return False if hv_type is not None: hypervisor_type = conn.getType() if hypervisor_type != hv_type: return False return True except Exception: return False def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None): return 
self._has_min_version(self._conn, lv_ver, hv_ver, hv_type) def _native_thread(self): """Receives async events coming in from libvirtd. This is a native thread which runs the default libvirt event loop implementation. This processes any incoming async events from libvirtd and queues them for later dispatch. This thread is only permitted to use libvirt python APIs, and the driver.queue_event method. In particular any use of logging is forbidden, since it will confuse eventlet's greenthread integration """ while True: libvirt.virEventRunDefaultImpl() def _dispatch_thread(self): """Dispatches async events coming in from libvirtd. This is a green thread which waits for events to arrive from the libvirt event loop thread. This then dispatches the events to the compute manager. """ while True: self._dispatch_events() @staticmethod def _event_lifecycle_callback(conn, dom, event, detail, opaque): """Receives lifecycle events from libvirt. NB: this method is executing in a native thread, not an eventlet coroutine. It can only invoke other libvirt APIs, or use self.queue_event(). Any use of logging APIs in particular is forbidden. """ self = opaque uuid = dom.UUIDString() transition = None if event == libvirt.VIR_DOMAIN_EVENT_STOPPED: transition = virtevent.EVENT_LIFECYCLE_STOPPED elif event == libvirt.VIR_DOMAIN_EVENT_STARTED: transition = virtevent.EVENT_LIFECYCLE_STARTED elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED: transition = virtevent.EVENT_LIFECYCLE_PAUSED elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED: transition = virtevent.EVENT_LIFECYCLE_RESUMED if transition is not None: self._queue_event(virtevent.LifecycleEvent(uuid, transition)) def _queue_event(self, event): """Puts an event on the queue for dispatch. This method is called by the native event thread to put events on the queue for later dispatch by the green thread. """ if self._event_queue is None: LOG.debug(_("Event loop thread is not active, " "discarding event %s") % event) return # Queue the event... 
self._event_queue.put(event) # ...then wakeup the green thread to dispatch it c = ' '.encode() self._event_notify_send.write(c) self._event_notify_send.flush() def _dispatch_events(self): """Wait for & dispatch events from native thread Blocks until native thread indicates some events are ready. Then dispatches all queued events. """ # Wait to be notified that there are some # events pending try: _c = self._event_notify_recv.read(1) assert _c except ValueError: return # will be raised when pipe is closed # Process as many events as possible without # blocking while not self._event_queue.empty(): try: event = self._event_queue.get(block=False) self.emit_event(event) except native_Queue.Empty: pass def _init_events_pipe(self): """Create a self-pipe for the native thread to synchronize on. This code is taken from the eventlet tpool module, under terms of the Apache License v2.0. """ self._event_queue = native_Queue.Queue() try: rpipe, wpipe = os.pipe() self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0) self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0) except (ImportError, NotImplementedError): # This is Windows compatibility -- use a socket instead # of a pipe because pipes don't really exist on Windows. sock = eventlet_util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM) sock.bind(('localhost', 0)) sock.listen(50) csock = eventlet_util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM) csock.connect(('localhost', sock.getsockname()[1])) nsock, addr = sock.accept() self._event_notify_send = nsock.makefile('wb', 0) gsock = greenio.GreenSocket(csock) self._event_notify_recv = gsock.makefile('rb', 0) def _init_events(self): """Initializes the libvirt events subsystem. This requires running a native thread to provide the libvirt event loop integration. This forwards events to a green thread which does the actual dispatching. 
""" self._init_events_pipe() LOG.debug(_("Starting native event thread")) event_thread = native_threading.Thread(target=self._native_thread) event_thread.setDaemon(True) event_thread.start() LOG.debug(_("Starting green dispatch thread")) eventlet.spawn(self._dispatch_thread) def _do_quality_warnings(self): """Warn about untested driver configurations. This will log a warning message about untested driver or host arch configurations to indicate to administrators that the quality is unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems is tested upstream. """ caps = self.get_host_capabilities() arch = caps.host.cpu.arch if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or arch not in ('i686', 'x86_64')): LOG.warning(_('The libvirt driver is not tested on ' '%(type)s/%(arch)s by the OpenStack project and ' 'thus its quality can not be ensured. For more ' 'information, see: https://wiki.openstack.org/wiki/' 'HypervisorSupportMatrix'), {'type': CONF.libvirt.virt_type, 'arch': arch}) def init_host(self, host): self._do_quality_warnings() libvirt.registerErrorHandler(libvirt_error_handler, None) libvirt.virEventRegisterDefaultImpl() if not self.has_min_version(MIN_LIBVIRT_VERSION): major = MIN_LIBVIRT_VERSION[0] minor = MIN_LIBVIRT_VERSION[1] micro = MIN_LIBVIRT_VERSION[2] LOG.error(_('Nova requires libvirt version ' '%(major)i.%(minor)i.%(micro)i or greater.'), {'major': major, 'minor': minor, 'micro': micro}) self._init_events() def _get_new_connection(self): # call with _wrapped_conn_lock held LOG.debug(_('Connecting to libvirt: %s'), self.uri()) wrapped_conn = None try: wrapped_conn = self._connect(self.uri(), self.read_only) finally: # Enabling the compute service, in case it was disabled # since the connection was successful. 
disable_reason = DISABLE_REASON_UNDEFINED if not wrapped_conn: disable_reason = 'Failed to connect to libvirt' self._set_host_enabled(bool(wrapped_conn), disable_reason) self._wrapped_conn = wrapped_conn try: LOG.debug(_("Registering for lifecycle events %s"), self) wrapped_conn.domainEventRegisterAny( None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._event_lifecycle_callback, self) except Exception as e: LOG.warn(_("URI %(uri)s does not support events: %(error)s"), {'uri': self.uri(), 'error': e}) try: LOG.debug(_("Registering for connection events: %s") % str(self)) wrapped_conn.registerCloseCallback(self._close_callback, None) except (TypeError, AttributeError) as e: # NOTE: The registerCloseCallback of python-libvirt 1.0.1+ # is defined with 3 arguments, and the above registerClose- # Callback succeeds. However, the one of python-libvirt 1.0.0 # is defined with 4 arguments and TypeError happens here. # Then python-libvirt 0.9 does not define a method register- # CloseCallback. LOG.debug(_("The version of python-libvirt does not support " "registerCloseCallback or is too old: %s"), e) except libvirt.libvirtError as e: LOG.warn(_("URI %(uri)s does not support connection" " events: %(error)s"), {'uri': self.uri(), 'error': e}) return wrapped_conn def _get_connection(self): # multiple concurrent connections are protected by _wrapped_conn_lock with self._wrapped_conn_lock: wrapped_conn = self._wrapped_conn if not wrapped_conn or not self._test_connection(wrapped_conn): wrapped_conn = self._get_new_connection() return wrapped_conn _conn = property(_get_connection) def _close_callback(self, conn, reason, opaque): with self._wrapped_conn_lock: if conn == self._wrapped_conn: _error = _("Connection to libvirt lost: %s") % reason LOG.warn(_error) self._wrapped_conn = None # Disable compute service to avoid # new instances of being scheduled on this host. 
self._set_host_enabled(False, disable_reason=_error) @staticmethod def _test_connection(conn): try: conn.getLibVersion() return True except libvirt.libvirtError as e: if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): LOG.debug(_('Connection to libvirt broke')) return False raise @staticmethod def uri(): if CONF.libvirt.virt_type == 'uml': uri = CONF.libvirt.connection_uri or 'uml:///system' elif CONF.libvirt.virt_type == 'xen': uri = CONF.libvirt.connection_uri or 'xen:///' elif CONF.libvirt.virt_type == 'lxc': uri = CONF.libvirt.connection_uri or 'lxc:///' else: uri = CONF.libvirt.connection_uri or 'qemu:///system' return uri @staticmethod def _connect(uri, read_only): def _connect_auth_cb(creds, opaque): if len(creds) == 0: return 0 LOG.warning( _("Can not handle authentication request for %d credentials") % len(creds)) raise exception.NovaException( _("Can not handle authentication request for %d credentials") % len(creds)) auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_ECHOPROMPT, libvirt.VIR_CRED_REALM, libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_NOECHOPROMPT, libvirt.VIR_CRED_EXTERNAL], _connect_auth_cb, None] try: flags = 0 if read_only: flags = libvirt.VIR_CONNECT_RO # tpool.proxy_call creates a native thread. Due to limitations # with eventlet locking we cannot use the logging API inside # the called function. 
return tpool.proxy_call( (libvirt.virDomain, libvirt.virConnect), libvirt.openAuth, uri, auth, flags) except libvirt.libvirtError as ex: LOG.exception(_("Connection to libvirt failed: %s"), ex) payload = dict(ip=LibvirtDriver.get_host_ip_addr(), method='_connect', reason=ex) rpc.get_notifier('compute').error(nova_context.get_admin_context(), 'compute.libvirt.error', payload) raise exception.HypervisorUnavailable(host=CONF.host) def get_num_instances(self): """Efficient override of base instance_exists method.""" return self._conn.numOfDomains() def instance_exists(self, instance_name): """Efficient override of base instance_exists method.""" try: self._lookup_by_name(instance_name) return True except exception.NovaException: return False # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed. def list_instance_ids(self): if self._conn.numOfDomains() == 0: return [] return self._conn.listDomainsID() def list_instances(self): names = [] for domain_id in self.list_instance_ids(): try: # We skip domains with ID 0 (hypervisors). if domain_id != 0: domain = self._lookup_by_id(domain_id) names.append(domain.name()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue # extend instance list to contain also defined domains names.extend([vm for vm in self._conn.listDefinedDomains() if vm not in names]) return names def list_instance_uuids(self): uuids = set() for domain_id in self.list_instance_ids(): try: # We skip domains with ID 0 (hypervisors). 
if domain_id != 0: domain = self._lookup_by_id(domain_id) uuids.add(domain.UUIDString()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue # extend instance list to contain also defined domains for domain_name in self._conn.listDefinedDomains(): try: uuids.add(self._lookup_by_name(domain_name).UUIDString()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue return list(uuids) def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" for vif in network_info: self.vif_driver.plug(instance, vif) def unplug_vifs(self, instance, network_info): """Unplug VIFs from networks.""" for vif in network_info: self.vif_driver.unplug(instance, vif) def _teardown_container(self, instance): inst_path = libvirt_utils.get_instance_path(instance) container_dir = os.path.join(inst_path, 'rootfs') container_root_device = instance.get('root_device_name') disk.teardown_container(container_dir, container_root_device) def _destroy(self, instance): try: virt_dom = self._lookup_by_name(instance['name']) except exception.InstanceNotFound: virt_dom = None # If the instance is already terminated, we're still happy # Otherwise, destroy it old_domid = -1 if virt_dom is not None: try: old_domid = virt_dom.ID() virt_dom.destroy() # NOTE(GuanQiang): teardown container to avoid resource leak if CONF.libvirt.virt_type == 'lxc': self._teardown_container(instance) except libvirt.libvirtError as e: is_okay = False errcode = e.get_error_code() if errcode == libvirt.VIR_ERR_OPERATION_INVALID: # If the instance is already shut off, we get this: # Code=55 Error=Requested operation is not valid: # domain is not running (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() state = LIBVIRT_POWER_STATE[state] if state == power_state.SHUTDOWN: is_okay = True elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT: LOG.warn(_("Cannot destroy instance, operation time out"), instance=instance) reason = _("operation time out") raise 
exception.InstancePowerOffFailure(reason=reason) if not is_okay: with excutils.save_and_reraise_exception(): LOG.error(_('Error from libvirt during destroy. ' 'Code=%(errcode)s Error=%(e)s'), {'errcode': errcode, 'e': e}, instance=instance) def _wait_for_destroy(expected_domid): """Called at an interval until the VM is gone.""" # NOTE(vish): If the instance disappears during the destroy # we ignore it so the cleanup can still be # attempted because we would prefer destroy to # never fail. try: dom_info = self.get_info(instance) state = dom_info['state'] new_domid = dom_info['id'] except exception.InstanceNotFound: LOG.error(_("During wait destroy, instance disappeared."), instance=instance) raise loopingcall.LoopingCallDone() if state == power_state.SHUTDOWN: LOG.info(_("Instance destroyed successfully."), instance=instance) raise loopingcall.LoopingCallDone() # NOTE(wangpan): If the instance was booted again after destroy, # this may be a endless loop, so check the id of # domain here, if it changed and the instance is # still running, we should destroy it again. 
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details if new_domid != expected_domid: LOG.info(_("Instance may be started again."), instance=instance) kwargs['is_running'] = True raise loopingcall.LoopingCallDone() kwargs = {'is_running': False} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy, old_domid) timer.start(interval=0.5).wait() if kwargs['is_running']: LOG.info(_("Going to destroy instance again."), instance=instance) self._destroy(instance) def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): self._destroy(instance) self.cleanup(context, instance, network_info, block_device_info, destroy_disks) def _undefine_domain(self, instance): try: virt_dom = self._lookup_by_name(instance['name']) except exception.InstanceNotFound: virt_dom = None if virt_dom: try: try: virt_dom.undefineFlags( libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE) except libvirt.libvirtError: LOG.debug(_("Error from libvirt during undefineFlags." " Retrying with undefine"), instance=instance) virt_dom.undefine() except AttributeError: # NOTE(vish): Older versions of libvirt don't support # undefine flags, so attempt to do the # right thing. try: if virt_dom.hasManagedSaveImage(0): virt_dom.managedSaveRemove(0) except AttributeError: pass virt_dom.undefine() except libvirt.libvirtError as e: with excutils.save_and_reraise_exception(): errcode = e.get_error_code() LOG.error(_('Error from libvirt during undefine. 
' 'Code=%(errcode)s Error=%(e)s') % {'errcode': errcode, 'e': e}, instance=instance) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): self._undefine_domain(instance) self.unplug_vifs(instance, network_info) retry = True while retry: try: self.firewall_driver.unfilter_instance(instance, network_info=network_info) except libvirt.libvirtError as e: try: state = self.get_info(instance)['state'] except exception.InstanceNotFound: state = power_state.SHUTDOWN if state != power_state.SHUTDOWN: LOG.warn(_("Instance may be still running, destroy " "it again."), instance=instance) self._destroy(instance) else: retry = False errcode = e.get_error_code() LOG.exception(_('Error from libvirt during unfilter. ' 'Code=%(errcode)s Error=%(e)s') % {'errcode': errcode, 'e': e}, instance=instance) reason = "Error unfiltering instance." raise exception.InstanceTerminationFailure(reason=reason) except Exception: retry = False raise else: retry = False # FIXME(wangpan): if the instance is booted again here, such as the # the soft reboot operation boot it here, it will # become "running deleted", should we check and destroy # it at the end of this method? # NOTE(vish): we disconnect from volumes regardless block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_dev = vol['mount_device'].rpartition("/")[2] if ('data' in connection_info and 'volume_id' in connection_info['data']): volume_id = connection_info['data']['volume_id'] encryption = encryptors.get_encryption_metadata( context, self._volume_api, volume_id, connection_info) if encryption: # The volume must be detached from the VM before # disconnecting it from its encryptor. Otherwise, the # encryptor may report that the volume is still in use. 
encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.detach_volume(**encryption) try: self.volume_driver_method('disconnect_volume', connection_info, disk_dev) except Exception as exc: with excutils.save_and_reraise_exception() as ctxt: if destroy_disks: # Don't block on Volume errors if we're trying to # delete the instance as we may be patially created # or deleted ctxt.reraise = False LOG.warn(_("Ignoring Volume Error on vol %(vol_id)s " "during delete %(exc)s"), {'vol_id': vol.get('volume_id'), 'exc': exc}, instance=instance) if destroy_disks: self._delete_instance_files(instance) self._cleanup_lvm(instance) #NOTE(haomai): destroy volumes if needed if CONF.libvirt.images_type == 'rbd': self._cleanup_rbd(instance) def _cleanup_rbd(self, instance): pool = CONF.libvirt.images_rbd_pool volumes = libvirt_utils.list_rbd_volumes(pool) pattern = instance['uuid'] def belongs_to_instance(disk): return disk.startswith(pattern) volumes = filter(belongs_to_instance, volumes) if volumes: libvirt_utils.remove_rbd_volumes(pool, *volumes) def _cleanup_lvm(self, instance): """Delete all LVM disks for given instance object.""" disks = self._lvm_disks(instance) if disks: libvirt_utils.remove_logical_volumes(*disks) def _lvm_disks(self, instance): """Returns all LVM disks for given instance object.""" if CONF.libvirt.images_volume_group: vg = os.path.join('/dev', CONF.libvirt.images_volume_group) if not os.path.exists(vg): return [] pattern = '%s_' % instance['name'] def belongs_to_instance(disk): return disk.startswith(pattern) def fullpath(name): return os.path.join(vg, name) logical_volumes = libvirt_utils.list_logical_volumes(vg) disk_names = filter(belongs_to_instance, logical_volumes) disks = map(fullpath, disk_names) return disks return [] def get_volume_connector(self, instance): if not self._initiator: self._initiator = libvirt_utils.get_iscsi_initiator() if not self._initiator: LOG.debug(_('Could not determine iscsi initiator name'), 
instance=instance) if not self._fc_wwnns: self._fc_wwnns = libvirt_utils.get_fc_wwnns() if not self._fc_wwnns or len(self._fc_wwnns) == 0: LOG.debug(_('Could not determine fibre channel ' 'world wide node names'), instance=instance) if not self._fc_wwpns: self._fc_wwpns = libvirt_utils.get_fc_wwpns() if not self._fc_wwpns or len(self._fc_wwpns) == 0: LOG.debug(_('Could not determine fibre channel ' 'world wide port names'), instance=instance) connector = {'ip': CONF.my_ip, 'host': CONF.host} if self._initiator: connector['initiator'] = self._initiator if self._fc_wwnns and self._fc_wwpns: connector["wwnns"] = self._fc_wwnns connector["wwpns"] = self._fc_wwpns return connector def _cleanup_resize(self, instance, network_info): target = libvirt_utils.get_instance_path(instance) + "_resize" if os.path.exists(target): # Deletion can fail over NFS, so retry the deletion as required. # Set maximum attempt as 5, most test can remove the directory # for the second time. utils.execute('rm', '-rf', target, delay_on_retry=True, attempts=5) if instance['host'] != CONF.host: self._undefine_domain(instance) self.unplug_vifs(instance, network_info) self.firewall_driver.unfilter_instance(instance, network_info) def volume_driver_method(self, method_name, connection_info, *args, **kwargs): driver_type = connection_info.get('driver_volume_type') if driver_type not in self.volume_drivers: raise exception.VolumeDriverNotFound(driver_type=driver_type) driver = self.volume_drivers[driver_type] method = getattr(driver, method_name) return method(connection_info, *args, **kwargs) def _get_volume_encryptor(self, connection_info, encryption): encryptor = encryptors.get_volume_encryptor(connection_info, **encryption) return encryptor def attach_volume(self, context, connection_info, instance, mountpoint, encryption=None): instance_name = instance['name'] virt_dom = self._lookup_by_name(instance_name) disk_dev = mountpoint.rpartition("/")[2] disk_info = { 'dev': disk_dev, 'bus': 
blockinfo.get_disk_bus_for_disk_dev( CONF.libvirt.virt_type, disk_dev), 'type': 'disk', } # Note(cfb): If the volume has a custom block size, check that # that we are using QEMU/KVM and libvirt >= 0.10.2. The # presence of a block size is considered mandatory by # cinder so we fail if we can't honor the request. data = {} if ('data' in connection_info): data = connection_info['data'] if ('logical_block_size' in data or 'physical_block_size' in data): if ((CONF.libvirt.virt_type != "kvm" and CONF.libvirt.virt_type != "qemu")): msg = _("Volume sets block size, but the current " "libvirt hypervisor '%s' does not support custom " "block size") % CONF.libvirt.virt_type raise exception.InvalidHypervisorType(msg) if not self.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION): ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION]) msg = _("Volume sets block size, but libvirt '%s' or later is " "required.") % ver raise exception.Invalid(msg) conf = self.volume_driver_method('connect_volume', connection_info, disk_info) self.set_cache_mode(conf) try: # NOTE(vish): We can always affect config because our # domains are persistent, but we should only # affect live if the domain is running. 
            # Persistent domains always receive the config change; apply it
            # to the live domain too only when the guest is running.
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state == power_state.RUNNING:
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

            # cache device_path in connection_info -- required by encryptors
            if 'data' in connection_info:
                connection_info['data']['device_path'] = conf.source_path

            if encryption:
                # Set up the encryptor before the guest sees the device so
                # the guest only ever reads/writes decrypted data.
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.attach_volume(context, **encryption)

            virt_dom.attachDeviceFlags(conf.to_xml(), flags)
        except Exception as ex:
            if isinstance(ex, libvirt.libvirtError):
                errcode = ex.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                    # Attach rejected by libvirt (e.g. target dev in use):
                    # undo the host-side connection before reporting busy.
                    self.volume_driver_method('disconnect_volume',
                                              connection_info,
                                              disk_dev)
                    raise exception.DeviceIsBusy(device=disk_dev)

            # Any other failure: disconnect the volume on the host and
            # re-raise the original exception for the caller.
            with excutils.save_and_reraise_exception():
                self.volume_driver_method('disconnect_volume',
                                          connection_info,
                                          disk_dev)

    def _swap_volume(self, domain, disk_path, new_path):
        """Swap existing disk with a new block device.

        Performs a live blockRebase copy from *disk_path* onto *new_path*
        and pivots the domain onto the copy once it has converged.
        """
        # Save a copy of the domain's running XML file so the persistent
        # definition can be restored in the finally block below.
        xml = domain.XMLDesc(0)

        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            domain.blockJobAbort(disk_path, 0)
        except Exception:
            pass

        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            #             domains, so we need to temporarily undefine it.
            #             If any part of this block fails, the domain is
            #             re-defined regardless.
            if domain.isPersistent():
                domain.undefine()

            # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
            # allow writing to existing external volume file
            domain.blockRebase(disk_path, new_path, 0,
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)

            # Poll until the copy job has converged, then pivot the domain
            # onto the new device by aborting the mirror job with PIVOT.
            while self._wait_for_block_job(domain, disk_path):
                time.sleep(0.5)

            domain.blockJobAbort(disk_path,
                                 libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
        finally:
            # Always restore the persistent definition saved above.
            self._conn.defineXML(xml)

    def swap_volume(self, old_connection_info,
                    new_connection_info, instance, mountpoint):
        """Replace the volume attached at *mountpoint* with a new one.

        Connects the new volume, live-swaps the guest disk onto it via
        _swap_volume() and finally disconnects the old volume on the host.
        """
        instance_name = instance['name']
        virt_dom = self._lookup_by_name(instance_name)
        disk_dev = mountpoint.rpartition("/")[2]
        xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
        if not xml:
            raise exception.DiskNotFound(location=disk_dev)
        disk_info = {
            'dev': disk_dev,
            'bus': blockinfo.get_disk_bus_for_disk_dev(
                CONF.libvirt.virt_type, disk_dev),
            'type': 'disk',
            }
        conf = self.volume_driver_method('connect_volume',
                                         new_connection_info,
                                         disk_info)

        # The rebase target must be a local host device path; volume
        # drivers that expose no source_path cannot be swapped.
        if not conf.source_path:
            self.volume_driver_method('disconnect_volume',
                                      new_connection_info,
                                      disk_dev)
            raise NotImplementedError(_("Swap only supports host devices"))

        self._swap_volume(virt_dom, disk_dev, conf.source_path)
        self.volume_driver_method('disconnect_volume',
                                  old_connection_info,
                                  disk_dev)

    @staticmethod
    def _get_disk_xml(xml, device):
        """Returns the xml for the disk mounted at device.

        Returns None when the domain XML cannot be parsed or when no
        <disk> element targets the given device name.
        """
        try:
            doc = etree.fromstring(xml)
        except Exception:
            return None
        ret = doc.findall('./devices/disk')
        for node in ret:
            for child in node.getchildren():
                if child.tag == 'target':
                    if child.get('dev') == device:
                        return etree.tostring(node)

    def _get_existing_domain_xml(self, instance, network_info,
                                 block_device_info=None):
        """Return domain XML for an instance.

        Uses the live libvirt definition when the domain exists; if the
        domain is gone the XML is regenerated from the instance's disk
        and network information.
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
            xml = virt_dom.XMLDesc(0)
        except exception.InstanceNotFound:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                block_device_info)
            xml = self.to_xml(nova_context.get_admin_context(),
                              instance, network_info, disk_info,
                              block_device_info=block_device_info)
        return xml

    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the volume mounted at *mountpoint* from the instance."""
        instance_name = instance['name']
        disk_dev = mountpoint.rpartition("/")[2]
        try:
            virt_dom = self._lookup_by_name(instance_name)
            xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
            if not xml:
                raise exception.DiskNotFound(location=disk_dev)
            else:
                # NOTE(vish): We can always affect config because our
                #             domains are persistent, but we should only
                #             affect live if the domain is running.
                flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
                state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
                if state == power_state.RUNNING:
                    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
                virt_dom.detachDeviceFlags(xml, flags)
                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)
        except libvirt.libvirtError as ex:
            # NOTE(vish): This is called to cleanup volumes after live
            #             migration, so we should still disconnect even if
            #             the instance doesn't exist here anymore.
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                # NOTE(vish): domain already gone (e.g. after live
                #             migration); host-side disconnect below must
                #             still run.
                LOG.warn(_("During detach_volume, instance disappeared."))
            else:
                raise

        # Disconnect the volume on the host regardless of whether the
        # guest-side detach happened or the domain was already gone.
        self.volume_driver_method('disconnect_volume',
                                  connection_info,
                                  disk_dev)

    def attach_interface(self, instance, image_meta, vif):
        """Hot-plug a network interface (VIF) into an instance.

        Plugs the VIF on the host, sets up basic firewall filtering and
        attaches the interface device to the libvirt domain.  On libvirt
        failure the VIF is unplugged again and InterfaceAttachFailed is
        raised.
        """
        virt_dom = self._lookup_by_name(instance['name'])
        flavor = flavor_obj.Flavor.get_by_id(
            nova_context.get_admin_context(read_deleted='yes'),
            instance['instance_type_id'])
        self.vif_driver.plug(instance, vif)
        self.firewall_driver.setup_basic_filtering(instance, [vif])
        cfg = self.vif_driver.get_config(instance, vif, image_meta, flavor)
        try:
            # Persistent config always updated; live domain only when
            # running.
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state == power_state.RUNNING:
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
            virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
        except libvirt.libvirtError:
            LOG.error(_('attaching network adapter failed.'),
                      instance=instance)
            # Roll back the host-side plug so we do not leak host devices.
            self.vif_driver.unplug(instance, vif)
            # NOTE(review): the exception is constructed with the instance
            # positionally — confirm InterfaceAttachFailed's expected args.
            raise exception.InterfaceAttachFailed(instance)

    def detach_interface(self, instance, vif):
        """Hot-unplug a network interface (VIF) from an instance.

        Unplugs the VIF on the host first, then detaches the device from
        the libvirt domain.  A missing domain is only logged (the guest
        is already gone); other libvirt errors raise
        InterfaceDetachFailed.
        """
        virt_dom = self._lookup_by_name(instance['name'])
        flavor = flavor_obj.Flavor.get_by_id(
            nova_context.get_admin_context(read_deleted='yes'),
            instance['instance_type_id'])
        cfg = self.vif_driver.get_config(instance, vif, None, flavor)
        try:
            self.vif_driver.unplug(instance, vif)
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state == power_state.RUNNING:
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
            virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warn(_("During detach_interface, "
                           "instance disappeared."),
                         instance=instance)
            else:
                LOG.error(_('detaching network adapter failed.'),
                          instance=instance)
                raise exception.InterfaceDetachFailed(instance)

    def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
        """Build the Glance image metadata dict for an instance snapshot.

        :param base: metadata of the image the instance was booted from
        :param instance: instance being snapshotted
        :param img_fmt: disk format for the uploaded snapshot
        :param snp_name: name for the new snapshot image
        :returns: dict suitable for image_service.update()
        """
        metadata = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                        }
                    }
        if instance['os_type']:
            metadata['properties']['os_type'] = instance['os_type']

        # NOTE(vish): glance forces ami disk format to be ami
        if base.get('disk_format') == 'ami':
            metadata['disk_format'] = 'ami'
        else:
            metadata['disk_format'] = img_fmt

        metadata['container_format'] = base.get('container_format', 'bare')

        return metadata

    def snapshot(self, context, instance, image_href, update_task_state):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])

        # Metadata of the image the instance was booted from; used as the
        # base for the snapshot image's metadata.
        (image_service, image_id) = glance.get_remote_image_service(
            context, instance['image_ref'])
        base = compute_utils.get_image_metadata(
            context, image_service, image_id, instance)

        _image_service = glance.get_remote_image_service(context, image_href)
        snapshot_image_service, snapshot_image_id = _image_service
        snapshot = snapshot_image_service.show(context, snapshot_image_id)

        disk_path = libvirt_utils.find_disk(virt_dom)
        source_format = libvirt_utils.get_disk_type(disk_path)

        image_format = CONF.libvirt.snapshot_image_format or source_format

        # NOTE(bfilippov): save lvm and rbd as raw
        if image_format == 'lvm' or image_format == 'rbd':
            image_format = 'raw'

        metadata = self._create_snapshot_metadata(base,
                                                  instance,
                                                  image_format,
                                                  snapshot['name'])

        snapshot_name = uuid.uuid4().hex

        (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
        state = LIBVIRT_POWER_STATE[state]

        # NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
        #            These restrictions can be relaxed as other configurations
        #            can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION, MIN_QEMU_LIVESNAPSHOT_VERSION, REQ_HYPERVISOR_LIVESNAPSHOT) \ and not source_format == "lvm" and not source_format == 'rbd': live_snapshot = True # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. This operation also # confirms the running instance, as opposed to the system as a # whole, has a new enough version of the hypervisor (bug 1193146). try: virt_dom.blockJobAbort(disk_path, 0) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED: live_snapshot = False else: pass else: live_snapshot = False # NOTE(rmk): We cannot perform live snapshots when a managedSave # file is present, so we will use the cold/legacy method # for instances which are shutdown. if state == power_state.SHUTDOWN: live_snapshot = False # NOTE(dkang): managedSave does not work for LXC if CONF.libvirt.virt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING or state == power_state.PAUSED: self._detach_pci_devices(virt_dom, pci_manager.get_instance_pci_devs(instance)) virt_dom.managedSave(0) snapshot_backend = self.image_backend.snapshot(disk_path, image_type=source_format) if live_snapshot: LOG.info(_("Beginning live snapshot process"), instance=instance) else: LOG.info(_("Beginning cold snapshot process"), instance=instance) update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) snapshot_directory = CONF.libvirt.snapshots_directory fileutils.ensure_tree(snapshot_directory) with utils.tempdir(dir=snapshot_directory) as tmpdir: try: out_path = os.path.join(tmpdir, snapshot_name) if live_snapshot: # NOTE(xqueralt): libvirt needs o+x in the temp directory os.chmod(tmpdir, 0o701) self._live_snapshot(virt_dom, disk_path, out_path, image_format) else: snapshot_backend.snapshot_extract(out_path, image_format) finally: new_dom = None # NOTE(dkang): because previous managedSave is not called # for LXC, 
_create_domain must not be called. if CONF.libvirt.virt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING: new_dom = self._create_domain(domain=virt_dom) elif state == power_state.PAUSED: new_dom = self._create_domain(domain=virt_dom, launch_flags=libvirt.VIR_DOMAIN_START_PAUSED) if new_dom is not None: self._attach_pci_devices(new_dom, pci_manager.get_instance_pci_devs(instance)) LOG.info(_("Snapshot extracted, beginning image upload"), instance=instance) # Upload that image to the image service update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) with libvirt_utils.file_open(out_path) as image_file: image_service.update(context, image_href, metadata, image_file) LOG.info(_("Snapshot image upload complete"), instance=instance) @staticmethod def _wait_for_block_job(domain, disk_path, abort_on_error=False): """Wait for libvirt block job to complete. Libvirt may return either cur==end or an empty dict when the job is complete, depending on whether the job has been cleaned up by libvirt yet, or not. :returns: True if still in progress False if completed """ status = domain.blockJobInfo(disk_path, 0) if status == -1 and abort_on_error: msg = _('libvirt error while requesting blockjob info.') raise exception.NovaException(msg) try: cur = status.get('cur', 0) end = status.get('end', 0) except Exception: return False if cur == end: return False else: return True def _live_snapshot(self, domain, disk_path, out_path, image_format): """Snapshot an instance without downtime.""" # Save a copy of the domain's running XML file xml = domain.XMLDesc(0) # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. try: domain.blockJobAbort(disk_path, 0) except Exception: pass # NOTE (rmk): We are using shallow rebases as a workaround to a bug # in QEMU 1.3. 
In order to do this, we need to create # a destination image with the original backing file # and matching size of the instance root disk. src_disk_size = libvirt_utils.get_disk_size(disk_path) src_back_path = libvirt_utils.get_disk_backing_file(disk_path, basename=False) disk_delta = out_path + '.delta' libvirt_utils.create_cow_image(src_back_path, disk_delta, src_disk_size) try: # NOTE (rmk): blockRebase cannot be executed on persistent # domains, so we need to temporarily undefine it. # If any part of this block fails, the domain is # re-defined regardless. if domain.isPersistent(): domain.undefine() # NOTE (rmk): Establish a temporary mirror of our root disk and # issue an abort once we have a complete copy. domain.blockRebase(disk_path, disk_delta, 0, libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW) while self._wait_for_block_job(domain, disk_path): time.sleep(0.5) domain.blockJobAbort(disk_path, 0) libvirt_utils.chown(disk_delta, os.getuid()) finally: self._conn.defineXML(xml) # Convert the delta (CoW) image with a backing file to a flat # image with no backing file. libvirt_utils.extract_snapshot(disk_delta, 'qcow2', out_path, image_format) def _volume_snapshot_update_status(self, context, snapshot_id, status): """Send a snapshot status update to Cinder. This method captures and logs exceptions that occur since callers cannot do anything useful with these exceptions. Operations on the Cinder side waiting for this will time out if a failure occurs sending the update. :param context: security context :param snapshot_id: id of snapshot being updated :param status: new status value """ try: self._volume_api.update_snapshot_status(context, snapshot_id, status) except Exception: msg = _('Failed to send updated snapshot status ' 'to volume service.') LOG.exception(msg) def _volume_snapshot_create(self, context, instance, domain, volume_id, snapshot_id, new_file): """Perform volume snapshot. 
:param domain: VM that volume is attached to :param volume_id: volume UUID to snapshot :param snapshot_id: UUID of snapshot being created :param new_file: relative path to new qcow2 file present on share """ xml = domain.XMLDesc(0) xml_doc = etree.fromstring(xml) device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) disks_to_snap = [] # to be snapshotted by libvirt disks_to_skip = [] # local disks not snapshotted for disk in device_info.devices: if (disk.root_name != 'disk'): continue if (disk.target_dev is None): continue if (disk.serial is None or disk.serial != volume_id): disks_to_skip.append(disk.source_path) continue # disk is a Cinder volume with the correct volume_id disk_info = { 'dev': disk.target_dev, 'serial': disk.serial, 'current_file': disk.source_path } # Determine path for new_file based on current path current_file = disk_info['current_file'] new_file_path = os.path.join(os.path.dirname(current_file), new_file) disks_to_snap.append((current_file, new_file_path)) if not disks_to_snap: msg = _('Found no disk to snapshot.') raise exception.NovaException(msg) snapshot = vconfig.LibvirtConfigGuestSnapshot() for current_name, new_filename in disks_to_snap: snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk() snap_disk.name = current_name snap_disk.source_path = new_filename snap_disk.source_type = 'file' snap_disk.snapshot = 'external' snap_disk.driver_name = 'qcow2' snapshot.add_disk(snap_disk) for dev in disks_to_skip: snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk() snap_disk.name = dev snap_disk.snapshot = 'no' snapshot.add_disk(snap_disk) snapshot_xml = snapshot.to_xml() LOG.debug(_("snap xml: %s") % snapshot_xml) snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE try: domain.snapshotCreateXML(snapshot_xml, snap_flags | QUIESCE) return except libvirt.libvirtError: msg = 
_('Unable to create quiesced VM snapshot, ' 'attempting again with quiescing disabled.') LOG.exception(msg) try: domain.snapshotCreateXML(snapshot_xml, snap_flags) except libvirt.libvirtError: msg = _('Unable to create VM snapshot, ' 'failing volume_snapshot operation.') LOG.exception(msg) raise def volume_snapshot_create(self, context, instance, volume_id, create_info): """Create snapshots of a Cinder volume via libvirt. :param instance: VM instance object reference :param volume_id: id of volume being snapshotted :param create_info: dict of information used to create snapshots - snapshot_id : ID of snapshot - type : qcow2 / <other> - new_file : qcow2 file created by Cinder which becomes the VM's active image after the snapshot is complete """ LOG.debug(_("volume_snapshot_create: create_info: %(c_info)s"), {'c_info': create_info}, instance=instance) try: virt_dom = self._lookup_by_name(instance.name) except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) if create_info['type'] != 'qcow2': raise exception.NovaException(_('Unknown type: %s') % create_info['type']) snapshot_id = create_info.get('snapshot_id', None) if snapshot_id is None: raise exception.NovaException(_('snapshot_id required ' 'in create_info')) try: self._volume_snapshot_create(context, instance, virt_dom, volume_id, snapshot_id, create_info['new_file']) except Exception: with excutils.save_and_reraise_exception(): msg = _('Error occurred during volume_snapshot_create, ' 'sending error status to Cinder.') LOG.exception(msg) self._volume_snapshot_update_status( context, snapshot_id, 'error') self._volume_snapshot_update_status( context, snapshot_id, 'creating') def _volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info=None): """ Note: if file being merged into == active image: do a blockRebase (pull) operation else: do a blockCommit operation Files must be adjacent in snap chain. 
:param instance: instance object reference :param volume_id: volume UUID :param snapshot_id: snapshot UUID (unused currently) :param delete_info: { 'type': 'qcow2', 'file_to_merge': 'a.img', 'merge_target_file': 'b.img' or None (if merging file_to_merge into active image) } Libvirt blockjob handling required for this method is broken in versions of libvirt that do not contain: http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1) (Patch is pending in 1.0.5-maint branch as well, but we cannot detect libvirt 1.0.5.5 vs. 1.0.5.6 here.) """ if not self.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION): ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION]) msg = _("Libvirt '%s' or later is required for online deletion " "of volume snapshots.") % ver raise exception.Invalid(msg) LOG.debug(_('volume_snapshot_delete: delete_info: %s') % delete_info) if delete_info['type'] != 'qcow2': msg = _('Unknown delete_info type %s') % delete_info['type'] raise exception.NovaException(msg) try: virt_dom = self._lookup_by_name(instance.name) except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) ##### Find dev name my_dev = None active_disk = None xml = virt_dom.XMLDesc(0) xml_doc = etree.fromstring(xml) device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) for disk in device_info.devices: if (disk.root_name != 'disk'): continue if (disk.target_dev is None or disk.serial is None): continue if disk.serial == volume_id: my_dev = disk.target_dev active_disk = disk.source_path if my_dev is None or active_disk is None: msg = _('Unable to locate disk matching id: %s') % volume_id raise exception.NovaException(msg) LOG.debug(_("found dev, it's %(dev)s, with active disk: %(disk)s"), {'dev': my_dev, 'disk': active_disk}) if delete_info['merge_target_file'] is None: # pull via blockRebase() # Merge the most recent snapshot into the active image rebase_disk = my_dev rebase_base = delete_info['file_to_merge'] rebase_bw 
= 0 rebase_flags = 0 LOG.debug(_('disk: %(disk)s, base: %(base)s, ' 'bw: %(bw)s, flags: %(flags)s') % {'disk': rebase_disk, 'base': rebase_base, 'bw': rebase_bw, 'flags': rebase_flags}) result = virt_dom.blockRebase(rebase_disk, rebase_base, rebase_bw, rebase_flags) if result == 0: LOG.debug(_('blockRebase started successfully')) while self._wait_for_block_job(virt_dom, rebase_disk, abort_on_error=True): LOG.debug(_('waiting for blockRebase job completion')) time.sleep(0.5) else: # commit with blockCommit() commit_disk = my_dev commit_base = delete_info['merge_target_file'] commit_top = delete_info['file_to_merge'] bandwidth = 0 flags = 0 result = virt_dom.blockCommit(commit_disk, commit_base, commit_top, bandwidth, flags) if result == 0: LOG.debug(_('blockCommit started successfully')) while self._wait_for_block_job(virt_dom, commit_disk, abort_on_error=True): LOG.debug(_('waiting for blockCommit job completion')) time.sleep(0.5) def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info=None): try: self._volume_snapshot_delete(context, instance, volume_id, snapshot_id, delete_info=delete_info) except Exception: with excutils.save_and_reraise_exception(): msg = _('Error occurred during volume_snapshot_delete, ' 'sending error status to Cinder.') LOG.exception(msg) self._volume_snapshot_update_status( context, snapshot_id, 'error_deleting') self._volume_snapshot_update_status(context, snapshot_id, 'deleting') def reboot(self, context, instance, network_info, reboot_type='SOFT', block_device_info=None, bad_volumes_callback=None): """Reboot a virtual machine, given an instance reference.""" if reboot_type == 'SOFT': # NOTE(vish): This will attempt to do a graceful shutdown/restart. 
            try:
                soft_reboot_success = self._soft_reboot(instance)
            except libvirt.libvirtError as e:
                LOG.debug(_("Instance soft reboot failed: %s"), e)
                soft_reboot_success = False

            if soft_reboot_success:
                LOG.info(_("Instance soft rebooted successfully."),
                         instance=instance)
                return
            else:
                LOG.warn(_("Failed to soft reboot instance. "
                           "Trying hard reboot."),
                         instance=instance)
        # Fall through to a hard reboot: either reboot_type was 'HARD',
        # or the soft reboot above did not succeed.
        return self._hard_reboot(context, instance, network_info,
                                 block_device_info)

    def _soft_reboot(self, instance):
        """Attempt to shutdown and restart the instance gracefully.

        We use shutdown and create here so we can return if the guest
        responded and actually rebooted. Note that this method only
        succeeds if the guest responds to acpi. Therefore we return
        success or failure so we can fall back to a hard reboot if
        necessary.

        :returns: True if the reboot succeeded
        """
        dom = self._lookup_by_name(instance["name"])
        (state, _max_mem, _mem, _cpus, _t) = dom.info()
        state = LIBVIRT_POWER_STATE[state]
        old_domid = dom.ID()
        # NOTE(vish): This check allows us to reboot an instance that
        #             is already shutdown.
        if state == power_state.RUNNING:
            dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
        #             FLAG defines depending on how long the get_info
        #             call takes to return.
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance))
        # Poll roughly once per second for the guest to shut down or for
        # a new domain to appear in its place.
        for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
            dom = self._lookup_by_name(instance["name"])
            (state, _max_mem, _mem, _cpus, _t) = dom.info()
            state = LIBVIRT_POWER_STATE[state]
            new_domid = dom.ID()
            # NOTE(ivoks): By checking domain IDs, we make sure we are
            #              not recreating domain that's already running.
            if old_domid != new_domid:
                if state in [power_state.SHUTDOWN,
                             power_state.CRASHED]:
                    LOG.info(_("Instance shutdown successfully."),
                             instance=instance)
                    self._create_domain(domain=dom)
                    timer = loopingcall.FixedIntervalLoopingCall(
                        self._wait_for_running, instance)
                    timer.start(interval=0.5).wait()
                    return True
                else:
                    LOG.info(_("Instance may have been rebooted during soft "
                               "reboot, so return now."), instance=instance)
                    return True
            greenthread.sleep(1)
        # Guest never responded within the timeout; caller falls back to
        # a hard reboot.
        return False

    def _hard_reboot(self, context, instance, network_info,
                     block_device_info=None):
        """Reboot a virtual machine, given an instance reference.

        Performs a Libvirt reset (if supported) on the domain.

        If Libvirt reset is unavailable this method actually destroys and
        re-creates the domain to ensure the reboot happens, as the guest
        OS cannot ignore this action.

        If xml is set, it uses the passed in xml in place of the xml from the
        existing domain.
        """
        self._destroy(instance)

        # Get the system metadata from the instance
        system_meta = utils.instance_sys_meta(instance)

        # Convert the system metadata to image metadata
        image_meta = utils.get_image_from_system_metadata(system_meta)
        if not image_meta:
            # No image metadata in system metadata: fetch it from Glance
            # via the instance's image_ref instead.
            image_ref = instance.get('image_ref')
            service, image_id = glance.get_remote_image_service(context,
                                                                image_ref)
            image_meta = compute_utils.get_image_metadata(context,
                                                          service,
                                                          image_id,
                                                          instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        # NOTE(vish): This could generate the wrong device_format if we are
        #             using the raw backend and the images don't exist yet.
        #             The create_images_and_backing below doesn't properly
        #             regenerate raw backend images, however, so when it
        #             does we need to (re)generate the xml after the images
        #             are in place.
        xml = self.to_xml(context, instance, network_info, disk_info,
                          block_device_info=block_device_info,
                          write_to_disk=True)

        # NOTE (rmk): Re-populate any missing backing files.
        disk_info_json = self.get_instance_disk_info(instance['name'], xml,
                                                     block_device_info)
        instance_dir = libvirt_utils.get_instance_path(instance)
        self._create_images_and_backing(context, instance, instance_dir,
                                        disk_info_json)

        # Initialize all the necessary networking, block devices and
        # start the instance.
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info, reboot=True)
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance))

        def _wait_for_reboot():
            """Called at an interval until the VM is running again."""
            state = self.get_info(instance)['state']

            if state == power_state.RUNNING:
                LOG.info(_("Instance rebooted successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        # Block until the rebooted guest reports RUNNING.
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
        timer.start(interval=0.5).wait()

    def pause(self, instance):
        """Pause VM instance."""
        dom = self._lookup_by_name(instance['name'])
        dom.suspend()

    def unpause(self, instance):
        """Unpause paused VM instance."""
        dom = self._lookup_by_name(instance['name'])
        dom.resume()

    def power_off(self, instance):
        """Power off the specified instance."""
        self._destroy(instance)

    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        # We use _hard_reboot here to ensure that all backing files,
        # network, and block device connections, etc. are established
        # and available before we attempt to start the instance.
        self._hard_reboot(context, instance, network_info, block_device_info)

    def suspend(self, instance):
        """Suspend the specified instance."""
        dom = self._lookup_by_name(instance['name'])
        # Detach PCI passthrough devices before the managed save; resume()
        # re-attaches them, which suggests they do not survive save/restore
        # -- TODO confirm against libvirt behaviour.
        self._detach_pci_devices(dom,
            pci_manager.get_instance_pci_devs(instance))
        dom.managedSave(0)

    def resume(self, context, instance, network_info, block_device_info=None):
        """resume the specified instance."""
        # Rebuild the domain from its persisted XML and reconnect networking,
        # then restore the PCI devices that suspend() detached.
        xml = self._get_existing_domain_xml(instance, network_info,
                                            block_device_info)
        dom = self._create_domain_and_network(context, xml, instance,
                           network_info, block_device_info=block_device_info)
        self._attach_pci_devices(dom,
            pci_manager.get_instance_pci_devs(instance))

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted."""
        # Check if the instance is running already and avoid doing
        # anything if it is.
        if self.instance_exists(instance['name']):
            domain = self._lookup_by_name(instance['name'])
            state = LIBVIRT_POWER_STATE[domain.info()[0]]

            ignored_states = (power_state.RUNNING,
                              power_state.SUSPENDED,
                              power_state.NOSTATE,
                              power_state.PAUSED)

            if state in ignored_states:
                return

        # Instance is not up and could be in an unknown state.
        # Be as absolute as possible about getting it back into
        # a known and running state.
        self._hard_reboot(context, instance, network_info, block_device_info)

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Loads a VM using rescue images.

        A rescue is normally performed when something goes wrong with the
        primary images and data needs to be corrected/recovered. Rescuing
        should not edit or over-ride the original image, only allow for
        data recovery.

        """
        instance_dir = libvirt_utils.get_instance_path(instance)
        # Save the current domain XML so unrescue() can restore it later.
        unrescue_xml = self._get_existing_domain_xml(instance, network_info)
        unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
        libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)

        # Fall back to the instance's own images when no dedicated rescue
        # images are configured.
        rescue_images = {
            'image_id': CONF.libvirt.rescue_image_id or instance['image_ref'],
            'kernel_id': (CONF.libvirt.rescue_kernel_id or
                          instance['kernel_id']),
            'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
                           instance['ramdisk_id']),
        }
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            None,
                                            image_meta,
                                            rescue=True)
        # The '.rescue' suffix keeps the rescue disks separate from the
        # original instance images.
        self._create_image(context, instance,
                           disk_info['mapping'],
                           '.rescue', rescue_images,
                           network_info=network_info,
                           admin_pass=rescue_password)
        xml = self.to_xml(context, instance, network_info, disk_info,
                          image_meta, rescue=rescue_images,
                          write_to_disk=True)
        self._destroy(instance)
        self._create_domain(xml)

    def unrescue(self, instance, network_info):
        """Reboot the VM which is being rescued back into primary images.

        """
        instance_dir = libvirt_utils.get_instance_path(instance)
        unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
        xml = libvirt_utils.load_file(unrescue_xml_path)
        virt_dom = self._lookup_by_name(instance['name'])
        self._destroy(instance)
        self._create_domain(xml, virt_dom)
        libvirt_utils.file_delete(unrescue_xml_path)
        # Remove the temporary '*.rescue' disks created by rescue().
        rescue_files = os.path.join(instance_dir, "*.rescue")
        for rescue_file in glob.iglob(rescue_files):
            libvirt_utils.file_delete(rescue_file)

    def poll_rebooting_instances(self, timeout, instances):
        # Intentionally a no-op in this driver.
        pass

    def _enable_hairpin(self, xml):
        """Turn on hairpin_mode for every bridge port of this domain."""
        interfaces = self.get_interfaces(xml)
        for interface in interfaces:
            # check_exit_code=[0, 1]: a non-zero status is tolerated,
            # presumably for ports lacking a hairpin_mode attribute --
            # TODO confirm.
            utils.execute('tee',
                          '/sys/class/net/%s/brport/hairpin_mode' % interface,
                          process_input='1',
                          run_as_root=True,
                          check_exit_code=[0, 1])

    # NOTE(ilyaalekseyev): Implementation like in multinics
    # for xenapi(tr3buchet)
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create and boot a new domain: build the disks, generate the
        libvirt XML, start the guest, then poll until it reports RUNNING.
        """
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        self._create_image(context, instance,
                           disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=block_device_info,
                           files=injected_files,
                           admin_pass=admin_password)
        xml = self.to_xml(context, instance, network_info,
                          disk_info, image_meta,
                          block_device_info=block_device_info,
                          write_to_disk=True)

        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info)
        LOG.debug(_("Instance is running"), instance=instance)

        def _wait_for_boot():
            """Called at an interval until the VM is running."""
            state = self.get_info(instance)['state']

            if state == power_state.RUNNING:
                LOG.info(_("Instance spawned successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
        timer.start(interval=0.5).wait()

    def _flush_libvirt_console(self, pty):
        """Drain any pending data buffered on the console pty device."""
        # iflag=nonblock reads whatever is currently buffered without
        # blocking; dd's exit status is deliberately ignored.
        out, err = utils.execute('dd',
                                 'if=%s' % pty,
                                 'iflag=nonblock',
                                 run_as_root=True,
                                 check_exit_code=False)
return out def _append_to_file(self, data, fpath): LOG.info(_('data: %(data)r, fpath: %(fpath)r'), {'data': data, 'fpath': fpath}) fp = open(fpath, 'a+') fp.write(data) return fpath def get_console_output(self, context, instance): virt_dom = self._lookup_by_name(instance.name) xml = virt_dom.XMLDesc(0) tree = etree.fromstring(xml) console_types = {} # NOTE(comstud): We want to try 'file' types first, then try 'pty' # types. We can't use Python 2.7 syntax of: # tree.find("./devices/console[@type='file']/source") # because we need to support 2.6. console_nodes = tree.findall('./devices/console') for console_node in console_nodes: console_type = console_node.get('type') console_types.setdefault(console_type, []) console_types[console_type].append(console_node) # If the guest has a console logging to a file prefer to use that if console_types.get('file'): for file_console in console_types.get('file'): source_node = file_console.find('./source') if source_node is None: continue path = source_node.get("path") if not path: continue libvirt_utils.chown(path, os.getuid()) with libvirt_utils.file_open(path, 'rb') as fp: log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES) if remaining > 0: LOG.info(_('Truncated console log returned, %d bytes ' 'ignored'), remaining, instance=instance) return log_data # Try 'pty' types if console_types.get('pty'): for pty_console in console_types.get('pty'): source_node = pty_console.find('./source') if source_node is None: continue pty = source_node.get("path") if not pty: continue break else: msg = _("Guest does not have a console available") raise exception.NovaException(msg) self._chown_console_log_for_instance(instance) data = self._flush_libvirt_console(pty) console_log = self._get_console_log_path(instance) fpath = self._append_to_file(data, console_log) with libvirt_utils.file_open(fpath, 'rb') as fp: log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES) if remaining > 0: LOG.info(_('Truncated console log returned, %d 
bytes ignored'), remaining, instance=instance) return log_data @staticmethod def get_host_ip_addr(): return CONF.my_ip def get_vnc_console(self, context, instance): def get_vnc_port_for_instance(instance_name): virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) dom = xmlutils.safe_minidom_parse_string(xml) for graphic in dom.getElementsByTagName('graphics'): if graphic.getAttribute('type') == 'vnc': return graphic.getAttribute('port') # NOTE(rmk): We had VNC consoles enabled but the instance in # question is not actually listening for connections. raise exception.ConsoleTypeUnavailable(console_type='vnc') port = get_vnc_port_for_instance(instance.name) host = CONF.vncserver_proxyclient_address return {'host': host, 'port': port, 'internal_access_path': None} def get_spice_console(self, context, instance): def get_spice_ports_for_instance(instance_name): virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) # TODO(sleepsonthefloor): use etree instead of minidom dom = xmlutils.safe_minidom_parse_string(xml) for graphic in dom.getElementsByTagName('graphics'): if graphic.getAttribute('type') == 'spice': return (graphic.getAttribute('port'), graphic.getAttribute('tlsPort')) # NOTE(rmk): We had Spice consoles enabled but the instance in # question is not actually listening for connections. 
raise exception.ConsoleTypeUnavailable(console_type='spice') ports = get_spice_ports_for_instance(instance['name']) host = CONF.spice.server_proxyclient_address return {'host': host, 'port': ports[0], 'tlsPort': ports[1], 'internal_access_path': None} @staticmethod def _supports_direct_io(dirpath): if not hasattr(os, 'O_DIRECT'): LOG.debug(_("This python runtime does not support direct I/O")) return False testfile = os.path.join(dirpath, ".directio.test") hasDirectIO = True try: f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT) # Check is the write allowed with 512 byte alignment align_size = 512 m = mmap.mmap(-1, align_size) m.write(r"x" * align_size) os.write(f, m) os.close(f) LOG.debug(_("Path '%(path)s' supports direct I/O") % {'path': dirpath}) except OSError as e: if e.errno == errno.EINVAL: LOG.debug(_("Path '%(path)s' does not support direct I/O: " "'%(ex)s'") % {'path': dirpath, 'ex': str(e)}) hasDirectIO = False else: with excutils.save_and_reraise_exception(): LOG.error(_("Error on '%(path)s' while checking " "direct I/O: '%(ex)s'") % {'path': dirpath, 'ex': str(e)}) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_("Error on '%(path)s' while checking direct I/O: " "'%(ex)s'") % {'path': dirpath, 'ex': str(e)}) finally: try: os.unlink(testfile) except Exception: pass return hasDirectIO @staticmethod def _create_local(target, local_size, unit='G', fs_format=None, label=None): """Create a blank image of specified size.""" libvirt_utils.create_image('raw', target, '%d%c' % (local_size, unit)) def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type, is_block_dev=False, max_size=None): if not is_block_dev: self._create_local(target, ephemeral_size) # Run as root only for block devices. 
        disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev)

    @staticmethod
    def _create_swap(target, swap_mb, max_size=None):
        """Create a swap file of specified size."""
        libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
        utils.mkfs('swap', target)

    @staticmethod
    def _get_console_log_path(instance):
        """Path of the instance's console.log file."""
        return os.path.join(libvirt_utils.get_instance_path(instance),
                            'console.log')

    @staticmethod
    def _get_disk_config_path(instance):
        """Path of the instance's config-drive image (disk.config)."""
        return os.path.join(libvirt_utils.get_instance_path(instance),
                            'disk.config')

    def _chown_console_log_for_instance(self, instance):
        """Make the console log owned by the current (nova) user."""
        console_log = self._get_console_log_path(instance)
        if os.path.exists(console_log):
            libvirt_utils.chown(console_log, os.getuid())

    def _chown_disk_config_for_instance(self, instance):
        """Make the config-drive image owned by the current (nova) user."""
        disk_config = self._get_disk_config_path(instance)
        if os.path.exists(disk_config):
            libvirt_utils.chown(disk_config, os.getuid())

    def _create_image(self, context, instance, disk_mapping, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None, files=None,
                      admin_pass=None, inject_files=True):
        """Create the on-disk artifacts for an instance.

        Fetches/caches kernel, ramdisk and root disk, builds ephemeral and
        swap disks, then either builds a config drive or injects files
        directly into the root filesystem.  *suffix* (e.g. '.rescue')
        namespaces the created disk files.
        """
        if not suffix:
            suffix = ''

        # Treated as "booted from volume" when there is no image ref or no
        # local root disk in the mapping.
        booted_from_volume = (
            (not bool(instance.get('image_ref')))
            or 'disk' not in disk_mapping
        )

        # Helpers binding the image backend to this instance + suffix.
        def image(fname, image_type=CONF.libvirt.images_type):
            return self.image_backend.image(instance,
                                            fname + suffix, image_type)

        def raw(fname):
            return image(fname, image_type='raw')

        # ensure directories exist and are writable
        fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))

        LOG.info(_('Creating image'), instance=instance)

        # NOTE(dprince): for rescue console.log may already exist... chown it.
        self._chown_console_log_for_instance(instance)

        # NOTE(yaguang): For evacuate disk.config already exist in shared
        # storage, chown it.
        self._chown_disk_config_for_instance(instance)

        # NOTE(vish): No need add the suffix to console.log
        libvirt_utils.write_to_file(
            self._get_console_log_path(instance), '', 7)

        if not disk_images:
            disk_images = {'image_id': instance['image_ref'],
                           'kernel_id': instance['kernel_id'],
                           'ramdisk_id': instance['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
            raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
                                context=context,
                                filename=fname,
                                image_id=disk_images['kernel_id'],
                                user_id=instance['user_id'],
                                project_id=instance['project_id'])
            if disk_images['ramdisk_id']:
                fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
                raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
                                     context=context,
                                     filename=fname,
                                     image_id=disk_images['ramdisk_id'],
                                     user_id=instance['user_id'],
                                     project_id=instance['project_id'])

        inst_type = flavors.extract_flavor(instance)

        # NOTE(ndipanov): Even if disk_mapping was passed in, which
        # currently happens only on rescue - we still don't want to
        # create a base image.
        if not booted_from_volume:
            root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
            size = instance['root_gb'] * units.Gi

            if size == 0 or suffix == '.rescue':
                size = None

            image('disk').cache(fetch_func=libvirt_utils.fetch_image,
                                context=context,
                                filename=root_fname,
                                size=size,
                                image_id=disk_images['image_id'],
                                user_id=instance['user_id'],
                                project_id=instance['project_id'])

        # Lookup the filesystem type if required
        os_type_with_default = disk.get_fs_type_for_os_type(
            instance['os_type'])

        ephemeral_gb = instance['ephemeral_gb']
        if 'disk.local' in disk_mapping:
            disk_image = image('disk.local')
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
            size = ephemeral_gb * units.Gi
            disk_image.cache(fetch_func=fn,
                             filename=fname,
                             size=size,
                             ephemeral_size=ephemeral_gb)

        for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
                block_device_info)):
            disk_image = image(blockinfo.get_eph_disk(idx))
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % idx,
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            size = eph['size'] * units.Gi
            fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
            disk_image.cache(
                fetch_func=fn,
                filename=fname,
                size=size,
                ephemeral_size=eph['size'])

        if 'disk.swap' in disk_mapping:
            mapping = disk_mapping['disk.swap']
            swap_mb = 0

            # Block-device-supplied swap wins over the flavor's swap size.
            swap = driver.block_device_info_get_swap(block_device_info)
            if driver.swap_is_usable(swap):
                swap_mb = swap['swap_size']
            elif (inst_type['swap'] > 0 and
                  not block_device.volume_in_mapping(
                      mapping['dev'], block_device_info)):
                swap_mb = inst_type['swap']

            if swap_mb > 0:
                size = swap_mb * units.Mi
                image('disk.swap').cache(fetch_func=self._create_swap,
                                         filename="swap_%s" % swap_mb,
                                         size=size,
                                         swap_mb=swap_mb)

        # Config drive
        if configdrive.required_by(instance):
            LOG.info(_('Using config drive'), instance=instance)
            extra_md = {}
            if admin_pass:
                extra_md['admin_pass'] = admin_pass

            inst_md = instance_metadata.InstanceMetadata(instance,
                content=files, extra_md=extra_md, network_info=network_info)
            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                configdrive_path = self._get_disk_config_path(instance)
                LOG.info(_('Creating config drive at %(path)s'),
                         {'path': configdrive_path}, instance=instance)

                try:
                    cdb.make_drive(configdrive_path)
                except processutils.ProcessExecutionError as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_('Creating config drive failed '
                                    'with error: %s'),
                                  e, instance=instance)

        # File injection only if needed
        elif inject_files and CONF.libvirt.inject_partition != -2:
            if booted_from_volume:
                LOG.warn(_('File injection into a boot from volume '
                           'instance is not supported'), instance=instance)

            target_partition = None
            if not instance['kernel_id']:
                target_partition = CONF.libvirt.inject_partition
                if target_partition == 0:
                    target_partition = None
            if CONF.libvirt.virt_type == 'lxc':
                target_partition = None

            if CONF.libvirt.inject_key and instance['key_data']:
                key = str(instance['key_data'])
            else:
                key = None

            net = netutils.get_injected_network_template(network_info)

            metadata = instance.get('metadata')

            if not CONF.libvirt.inject_password:
                admin_pass = None

            if any((key, net, metadata, admin_pass, files)):
                # If we're not using config_drive, inject into root fs
                injection_path = image('disk').path
                img_id = instance['image_ref']

                for inj, val in [('key', key),
                                 ('net', net),
                                 ('metadata', metadata),
                                 ('admin_pass', admin_pass),
                                 ('files', files)]:
                    if val:
                        LOG.info(_('Injecting %(inj)s into image '
                                   '%(img_id)s'),
                                 {'inj': inj, 'img_id': img_id},
                                 instance=instance)
                try:
                    disk.inject_data(injection_path,
                                     key, net, metadata, admin_pass, files,
                                     partition=target_partition,
                                     use_cow=CONF.use_cow_images,
                                     mandatory=('files',))
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_('Error injecting data into image '
                                    '%(img_id)s (%(e)s)'),
                                  {'img_id': img_id, 'e': e},
                                  instance=instance)

        if CONF.libvirt.virt_type == 'uml':
            libvirt_utils.chown(image('disk').path, 'root')

    def _prepare_pci_devices_for_use(self, pci_devices):
        """Detach and reset PCI devices from the host before guest start.

        Only needed for xen; kvm/qemu use libvirt managed mode (see note
        below).  Raises PciDevicePrepareFailed on libvirt errors.
        """
        # kvm , qemu support managed mode
        # In managed mode, the configured device will be automatically
        # detached from the host OS drivers when the guest is started,
        # and then re-attached when the guest shuts down.
        if CONF.libvirt.virt_type != 'xen':
            # we do manual detach only for xen
            return
        try:
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
                # Note(yjiang5) Spelling for 'dettach' is correct, see
                # http://libvirt.org/html/libvirt-libvirt.html.
                libvirt_dev.dettach()

            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separated loops
            # to detach and then reset it.
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
                libvirt_dev.reset()
        except libvirt.libvirtError as exc:
            raise exception.PciDevicePrepareFailed(id=dev['id'],
                                                   instance_uuid=
                                                   dev['instance_uuid'],
                                                   reason=str(exc))

    def _detach_pci_devices(self, dom, pci_devs):
        """Hot-unplug *pci_devs* from the live domain *dom*.

        Raises PciDeviceDetachFailed if libvirt is too old for the device
        callback API or if a device is still present in the domain XML
        after the detach call returns.
        """
        # for libvirt version < 1.1.1, this is race condition
        # so forbid detach if not had this version
        if not self.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
            if pci_devs:
                reason = (_("Detaching PCI devices with libvirt < %(ver)s"
                            " is not permitted") %
                          {'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
                raise exception.PciDeviceDetachFailed(reason=reason,
                                                      dev=pci_devs)

        try:
            for dev in pci_devs:
                dom.detachDeviceFlags(self.get_guest_pci_device(dev).to_xml(),
                                      libvirt.VIR_DOMAIN_AFFECT_LIVE)
                # after detachDeviceFlags returned, we should check the dom to
                # ensure the detaching is finished
                xml = dom.XMLDesc(0)
                xml_doc = etree.fromstring(xml)
                guest_config = vconfig.LibvirtConfigGuest()
                guest_config.parse_dom(xml_doc)

                for hdev in [d for d in guest_config.devices
                             if d.type == 'pci']:
                    # Compare hexadecimal domain/bus/slot/function addresses.
                    hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
                    dbsf = pci_utils.parse_address(dev['address'])
                    if [int(x, 16) for x in hdbsf] ==\
                            [int(x, 16) for x in dbsf]:
                        raise exception.PciDeviceDetachFailed(reason=
                                                              "timeout",
                                                              dev=dev)

        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warn(_("Instance disappeared while detaching "
                           "a PCI device from it."))
            else:
                raise

    def _attach_pci_devices(self, dom, pci_devs):
        """Hot-plug *pci_devs* into the domain *dom*."""
        try:
            for dev in pci_devs:
                dom.attachDevice(self.get_guest_pci_device(dev).to_xml())

        except libvirt.libvirtError:
            LOG.error(_('Attaching PCI devices %(dev)s to %(dom)s failed.')
                      % {'dev': pci_devs, 'dom': dom.ID()})
            raise

    def _set_host_enabled(self, enabled,
                          disable_reason=DISABLE_REASON_UNDEFINED):
        """Enables / Disables the compute service on this host.

        This doesn't override non-automatic disablement with an automatic
        setting; thereby permitting operators to keep otherwise
        healthy hosts out of rotation.
        """

        status_name = {True: 'disabled',
                       False: 'enabled'}

        disable_service = not enabled

        ctx = nova_context.get_admin_context()
        try:
            service = service_obj.Service.get_by_compute_host(ctx, CONF.host)

            if service.disabled != disable_service:
                # Note(jang): this is a quick fix to stop operator-
                # disabled compute hosts from re-enabling themselves
                # automatically. We prefix any automatic reason code
                # with a fixed string. We only re-enable a host
                # automatically if we find that string in place.
                # This should probably be replaced with a separate flag.
if not service.disabled or ( service.disabled_reason and service.disabled_reason.startswith(DISABLE_PREFIX)): service.disabled = disable_service service.disabled_reason = ( DISABLE_PREFIX + disable_reason if disable_service else DISABLE_REASON_UNDEFINED) service.save() LOG.debug(_('Updating compute service status to %s'), status_name[disable_service]) else: LOG.debug(_('Not overriding manual compute service ' 'status with: %s'), status_name[disable_service]) except exception.ComputeHostNotFound: LOG.warn(_('Cannot update service status on host: %s,' 'since it is not registered.') % CONF.host) except Exception: LOG.warn(_('Cannot update service status on host: %s,' 'due to an unexpected exception.') % CONF.host, exc_info=True) def get_host_capabilities(self): """Returns an instance of config.LibvirtConfigCaps representing the capabilities of the host. """ if not self._caps: xmlstr = self._conn.getCapabilities() self._caps = vconfig.LibvirtConfigCaps() self._caps.parse_str(xmlstr) if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'): try: features = self._conn.baselineCPU( [self._caps.host.cpu.to_xml()], libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES) # FIXME(wangpan): the return value of baselineCPU should be # None or xml string, but libvirt has a bug # of it from 1.1.2 which is fixed in 1.2.0, # this -1 checking should be removed later. 
if features and features != -1: self._caps.host.cpu.parse_str(features) except libvirt.VIR_ERR_NO_SUPPORT: # Note(yjiang5): ignore if libvirt has no support pass return self._caps def get_host_uuid(self): """Returns a UUID representing the host.""" caps = self.get_host_capabilities() return caps.host.uuid def get_host_cpu_for_guest(self): """Returns an instance of config.LibvirtConfigGuestCPU representing the host's CPU model & topology with policy for configuring a guest to match """ caps = self.get_host_capabilities() hostcpu = caps.host.cpu guestcpu = vconfig.LibvirtConfigGuestCPU() guestcpu.model = hostcpu.model guestcpu.vendor = hostcpu.vendor guestcpu.arch = hostcpu.arch guestcpu.match = "exact" for hostfeat in hostcpu.features: guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name) guestfeat.policy = "require" guestcpu.features.append(guestfeat) return guestcpu def get_guest_cpu_config(self): mode = CONF.libvirt.cpu_mode model = CONF.libvirt.cpu_model if mode is None: if ((CONF.libvirt.virt_type == "kvm" or CONF.libvirt.virt_type == "qemu")): mode = "host-model" else: mode = "none" if mode == "none": return None if ((CONF.libvirt.virt_type != "kvm" and CONF.libvirt.virt_type != "qemu")): msg = _("Config requested an explicit CPU model, but " "the current libvirt hypervisor '%s' does not " "support selecting CPU models") % CONF.libvirt.virt_type raise exception.Invalid(msg) if mode == "custom" and model is None: msg = _("Config requested a custom CPU model, but no " "model name was provided") raise exception.Invalid(msg) elif mode != "custom" and model is not None: msg = _("A CPU model name should not be set when a " "host CPU model is requested") raise exception.Invalid(msg) LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen") % {'mode': mode, 'model': (model or "")}) # TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is # updated to be at least this new, we can kill off the elif # blocks here if 
self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION): cpu = vconfig.LibvirtConfigGuestCPU() cpu.mode = mode cpu.model = model elif mode == "custom": cpu = vconfig.LibvirtConfigGuestCPU() cpu.model = model elif mode == "host-model": cpu = self.get_host_cpu_for_guest() elif mode == "host-passthrough": msg = _("Passthrough of the host CPU was requested but " "this libvirt version does not support this feature") raise exception.NovaException(msg) return cpu def get_guest_disk_config(self, instance, name, disk_mapping, inst_type, image_type=None): image = self.image_backend.image(instance, name, image_type) disk_info = disk_mapping[name] return image.libvirt_info(disk_info['bus'], disk_info['dev'], disk_info['type'], self.disk_cachemode, inst_type['extra_specs'], self.get_hypervisor_version()) def get_guest_storage_config(self, instance, image_meta, disk_info, rescue, block_device_info, inst_type): devices = [] disk_mapping = disk_info['mapping'] block_device_mapping = driver.block_device_info_get_mapping( block_device_info) if CONF.libvirt.virt_type == "lxc": fs = vconfig.LibvirtConfigGuestFilesys() fs.source_type = "mount" fs.source_dir = os.path.join( libvirt_utils.get_instance_path(instance), 'rootfs') devices.append(fs) else: if rescue: diskrescue = self.get_guest_disk_config(instance, 'disk.rescue', disk_mapping, inst_type) devices.append(diskrescue) diskos = self.get_guest_disk_config(instance, 'disk', disk_mapping, inst_type) devices.append(diskos) else: if 'disk' in disk_mapping: diskos = self.get_guest_disk_config(instance, 'disk', disk_mapping, inst_type) devices.append(diskos) if 'disk.local' in disk_mapping: disklocal = self.get_guest_disk_config(instance, 'disk.local', disk_mapping, inst_type) devices.append(disklocal) self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'default_ephemeral_device': block_device.prepend_dev(disklocal.target_dev)}) for idx, eph in enumerate( driver.block_device_info_get_ephemerals( 
block_device_info)): diskeph = self.get_guest_disk_config( instance, blockinfo.get_eph_disk(idx), disk_mapping, inst_type) devices.append(diskeph) if 'disk.swap' in disk_mapping: diskswap = self.get_guest_disk_config(instance, 'disk.swap', disk_mapping, inst_type) devices.append(diskswap) self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'default_swap_device': block_device.prepend_dev( diskswap.target_dev)}) for vol in block_device_mapping: connection_info = vol['connection_info'] vol_dev = block_device.prepend_dev(vol['mount_device']) info = disk_mapping[vol_dev] cfg = self.volume_driver_method('connect_volume', connection_info, info) devices.append(cfg) if 'disk.config' in disk_mapping: diskconfig = self.get_guest_disk_config(instance, 'disk.config', disk_mapping, inst_type, 'raw') devices.append(diskconfig) for d in devices: self.set_cache_mode(d) if (image_meta and image_meta.get('properties', {}).get('hw_scsi_model')): hw_scsi_model = image_meta['properties']['hw_scsi_model'] scsi_controller = vconfig.LibvirtConfigGuestController() scsi_controller.type = 'scsi' scsi_controller.model = hw_scsi_model devices.append(scsi_controller) return devices def get_guest_config_sysinfo(self, instance): sysinfo = vconfig.LibvirtConfigGuestSysinfo() sysinfo.system_manufacturer = version.vendor_string() sysinfo.system_product = version.product_string() sysinfo.system_version = version.version_string_with_package() sysinfo.system_serial = self.get_host_uuid() sysinfo.system_uuid = instance['uuid'] return sysinfo def get_guest_pci_device(self, pci_device): dbsf = pci_utils.parse_address(pci_device['address']) dev = vconfig.LibvirtConfigGuestHostdevPCI() dev.domain, dev.bus, dev.slot, dev.function = dbsf # only kvm support managed mode if CONF.libvirt.virt_type in ('xen',): dev.managed = 'no' if CONF.libvirt.virt_type in ('kvm', 'qemu'): dev.managed = 'yes' return dev def get_guest_config(self, instance, network_info, image_meta, disk_info, 
rescue=None, block_device_info=None): """Get config data for parameters. :param rescue: optional dictionary that should contain the key 'ramdisk_id' if a ramdisk is needed for the rescue image and 'kernel_id' if a kernel is needed for the rescue image. """ flavor = flavor_obj.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) inst_path = libvirt_utils.get_instance_path(instance) disk_mapping = disk_info['mapping'] CONSOLE = "console=tty0 console=ttyS0" guest = vconfig.LibvirtConfigGuest() guest.virt_type = CONF.libvirt.virt_type guest.name = instance['name'] guest.uuid = instance['uuid'] # We are using default unit for memory: KiB guest.memory = flavor.memory_mb * units.Ki guest.vcpus = flavor.vcpus guest.cpuset = CONF.vcpu_pin_set quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota'] for key, value in flavor.extra_specs.iteritems(): scope = key.split(':') if len(scope) > 1 and scope[0] == 'quota': if scope[1] in quota_items: setattr(guest, scope[1], value) guest.cpu = self.get_guest_cpu_config() if 'root' in disk_mapping: root_device_name = block_device.prepend_dev( disk_mapping['root']['dev']) else: root_device_name = None if root_device_name: # NOTE(yamahata): # for nova.api.ec2.cloud.CloudController.get_metadata() self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'root_device_name': root_device_name}) guest.os_type = vm_mode.get_from_instance(instance) if guest.os_type is None: if CONF.libvirt.virt_type == "lxc": guest.os_type = vm_mode.EXE elif CONF.libvirt.virt_type == "uml": guest.os_type = vm_mode.UML elif CONF.libvirt.virt_type == "xen": guest.os_type = vm_mode.XEN else: guest.os_type = vm_mode.HVM if CONF.libvirt.virt_type == "xen" and guest.os_type == vm_mode.HVM: guest.os_loader = CONF.libvirt.xen_hvmloader_path if CONF.libvirt.virt_type in ("kvm", "qemu"): caps = self.get_host_capabilities() if caps.host.cpu.arch in ("i686", "x86_64"): guest.sysinfo = 
self.get_guest_config_sysinfo(instance) guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS() # The underlying machine type can be set as an image attribute, # or otherwise based on some architecture specific defaults if (image_meta is not None and image_meta.get('properties') and image_meta['properties'].get('hw_machine_type') is not None): guest.os_mach_type = \ image_meta['properties']['hw_machine_type'] else: # For ARM systems we will default to vexpress-a15 for armv7 # and virt for aarch64 if caps.host.cpu.arch == "armv7l": guest.os_mach_type = "vexpress-a15" if caps.host.cpu.arch == "aarch64": guest.os_mach_type = "virt" if CONF.libvirt.virt_type == "lxc": guest.os_init_path = "/sbin/init" guest.os_cmdline = CONSOLE elif CONF.libvirt.virt_type == "uml": guest.os_kernel = "/usr/bin/linux" guest.os_root = root_device_name else: if rescue: if rescue.get('kernel_id'): guest.os_kernel = os.path.join(inst_path, "kernel.rescue") if CONF.libvirt.virt_type == "xen": guest.os_cmdline = "ro root=%s" % root_device_name else: guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE)) if rescue.get('ramdisk_id'): guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue") elif instance['kernel_id']: guest.os_kernel = os.path.join(inst_path, "kernel") if CONF.libvirt.virt_type == "xen": guest.os_cmdline = "ro root=%s" % root_device_name else: guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE)) if instance['ramdisk_id']: guest.os_initrd = os.path.join(inst_path, "ramdisk") else: guest.os_boot_dev = blockinfo.get_boot_order(disk_info) if (image_meta and image_meta.get('properties', {}).get('os_command_line')): guest.os_cmdline = \ image_meta['properties'].get('os_command_line') if ((CONF.libvirt.virt_type != "lxc" and CONF.libvirt.virt_type != "uml")): guest.acpi = True guest.apic = True # NOTE(mikal): Microsoft Windows expects the clock to be in # "localtime". 
If the clock is set to UTC, then you can use a # registry key to let windows know, but Microsoft says this is # buggy in http://support.microsoft.com/kb/2687252 clk = vconfig.LibvirtConfigGuestClock() if instance['os_type'] == 'windows': LOG.info(_('Configuring timezone for windows instance to ' 'localtime'), instance=instance) clk.offset = 'localtime' else: clk.offset = 'utc' guest.set_clock(clk) if CONF.libvirt.virt_type == "kvm": # TODO(berrange) One day this should be per-guest # OS type configurable tmpit = vconfig.LibvirtConfigGuestTimer() tmpit.name = "pit" tmpit.tickpolicy = "delay" tmrtc = vconfig.LibvirtConfigGuestTimer() tmrtc.name = "rtc" tmrtc.tickpolicy = "catchup" tmhpet = vconfig.LibvirtConfigGuestTimer() tmhpet.name = "hpet" tmhpet.present = False clk.add_timer(tmpit) clk.add_timer(tmrtc) clk.add_timer(tmhpet) for cfg in self.get_guest_storage_config(instance, image_meta, disk_info, rescue, block_device_info, flavor): guest.add_device(cfg) for vif in network_info: cfg = self.vif_driver.get_config(instance, vif, image_meta, flavor) guest.add_device(cfg) if ((CONF.libvirt.virt_type == "qemu" or CONF.libvirt.virt_type == "kvm")): # The QEMU 'pty' driver throws away any data if no # client app is connected. Thus we can't get away # with a single type=pty console. Instead we have # to configure two separate consoles. consolelog = vconfig.LibvirtConfigGuestSerial() consolelog.type = "file" consolelog.source_path = self._get_console_log_path(instance) guest.add_device(consolelog) consolepty = vconfig.LibvirtConfigGuestSerial() consolepty.type = "pty" guest.add_device(consolepty) else: consolepty = vconfig.LibvirtConfigGuestConsole() consolepty.type = "pty" guest.add_device(consolepty) # We want a tablet if VNC is enabled, # or SPICE is enabled and the SPICE agent is disabled # NB: this implies that if both SPICE + VNC are enabled # at the same time, we'll get the tablet whether the # SPICE agent is used or not. 
        need_usb_tablet = False
        if CONF.vnc_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet
        elif CONF.spice.enabled and not CONF.spice.agent_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet

        if need_usb_tablet and guest.os_type == vm_mode.HVM:
            tablet = vconfig.LibvirtConfigGuestInput()
            tablet.type = "tablet"
            tablet.bus = "usb"
            guest.add_device(tablet)

        # SPICE agent channel, only for hypervisors that support SPICE.
        if CONF.spice.enabled and CONF.spice.agent_enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            channel = vconfig.LibvirtConfigGuestChannel()
            channel.target_name = "com.redhat.spice.0"
            guest.add_device(channel)

        # NB some versions of libvirt support both SPICE and VNC
        # at the same time. We're not trying to second guess which
        # those versions are. We'll just let libvirt report the
        # errors appropriately if the user enables both.
        add_video_driver = False
        if ((CONF.vnc_enabled and
             CONF.libvirt.virt_type not in ('lxc', 'uml'))):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "vnc"
            graphics.keymap = CONF.vnc_keymap
            graphics.listen = CONF.vncserver_listen
            guest.add_device(graphics)
            add_video_driver = True

        if CONF.spice.enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "spice"
            graphics.keymap = CONF.spice.keymap
            graphics.listen = CONF.spice.server_listen
            guest.add_device(graphics)
            add_video_driver = True

        if add_video_driver:
            VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
            video = vconfig.LibvirtConfigGuestVideo()
            # NOTE(ldbragst): The following logic sets the video.type
            # depending on supported defaults given the architecture,
            # virtualization type, and features. The video.type attribute can
            # be overridden by the user with image_meta['properties'], which
            # is carried out in the next if statement below this one.
            arch = libvirt_utils.get_arch(image_meta)
            if guest.os_type == vm_mode.XEN:
                video.type = 'xen'
            elif arch in ('ppc', 'ppc64'):
                # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' be default
                # so use 'vga' instead when running on Power hardware.
                video.type = 'vga'
            elif CONF.spice.enabled:
                video.type = 'qxl'

            # The image property 'hw_video_model' overrides the default.
            meta_prop = image_meta.get('properties', {}) if image_meta else {}
            if meta_prop.get('hw_video_model'):
                video.type = meta_prop.get('hw_video_model')

            if (video.type not in VALID_VIDEO_DEVICES):
                raise exception.InvalidVideoMode(model=video.type)

            guest.add_device(video)

        # Qemu guest agent only support 'qemu' and 'kvm' hypervisor
        if CONF.libvirt.virt_type in ('qemu', 'kvm'):
            qga_enabled = False
            # Enable qga only if the 'hw_qemu_guest_agent' property is set
            if (image_meta is not None and image_meta.get('properties') and
                    image_meta['properties'].get('hw_qemu_guest_agent')
                    is not None):
                hw_qga = image_meta['properties']['hw_qemu_guest_agent']
                if hw_qga.lower() == 'yes':
                    LOG.debug(_("Qemu guest agent is enabled through image "
                                "metadata"), instance=instance)
                    qga_enabled = True

            if qga_enabled:
                qga = vconfig.LibvirtConfigGuestChannel()
                qga.type = "unix"
                qga.target_name = "org.qemu.guest_agent.0"
                qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
                                   ("org.qemu.guest_agent.0",
                                    instance['name']))
                guest.add_device(qga)

        if CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm'):
            for pci_dev in pci_manager.get_instance_pci_devs(instance):
                guest.add_device(self.get_guest_pci_device(pci_dev))
        else:
            # PCI passthrough is only wired up for xen/qemu/kvm here.
            if len(pci_manager.get_instance_pci_devs(instance)) > 0:
                raise exception.PciDeviceUnsupportedHypervisor(
                    type=CONF.libvirt.virt_type)

        return guest

    def to_xml(self, context, instance, network_info, disk_info,
               image_meta=None, rescue=None,
               block_device_info=None, write_to_disk=False):
        """Render the guest config to libvirt XML, optionally persisting
        it as <instance_path>/libvirt.xml.
        """
        # We should get image metadata every time for generating xml
        if image_meta is None:
            (image_service, image_id) = glance.get_remote_image_service(
                context, instance['image_ref'])
            image_meta = compute_utils.get_image_metadata(
                context, image_service, image_id, instance)
        # NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
        # this ahead of time so that we don't acquire it while also
        # holding the logging lock.
        network_info_str = str(network_info)
        LOG.debug(_('Start to_xml '
                    'network_info=%(network_info)s '
                    'disk_info=%(disk_info)s '
                    'image_meta=%(image_meta)s rescue=%(rescue)s'
                    'block_device_info=%(block_device_info)s'),
                  {'network_info': network_info_str, 'disk_info': disk_info,
                   'image_meta': image_meta, 'rescue': rescue,
                   'block_device_info': block_device_info},
                  instance=instance)
        conf = self.get_guest_config(instance, network_info, image_meta,
                                     disk_info, rescue, block_device_info)
        xml = conf.to_xml()

        if write_to_disk:
            instance_dir = libvirt_utils.get_instance_path(instance)
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
            libvirt_utils.write_to_file(xml_path, xml)

        LOG.debug(_('End to_xml xml=%(xml)s'),
                  {'xml': xml}, instance=instance)
        return xml

    def _lookup_by_id(self, instance_id):
        """Retrieve libvirt domain object given an instance id.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.
        """
        try:
            return self._conn.lookupByID(instance_id)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_id)

            msg = (_("Error from libvirt while looking up %(instance_id)s: "
                     "[Error Code %(error_code)s] %(ex)s")
                   % {'instance_id': instance_id,
                      'error_code': error_code,
                      'ex': ex})
            raise exception.NovaException(msg)

    def _lookup_by_name(self, instance_name):
        """Retrieve libvirt domain object given an instance name.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.
""" try: return self._conn.lookupByName(instance_name) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: raise exception.InstanceNotFound(instance_id=instance_name) msg = (_('Error from libvirt while looking up %(instance_name)s: ' '[Error Code %(error_code)s] %(ex)s') % {'instance_name': instance_name, 'error_code': error_code, 'ex': ex}) raise exception.NovaException(msg) def get_info(self, instance): """Retrieve information from libvirt for a specific instance name. If a libvirt error is encountered during lookup, we might raise a NotFound exception or Error exception depending on how severe the libvirt error is. """ virt_dom = self._lookup_by_name(instance['name']) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': LIBVIRT_POWER_STATE[state], 'max_mem': max_mem, 'mem': mem, 'num_cpu': num_cpu, 'cpu_time': cpu_time, 'id': virt_dom.ID()} def _create_domain(self, xml=None, domain=None, instance=None, launch_flags=0, power_on=True): """Create a domain. Either domain or xml must be passed in. If both are passed, then the domain definition is overwritten from the xml. """ inst_path = None if instance: inst_path = libvirt_utils.get_instance_path(instance) if CONF.libvirt.virt_type == 'lxc': if not inst_path: inst_path = None container_dir = os.path.join(inst_path, 'rootfs') fileutils.ensure_tree(container_dir) image = self.image_backend.image(instance, 'disk') container_root_device = disk.setup_container(image.path, container_dir=container_dir, use_cow=CONF.use_cow_images) #Note(GuanQiang): save container root device name here, used for # detaching the linked image device when deleting # the lxc instance. 
if container_root_device: self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'root_device_name': container_root_device}) if xml: try: domain = self._conn.defineXML(xml) except Exception as e: LOG.error(_("An error occurred while trying to define a domain" " with xml: %s") % xml) raise e if power_on: try: domain.createWithFlags(launch_flags) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_("An error occurred while trying to launch a " "defined domain with xml: %s") % domain.XMLDesc(0)) try: self._enable_hairpin(domain.XMLDesc(0)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_("An error occurred while enabling hairpin mode on " "domain with xml: %s") % domain.XMLDesc(0)) # NOTE(uni): Now the container is running with its own private mount # namespace and so there is no need to keep the container rootfs # mounted in the host namespace if CONF.libvirt.virt_type == 'lxc': state = self.get_info(instance)['state'] container_dir = os.path.join(inst_path, 'rootfs') if state == power_state.RUNNING: disk.clean_lxc_namespace(container_dir=container_dir) else: disk.teardown_container(container_dir=container_dir) return domain def _create_domain_and_network(self, context, xml, instance, network_info, block_device_info=None, power_on=True, reboot=False): """Do required network setup and create domain.""" block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_info = blockinfo.get_info_from_bdm( CONF.libvirt.virt_type, vol) conf = self.volume_driver_method('connect_volume', connection_info, disk_info) # cache device_path in connection_info -- required by encryptors if (not reboot and 'data' in connection_info and 'volume_id' in connection_info['data']): connection_info['data']['device_path'] = conf.source_path vol['connection_info'] = connection_info vol.save(context) volume_id = 
connection_info['data']['volume_id'] encryption = encryptors.get_encryption_metadata( context, self._volume_api, volume_id, connection_info) if encryption: encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.attach_volume(context, **encryption) self.plug_vifs(instance, network_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) domain = self._create_domain(xml, instance=instance, power_on=power_on) self.firewall_driver.apply_instance_filter(instance, network_info) return domain def get_all_block_devices(self): """ Return all block devices in use on this node. """ devices = [] for dom_id in self.list_instance_ids(): try: domain = self._lookup_by_id(dom_id) doc = etree.fromstring(domain.XMLDesc(0)) except exception.InstanceNotFound: LOG.info(_("libvirt can't find a domain with id: %s") % dom_id) continue except Exception: continue ret = doc.findall('./devices/disk') for node in ret: if node.get('type') != 'block': continue for child in node.getchildren(): if child.tag == 'source': devices.append(child.get('dev')) return devices def get_disks(self, instance_name): """ Note that this function takes an instance name. Returns a list of all block devices for this domain. """ domain = self._lookup_by_name(instance_name) xml = domain.XMLDesc(0) try: doc = etree.fromstring(xml) except Exception: return [] return filter(bool, [target.get("dev") for target in doc.findall('devices/disk/target')]) def get_interfaces(self, xml): """ Note that this function takes a domain xml. Returns a list of all network interfaces for this instance. 
""" doc = None try: doc = etree.fromstring(xml) except Exception: return [] interfaces = [] ret = doc.findall('./devices/interface') for node in ret: devdst = None for child in list(node): if child.tag == 'target': devdst = child.attrib['dev'] if devdst is None: continue interfaces.append(devdst) return interfaces def get_vcpu_total(self): """Get available vcpu number of physical computer. :returns: the number of cpu core instances can be used. """ if self._vcpu_total != 0: return self._vcpu_total try: total_pcpus = self._conn.getInfo()[2] except libvirt.libvirtError: LOG.warn(_("Cannot get the number of cpu, because this " "function is not implemented for this platform. ")) return 0 if CONF.vcpu_pin_set is None: self._vcpu_total = total_pcpus return self._vcpu_total available_ids = cpu.get_cpuset_ids() if available_ids[-1] >= total_pcpus: raise exception.Invalid(_("Invalid vcpu_pin_set config, " "out of hypervisor cpu range.")) self._vcpu_total = len(available_ids) return self._vcpu_total def get_memory_mb_total(self): """Get the total memory size(MB) of physical computer. :returns: the total amount of memory(MB). """ return self._conn.getInfo()[1] @staticmethod def get_local_gb_info(): """Get local storage info of the compute node in GB. :returns: A dict containing: :total: How big the overall usable filesystem is (in gigabytes) :free: How much space is free (in gigabytes) :used: How much space is used (in gigabytes) """ if CONF.libvirt.images_type == 'lvm': info = libvirt_utils.get_volume_group_info( CONF.libvirt.images_volume_group) else: info = libvirt_utils.get_fs_info(CONF.instances_path) for (k, v) in info.iteritems(): info[k] = v / units.Gi return info def get_vcpu_used(self): """Get vcpu usage number of physical computer. :returns: The total number of vcpu that currently used. 
""" total = 0 if CONF.libvirt.virt_type == 'lxc': return total + 1 dom_ids = self.list_instance_ids() for dom_id in dom_ids: try: dom = self._lookup_by_id(dom_id) try: vcpus = dom.vcpus() except libvirt.libvirtError as e: LOG.warn(_("couldn't obtain the vpu count from domain id:" " %(id)s, exception: %(ex)s") % {"id": dom_id, "ex": e}) else: total += len(vcpus[1]) except exception.InstanceNotFound: LOG.info(_("libvirt can't find a domain with id: %s") % dom_id) continue # NOTE(gtt116): give change to do other task. greenthread.sleep(0) return total def get_memory_mb_used(self): """Get the free memory size(MB) of physical computer. :returns: the total usage of memory(MB). """ if sys.platform.upper() not in ['LINUX2', 'LINUX3']: return 0 m = open('/proc/meminfo').read().split() idx1 = m.index('MemFree:') idx2 = m.index('Buffers:') idx3 = m.index('Cached:') if CONF.libvirt.virt_type == 'xen': used = 0 for domain_id in self.list_instance_ids(): try: dom_mem = int(self._lookup_by_id(domain_id).info()[2]) except exception.InstanceNotFound: LOG.info(_("libvirt can't find a domain with id: %s") % domain_id) continue # skip dom0 if domain_id != 0: used += dom_mem else: # the mem reported by dom0 is be greater of what # it is being used used += (dom_mem - (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))) # Convert it to MB return used / units.Ki else: avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) # Convert it to MB return self.get_memory_mb_total() - avail / units.Ki def get_hypervisor_type(self): """Get hypervisor type. :returns: hypervisor type (ex. qemu) """ return self._conn.getType() def get_hypervisor_version(self): """Get hypervisor version. :returns: hypervisor version (ex. 12003) """ # NOTE(justinsb): getVersion moved between libvirt versions # Trying to do be compatible with older versions is a lost cause # But ... 
we can at least give the user a nice message method = getattr(self._conn, 'getVersion', None) if method is None: raise exception.NovaException(_("libvirt version is too old" " (does not support getVersion)")) # NOTE(justinsb): If we wanted to get the version, we could: # method = getattr(libvirt, 'getVersion', None) # NOTE(justinsb): This would then rely on a proper version check return method() def get_hypervisor_hostname(self): """Returns the hostname of the hypervisor.""" hostname = self._conn.getHostname() if not hasattr(self, '_hypervisor_hostname'): self._hypervisor_hostname = hostname elif hostname != self._hypervisor_hostname: LOG.error(_('Hostname has changed from %(old)s ' 'to %(new)s. A restart is required to take effect.' ) % {'old': self._hypervisor_hostname, 'new': hostname}) return self._hypervisor_hostname def get_instance_capabilities(self): """Get hypervisor instance capabilities Returns a list of tuples that describe instances the hypervisor is capable of hosting. Each tuple consists of the triplet (arch, hypervisor_type, vm_mode). :returns: List of tuples describing instance capabilities """ caps = self.get_host_capabilities() instance_caps = list() for g in caps.guests: for dt in g.domtype: instance_cap = (g.arch, dt, g.ostype) instance_caps.append(instance_cap) return instance_caps def get_cpu_info(self): """Get cpuinfo information. Obtains cpu feature from virConnect.getCapabilities, and returns as a json string. 
:return: see above description """ caps = self.get_host_capabilities() cpu_info = dict() cpu_info['arch'] = caps.host.cpu.arch cpu_info['model'] = caps.host.cpu.model cpu_info['vendor'] = caps.host.cpu.vendor topology = dict() topology['sockets'] = caps.host.cpu.sockets topology['cores'] = caps.host.cpu.cores topology['threads'] = caps.host.cpu.threads cpu_info['topology'] = topology features = list() for f in caps.host.cpu.features: features.append(f.name) cpu_info['features'] = features # TODO(berrange): why do we bother converting the # libvirt capabilities XML into a special JSON format ? # The data format is different across all the drivers # so we could just return the raw capabilities XML # which 'compare_cpu' could use directly # # That said, arch_filter.py now seems to rely on # the libvirt drivers format which suggests this # data format needs to be standardized across drivers return jsonutils.dumps(cpu_info) def _get_pcidev_info(self, devname): """Returns a dict of PCI device.""" def _get_device_type(cfgdev): """Get a PCI device's device type. An assignable PCI device can be a normal PCI device, a SR-IOV Physical Function (PF), or a SR-IOV Virtual Function (VF). Only normal PCI devices or SR-IOV VFs are assignable, while SR-IOV PFs are always owned by hypervisor. Please notice that a PCI device with SR-IOV capability but not enabled is reported as normal PCI device. 
""" for fun_cap in cfgdev.pci_capability.fun_capability: if len(fun_cap.device_addrs) != 0: if fun_cap.type == 'virt_functions': return {'dev_type': 'type-PF'} if fun_cap.type == 'phys_function': phys_address = "%s:%s:%s.%s" % ( fun_cap.device_addrs[0][0].replace("0x", ''), fun_cap.device_addrs[0][1].replace("0x", ''), fun_cap.device_addrs[0][2].replace("0x", ''), fun_cap.device_addrs[0][3].replace("0x", '')) return {'dev_type': 'type-VF', 'phys_function': phys_address} return {'dev_type': 'type-PCI'} virtdev = self._conn.nodeDeviceLookupByName(devname) xmlstr = virtdev.XMLDesc(0) cfgdev = vconfig.LibvirtConfigNodeDevice() cfgdev.parse_str(xmlstr) address = "%04x:%02x:%02x.%1x" % ( cfgdev.pci_capability.domain, cfgdev.pci_capability.bus, cfgdev.pci_capability.slot, cfgdev.pci_capability.function) device = { "dev_id": cfgdev.name, "address": address, "product_id": cfgdev.pci_capability.product_id[2:6], "vendor_id": cfgdev.pci_capability.vendor_id[2:6], } #requirement by DataBase Model device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device device.update(_get_device_type(cfgdev)) return device def _pci_device_assignable(self, device): if device['dev_type'] == 'type-PF': return False return self.dev_filter.device_assignable(device) def get_pci_passthrough_devices(self): """Get host pci devices information. Obtains pci devices information from libvirt, and returns as a json string. Each device information is a dictionary, with mandatory keys of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id', 'label' and other optional device specific information. Refer to the objects/pci_device.py for more idea of these keys. 
:returns: a list of the assignable pci devices information """ pci_info = [] dev_names = self._conn.listDevices('pci', 0) or [] for name in dev_names: pci_dev = self._get_pcidev_info(name) if self._pci_device_assignable(pci_dev): pci_info.append(pci_dev) return jsonutils.dumps(pci_info) def get_all_volume_usage(self, context, compute_host_bdms): """Return usage info for volumes attached to vms on a given host. """ vol_usage = [] for instance_bdms in compute_host_bdms: instance = instance_bdms['instance'] for bdm in instance_bdms['instance_bdms']: vol_stats = [] mountpoint = bdm['device_name'] if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] volume_id = bdm['volume_id'] LOG.debug(_("Trying to get stats for the volume %s"), volume_id) vol_stats = self.block_stats(instance['name'], mountpoint) if vol_stats: stats = dict(volume=volume_id, instance=instance, rd_req=vol_stats[0], rd_bytes=vol_stats[1], wr_req=vol_stats[2], wr_bytes=vol_stats[3], flush_operations=vol_stats[4]) LOG.debug( _("Got volume usage stats for the volume=%(volume)s," " rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, " "wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d"), stats, instance=instance) vol_usage.append(stats) return vol_usage def block_stats(self, instance_name, disk): """ Note that this function takes an instance name. """ try: domain = self._lookup_by_name(instance_name) return domain.blockStats(disk) except libvirt.libvirtError as e: errcode = e.get_error_code() LOG.info(_('Getting block stats failed, device might have ' 'been detached. Instance=%(instance_name)s ' 'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'), {'instance_name': instance_name, 'disk': disk, 'errcode': errcode, 'e': e}) except exception.InstanceNotFound: LOG.info(_('Could not find domain in libvirt for instance %s. ' 'Cannot get block stats for device'), instance_name) def interface_stats(self, instance_name, interface): """ Note that this function takes an instance name. 
""" domain = self._lookup_by_name(instance_name) return domain.interfaceStats(interface) def get_console_pool_info(self, console_type): #TODO(mdragon): console proxy should be implemented for libvirt, # in case someone wants to use it with kvm or # such. For now return fake data. return {'address': '127.0.0.1', 'username': 'fakeuser', 'password': 'fakepassword'} def refresh_security_group_rules(self, security_group_id): self.firewall_driver.refresh_security_group_rules(security_group_id) def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) def refresh_instance_security_rules(self, instance): self.firewall_driver.refresh_instance_security_rules(instance) def refresh_provider_fw_rules(self): self.firewall_driver.refresh_provider_fw_rules() def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :param nodename: will be put in PCI device :returns: dictionary containing resource info """ # Temporary: convert supported_instances into a string, while keeping # the RPC version as JSON. 
Can be changed when RPC broadcast is removed stats = self.get_host_stats(refresh=True) stats['supported_instances'] = jsonutils.dumps( stats['supported_instances']) return stats def check_instance_shared_storage_local(self, context, instance): dirpath = libvirt_utils.get_instance_path(instance) if not os.path.exists(dirpath): return None fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug(_("Creating tmpfile %s to verify with other " "compute node that the instance is on " "the same shared storage."), tmp_file, instance=instance) os.close(fd) return {"filename": tmp_file} def check_instance_shared_storage_remote(self, context, data): return os.path.exists(data['filename']) def check_instance_shared_storage_cleanup(self, context, data): fileutils.delete_if_exists(data["filename"]) def check_can_live_migrate_destination(self, context, instance, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls back to the source host to check the results. 
:param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit :returns: a dict containing: :filename: name of the tmpfile under CONF.instances_path :block_migration: whether this is block migration :disk_over_commit: disk-over-commit factor on dest host :disk_available_mb: available disk space on dest host """ disk_available_mb = None if block_migration: disk_available_gb = dst_compute_info['disk_available_least'] disk_available_mb = \ (disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb # Compare CPU source_cpu_info = src_compute_info['cpu_info'] self._compare_cpu(source_cpu_info) # Create file on storage, to be checked on source host filename = self._create_shared_storage_test_file() return {"filename": filename, "block_migration": block_migration, "disk_over_commit": disk_over_commit, "disk_available_mb": disk_available_mb} def check_can_live_migrate_destination_cleanup(self, context, dest_check_data): """Do required cleanup on dest host after check_can_live_migrate calls :param context: security context """ filename = dest_check_data["filename"] self._cleanup_shared_storage_test_file(filename) def check_can_live_migrate_source(self, context, instance, dest_check_data): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination :returns: a dict containing migration info """ # Checking shared storage connectivity # if block migration, instances_paths should not be on shared storage. 
source = CONF.host filename = dest_check_data["filename"] block_migration = dest_check_data["block_migration"] is_volume_backed = dest_check_data.get('is_volume_backed', False) has_local_disks = bool( jsonutils.loads(self.get_instance_disk_info(instance['name']))) shared = self._check_shared_storage_test_file(filename) if block_migration: if shared: reason = _("Block migration can not be used " "with shared storage.") raise exception.InvalidLocalStorage(reason=reason, path=source) self._assert_dest_node_has_enough_disk(context, instance, dest_check_data['disk_available_mb'], dest_check_data['disk_over_commit']) elif not shared and (not is_volume_backed or has_local_disks): reason = _("Live migration can not be used " "without shared storage.") raise exception.InvalidSharedStorage(reason=reason, path=source) dest_check_data.update({"is_shared_storage": shared}) # NOTE(mikal): include the instance directory name here because it # doesn't yet exist on the destination but we want to force that # same name to be used instance_path = libvirt_utils.get_instance_path(instance, relative=True) dest_check_data['instance_relative_path'] = instance_path return dest_check_data def _assert_dest_node_has_enough_disk(self, context, instance, available_mb, disk_over_commit): """Checks if destination has enough disk for block migration.""" # Libvirt supports qcow2 disk format,which is usually compressed # on compute nodes. # Real disk image (compressed) may enlarged to "virtual disk size", # that is specified as the maximum disk size. # (See qemu-img -f path-to-disk) # Scheduler recognizes destination host still has enough disk space # if real disk size < available disk size # if disk_over_commit is True, # otherwise virtual disk size < available disk size. 
available = 0 if available_mb: available = available_mb * units.Mi ret = self.get_instance_disk_info(instance['name']) disk_infos = jsonutils.loads(ret) necessary = 0 if disk_over_commit: for info in disk_infos: necessary += int(info['disk_size']) else: for info in disk_infos: necessary += int(info['virt_disk_size']) # Check that available disk > necessary disk if (available - necessary) < 0: reason = (_('Unable to migrate %(instance_uuid)s: ' 'Disk of instance is too large(available' ' on destination host:%(available)s ' '< need:%(necessary)s)') % {'instance_uuid': instance['uuid'], 'available': available, 'necessary': necessary}) raise exception.MigrationPreCheckError(reason=reason) def _compare_cpu(self, cpu_info): """Checks the host cpu is compatible to a cpu given by xml. "xml" must be a part of libvirt.openAuth(...).getCapabilities(). return values follows by virCPUCompareResult. if 0 > return value, do live migration. 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' :param cpu_info: json string that shows cpu feature(see get_cpu_info()) :returns: None. if given cpu info is not compatible to this server, raise exception. 
""" # NOTE(berendt): virConnectCompareCPU not working for Xen if CONF.libvirt.virt_type == 'xen': return 1 info = jsonutils.loads(cpu_info) LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) cpu = vconfig.LibvirtConfigCPU() cpu.arch = info['arch'] cpu.model = info['model'] cpu.vendor = info['vendor'] cpu.sockets = info['topology']['sockets'] cpu.cores = info['topology']['cores'] cpu.threads = info['topology']['threads'] for f in info['features']: cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f)) u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult" m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s") # unknown character exists in xml, then libvirt complains try: ret = self._conn.compareCPU(cpu.to_xml(), 0) except libvirt.libvirtError as e: with excutils.save_and_reraise_exception(): ret = unicode(e) LOG.error(m, {'ret': ret, 'u': u}) if ret <= 0: LOG.error(m, {'ret': ret, 'u': u}) raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u}) def _create_shared_storage_test_file(self): """Makes tmpfile under CONF.instances_path.""" dirpath = CONF.instances_path fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug(_("Creating tmpfile %s to notify to other " "compute nodes that they should mount " "the same storage.") % tmp_file) os.close(fd) return os.path.basename(tmp_file) def _check_shared_storage_test_file(self, filename): """Confirms existence of the tmpfile under CONF.instances_path. Cannot confirm tmpfile return False. """ tmp_file = os.path.join(CONF.instances_path, filename) if not os.path.exists(tmp_file): return False else: return True def _cleanup_shared_storage_test_file(self, filename): """Removes existence of the tmpfile under CONF.instances_path.""" tmp_file = os.path.join(CONF.instances_path, filename) os.remove(tmp_file) def ensure_filtering_rules_for_instance(self, instance, network_info, time_module=None): """Ensure that an instance's filtering rules are enabled. 
When migrating an instance, we need the filtering rules to be configured on the destination host before starting the migration. Also, when restarting the compute service, we need to ensure that filtering rules exist for all running services. """ if not time_module: time_module = greenthread self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) # nwfilters may be defined in a separate thread in the case # of libvirt non-blocking mode, so we wait for completion timeout_count = range(CONF.live_migration_retry_count) while timeout_count: if self.firewall_driver.instance_filter_exists(instance, network_info): break timeout_count.pop() if len(timeout_count) == 0: msg = _('The firewall filter for %s does not exist') raise exception.NovaException(msg % instance["name"]) time_module.sleep(1) def filter_defer_apply_on(self): self.firewall_driver.filter_defer_apply_on() def filter_defer_apply_off(self): self.firewall_driver.filter_defer_apply_off() def live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Spawning live_migration operation for distributing high-load. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param block_migration: destination host :param post_method: post operation method. expected nova.compute.manager.post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :param block_migration: if true, do block migration. 
:param migrate_data: implementation specific params """ greenthread.spawn(self._live_migration, context, instance, dest, post_method, recover_method, block_migration, migrate_data) def _live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Do live migration. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager.post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :param migrate_data: implementation specific params """ # Do live migration. try: if block_migration: flaglist = CONF.libvirt.block_migration_flag.split(',') else: flaglist = CONF.libvirt.live_migration_flag.split(',') flagvals = [getattr(libvirt, x.strip()) for x in flaglist] logical_sum = reduce(lambda x, y: x | y, flagvals) dom = self._lookup_by_name(instance["name"]) dom.migrateToURI(CONF.libvirt.live_migration_uri % dest, logical_sum, None, CONF.libvirt.live_migration_bandwidth) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_("Live Migration failure: %s"), e, instance=instance) recover_method(context, instance, dest, block_migration) # Waiting for completion of live_migration. 
timer = loopingcall.FixedIntervalLoopingCall(f=None) def wait_for_live_migration(): """waiting for live migration completion.""" try: self.get_info(instance)['state'] except exception.InstanceNotFound: timer.stop() post_method(context, instance, dest, block_migration, migrate_data) timer.f = wait_for_live_migration timer.start(interval=0.5).wait() def _fetch_instance_kernel_ramdisk(self, context, instance): """Download kernel and ramdisk for instance in instance directory.""" instance_dir = libvirt_utils.get_instance_path(instance) if instance['kernel_id']: libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'kernel'), instance['kernel_id'], instance['user_id'], instance['project_id']) if instance['ramdisk_id']: libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'ramdisk'), instance['ramdisk_id'], instance['user_id'], instance['project_id']) def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info): """Clean up destination node after a failed live migration.""" self.destroy(context, instance, network_info, block_device_info) def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data=None): """Preparation live migration.""" # Steps for volume backed instance live migration w/o shared storage. 
is_shared_storage = True is_volume_backed = False is_block_migration = True instance_relative_path = None if migrate_data: is_shared_storage = migrate_data.get('is_shared_storage', True) is_volume_backed = migrate_data.get('is_volume_backed', False) is_block_migration = migrate_data.get('block_migration', True) instance_relative_path = migrate_data.get('instance_relative_path') if not is_shared_storage: # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path # because we are ensuring that the same instance directory name # is used as was at the source if instance_relative_path: instance_dir = os.path.join(CONF.instances_path, instance_relative_path) else: instance_dir = libvirt_utils.get_instance_path(instance) if os.path.exists(instance_dir): raise exception.DestinationDiskExists(path=instance_dir) os.mkdir(instance_dir) # Ensure images and backing files are present. self._create_images_and_backing(context, instance, instance_dir, disk_info) if is_volume_backed and not (is_block_migration or is_shared_storage): # Touch the console.log file, required by libvirt. console_file = self._get_console_log_path(instance) libvirt_utils.file_open(console_file, 'a').close() # if image has kernel and ramdisk, just download # following normal way. self._fetch_instance_kernel_ramdisk(context, instance) # Establishing connection to volume server. block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_info = blockinfo.get_info_from_bdm( CONF.libvirt.virt_type, vol) self.volume_driver_method('connect_volume', connection_info, disk_info) # We call plug_vifs before the compute manager calls # ensure_filtering_rules_for_instance, to ensure bridge is set up # Retry operation is necessary because continuously request comes, # concurrent request occurs to iptables, then it complains. 
max_retry = CONF.live_migration_retry_count for cnt in range(max_retry): try: self.plug_vifs(instance, network_info) break except processutils.ProcessExecutionError: if cnt == max_retry - 1: raise else: LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to ' '%(max_retry)d.'), {'cnt': cnt, 'max_retry': max_retry}, instance=instance) greenthread.sleep(1) def _create_images_and_backing(self, context, instance, instance_dir, disk_info_json): """ :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param instance_dir: instance path to use, calculated externally to handle block migrating an instance with an old style instance path :param disk_info_json: json strings specified in get_instance_disk_info """ if not disk_info_json: disk_info = [] else: disk_info = jsonutils.loads(disk_info_json) for info in disk_info: base = os.path.basename(info['path']) # Get image type and create empty disk image, and # create backing file in case of qcow2. instance_disk = os.path.join(instance_dir, base) if not info['backing_file'] and not os.path.exists(instance_disk): libvirt_utils.create_image(info['type'], instance_disk, info['virt_disk_size']) elif info['backing_file']: # Creating backing file follows same way as spawning instances. 
cache_name = os.path.basename(info['backing_file']) image = self.image_backend.image(instance, instance_disk, CONF.libvirt.images_type) if cache_name.startswith('ephemeral'): image.cache(fetch_func=self._create_ephemeral, fs_label=cache_name, os_type=instance["os_type"], filename=cache_name, size=info['virt_disk_size'], ephemeral_size=instance['ephemeral_gb']) elif cache_name.startswith('swap'): inst_type = flavors.extract_flavor(instance) swap_mb = inst_type['swap'] image.cache(fetch_func=self._create_swap, filename="swap_%s" % swap_mb, size=swap_mb * units.Mi, swap_mb=swap_mb) else: image.cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=cache_name, image_id=instance['image_ref'], user_id=instance['user_id'], project_id=instance['project_id'], size=info['virt_disk_size']) # if image has kernel and ramdisk, just download # following normal way. self._fetch_instance_kernel_ramdisk(context, instance) def post_live_migration(self, context, instance, block_device_info, migrate_data=None): # Disconnect from volume server block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_dev = vol['mount_device'].rpartition("/")[2] self.volume_driver_method('disconnect_volume', connection_info, disk_dev) def post_live_migration_at_destination(self, context, instance, network_info, block_migration, block_device_info=None): """Post operation of live migration at destination host. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param network_info: instance network information :param block_migration: if true, post operation of block_migration. """ # Define migrated instance, otherwise, suspend/destroy does not work. 
dom_list = self._conn.listDefinedDomains() if instance["name"] not in dom_list: # In case of block migration, destination does not have # libvirt.xml disk_info = blockinfo.get_disk_info( CONF.libvirt.virt_type, instance, block_device_info) xml = self.to_xml(context, instance, network_info, disk_info, block_device_info=block_device_info, write_to_disk=True) self._conn.defineXML(xml) def get_instance_disk_info(self, instance_name, xml=None, block_device_info=None): """Retrieve information about actual disk sizes of an instance. :param instance_name: name of a nova instance as returned by list_instances() :param xml: Optional; Domain XML of given libvirt instance. If omitted, this method attempts to extract it from the pre-existing definition. :param block_device_info: Optional; Can be used to filter out devices which are actually volumes. :return: json strings with below format:: "[{'path':'disk', 'type':'raw', 'virt_disk_size':'10737418240', 'backing_file':'backing_file', 'disk_size':'83886080'},...]" """ if xml is None: try: virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) except libvirt.libvirtError as ex: error_code = ex.get_error_code() msg = (_('Error from libvirt while getting description of ' '%(instance_name)s: [Error Code %(error_code)s] ' '%(ex)s') % {'instance_name': instance_name, 'error_code': error_code, 'ex': ex}) LOG.warn(msg) raise exception.InstanceNotFound(instance_id=instance_name) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) volume_devices = set() for vol in block_device_mapping: disk_dev = vol['mount_device'].rpartition("/")[2] volume_devices.add(disk_dev) disk_info = [] doc = etree.fromstring(xml) disk_nodes = doc.findall('.//devices/disk') path_nodes = doc.findall('.//devices/disk/source') driver_nodes = doc.findall('.//devices/disk/driver') target_nodes = doc.findall('.//devices/disk/target') for cnt, path_node in enumerate(path_nodes): disk_type = disk_nodes[cnt].get('type') path = 
path_node.get('file') target = target_nodes[cnt].attrib['dev'] if not path: LOG.debug(_('skipping disk for %s as it does not have a path'), instance_name) continue if disk_type != 'file': LOG.debug(_('skipping %s since it looks like volume'), path) continue if target in volume_devices: LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a ' 'volume'), {'path': path, 'target': target}) continue # get the real disk size or # raise a localized error if image is unavailable dk_size = int(os.path.getsize(path)) disk_type = driver_nodes[cnt].get('type') if disk_type == "qcow2": backing_file = libvirt_utils.get_disk_backing_file(path) virt_size = disk.get_disk_size(path) over_commit_size = int(virt_size) - dk_size else: backing_file = "" virt_size = dk_size over_commit_size = 0 disk_info.append({'type': disk_type, 'path': path, 'virt_disk_size': virt_size, 'backing_file': backing_file, 'disk_size': dk_size, 'over_committed_disk_size': over_commit_size}) return jsonutils.dumps(disk_info) def get_disk_over_committed_size_total(self): """Return total over committed disk size for all instances.""" # Disk size that all instance uses : virtual_size - disk_size instances_name = self.list_instances() disk_over_committed_size = 0 for i_name in instances_name: try: disk_infos = jsonutils.loads( self.get_instance_disk_info(i_name)) for info in disk_infos: disk_over_committed_size += int( info['over_committed_disk_size']) except OSError as e: if e.errno == errno.ENOENT: LOG.warning(_('Periodic task is updating the host stat, ' 'it is trying to get disk %(i_name)s, ' 'but disk file was removed by concurrent ' 'operations such as resize.'), {'i_name': i_name}) else: raise except exception.InstanceNotFound: # Instance was deleted during the check so ignore it pass # NOTE(gtt116): give change to do other task. 
greenthread.sleep(0) return disk_over_committed_size def unfilter_instance(self, instance, network_info): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance, network_info=network_info) def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run update the stats first. """ return self.host_state.get_host_stats(refresh=refresh) def get_host_cpu_stats(self): """Return the current CPU state of the host.""" # Extract node's CPU statistics. stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0) # getInfo() returns various information about the host node # No. 3 is the expected CPU frequency. stats["frequency"] = self._conn.getInfo()[3] return stats def get_host_uptime(self, host): """Returns the result of calling "uptime".""" #NOTE(dprince): host seems to be ignored for this call and in # other compute drivers as well. Perhaps we should remove it? out, err = utils.execute('env', 'LANG=C', 'uptime') return out def manage_image_cache(self, context, all_instances): """Manage the local cache of images.""" self.image_cache_manager.update(context, all_instances) def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize, shared_storage=False): """Used only for cleanup in case migrate_disk_and_power_off fails.""" try: if os.path.exists(inst_base_resize): utils.execute('rm', '-rf', inst_base) utils.execute('mv', inst_base_resize, inst_base) if not shared_storage: utils.execute('ssh', dest, 'rm', '-rf', inst_base) except Exception: pass def _is_storage_shared_with(self, dest, inst_base): # NOTE (rmk): There are two methods of determining whether we are # on the same filesystem: the source and dest IP are the # same, or we create a file on the dest system via SSH # and check whether the source system can also see it. 
shared_storage = (dest == self.get_host_ip_addr()) if not shared_storage: tmp_file = uuid.uuid4().hex + '.tmp' tmp_path = os.path.join(inst_base, tmp_file) try: utils.execute('ssh', dest, 'touch', tmp_path) if os.path.exists(tmp_path): shared_storage = True os.unlink(tmp_path) else: utils.execute('ssh', dest, 'rm', tmp_path) except Exception: pass return shared_storage def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): LOG.debug(_("Starting migrate_disk_and_power_off"), instance=instance) disk_info_text = self.get_instance_disk_info(instance['name'], block_device_info=block_device_info) disk_info = jsonutils.loads(disk_info_text) # copy disks to destination # rename instance dir to +_resize at first for using # shared storage for instance dir (eg. NFS). inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" shared_storage = self._is_storage_shared_with(dest, inst_base) # try to create the directory on the remote compute node # if this fails we pass the exception up the stack so we can catch # failures here earlier if not shared_storage: utils.execute('ssh', dest, 'mkdir', '-p', inst_base) self.power_off(instance) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_dev = vol['mount_device'].rpartition("/")[2] self.volume_driver_method('disconnect_volume', connection_info, disk_dev) try: utils.execute('mv', inst_base, inst_base_resize) # if we are migrating the instance with shared storage then # create the directory. 
If it is a remote node the directory # has already been created if shared_storage: dest = None utils.execute('mkdir', '-p', inst_base) for info in disk_info: # assume inst_base == dirname(info['path']) img_path = info['path'] fname = os.path.basename(img_path) from_path = os.path.join(inst_base_resize, fname) if info['type'] == 'qcow2' and info['backing_file']: tmp_path = from_path + "_rbase" # merge backing file utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'qcow2', from_path, tmp_path) if shared_storage: utils.execute('mv', tmp_path, img_path) else: libvirt_utils.copy_image(tmp_path, img_path, host=dest) utils.execute('rm', '-f', tmp_path) else: # raw or qcow2 with no backing file libvirt_utils.copy_image(from_path, img_path, host=dest) except Exception: with excutils.save_and_reraise_exception(): self._cleanup_remote_migration(dest, inst_base, inst_base_resize, shared_storage) return disk_info_text def _wait_for_running(self, instance): state = self.get_info(instance)['state'] if state == power_state.RUNNING: LOG.info(_("Instance running successfully."), instance=instance) raise loopingcall.LoopingCallDone() def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): LOG.debug(_("Starting finish_migration"), instance=instance) # resize disks. only "disk" and "disk.local" are necessary. disk_info = jsonutils.loads(disk_info) for info in disk_info: fname = os.path.basename(info['path']) if fname == 'disk': size = instance['root_gb'] elif fname == 'disk.local': size = instance['ephemeral_gb'] else: size = 0 size *= units.Gi # If we have a non partitioned image that we can extend # then ensure we're in 'raw' format so we can extend file system. 
fmt = info['type'] if (size and fmt == 'qcow2' and disk.can_resize_image(info['path'], size) and disk.is_image_partitionless(info['path'], use_cow=True)): path_raw = info['path'] + '_raw' utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', info['path'], path_raw) utils.execute('mv', path_raw, info['path']) fmt = 'raw' if size: use_cow = fmt == 'qcow2' disk.extend(info['path'], size, use_cow=use_cow) if fmt == 'raw' and CONF.use_cow_images: # back to qcow2 (no backing_file though) so that snapshot # will be available path_qcow = info['path'] + '_qcow' utils.execute('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', info['path'], path_qcow) utils.execute('mv', path_qcow, info['path']) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info, image_meta) # assume _create_image do nothing if a target file exists. self._create_image(context, instance, disk_mapping=disk_info['mapping'], network_info=network_info, block_device_info=None, inject_files=False) xml = self.to_xml(context, instance, network_info, disk_info, block_device_info=block_device_info, write_to_disk=True) self._create_domain_and_network(context, xml, instance, network_info, block_device_info, power_on) if power_on: timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() def _cleanup_failed_migration(self, inst_base): """Make sure that a failed migrate doesn't prevent us from rolling back in a revert. """ try: shutil.rmtree(inst_base) except OSError as e: if e.errno != errno.ENOENT: raise def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): LOG.debug(_("Starting finish_revert_migration"), instance=instance) inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" # NOTE(danms): if we're recovering from a failed migration, # make sure we don't have a left-over same-host base directory # that would conflict. 
Also, don't fail on the rename if the # failure happened early. if os.path.exists(inst_base_resize): self._cleanup_failed_migration(inst_base) utils.execute('mv', inst_base_resize, inst_base) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info) xml = self.to_xml(context, instance, network_info, disk_info, block_device_info=block_device_info) self._create_domain_and_network(context, xml, instance, network_info, block_device_info, power_on) if power_on: timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" self._cleanup_resize(instance, network_info) def get_diagnostics(self, instance): def get_io_devices(xml_doc): """get the list of io devices from the xml document.""" result = {"volumes": [], "ifaces": []} try: doc = etree.fromstring(xml_doc) except Exception: return result blocks = [('./devices/disk', 'volumes'), ('./devices/interface', 'ifaces')] for block, key in blocks: section = doc.findall(block) for node in section: for child in node.getchildren(): if child.tag == 'target' and child.get('dev'): result[key].append(child.get('dev')) return result domain = self._lookup_by_name(instance['name']) output = {} # get cpu time, might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt try: cputime = domain.vcpus()[0] for i in range(len(cputime)): output["cpu" + str(i) + "_time"] = cputime[i][2] except libvirt.libvirtError: pass # get io status xml = domain.XMLDesc(0) dom_io = get_io_devices(xml) for disk in dom_io["volumes"]: try: # blockStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.blockStats(disk) output[disk + "_read_req"] = stats[0] output[disk + "_read"] = stats[1] output[disk + "_write_req"] = stats[2] output[disk + 
"_write"] = stats[3] output[disk + "_errors"] = stats[4] except libvirt.libvirtError: pass for interface in dom_io["ifaces"]: try: # interfaceStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.interfaceStats(interface) output[interface + "_rx"] = stats[0] output[interface + "_rx_packets"] = stats[1] output[interface + "_rx_errors"] = stats[2] output[interface + "_rx_drop"] = stats[3] output[interface + "_tx"] = stats[4] output[interface + "_tx_packets"] = stats[5] output[interface + "_tx_errors"] = stats[6] output[interface + "_tx_drop"] = stats[7] except libvirt.libvirtError: pass output["memory"] = domain.maxMemory() # memoryStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt try: mem = domain.memoryStats() for key in mem.keys(): output["memory-" + key] = mem[key] except (libvirt.libvirtError, AttributeError): pass return output def instance_on_disk(self, instance): # ensure directories exist and are writable instance_path = libvirt_utils.get_instance_path(instance) LOG.debug(_('Checking instance files accessibility %s'), instance_path) return os.access(instance_path, os.W_OK) def inject_network_info(self, instance, nw_info): self.firewall_driver.setup_basic_filtering(instance, nw_info) def _delete_instance_files(self, instance): # NOTE(mikal): a shim to handle this file not using instance objects # everywhere. Remove this when that conversion happens. context = nova_context.get_admin_context() inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid']) # NOTE(mikal): this code should be pushed up a layer when this shim is # removed. 
attempts = int(inst_obj.system_metadata.get('clean_attempts', '0')) success = self.delete_instance_files(inst_obj) inst_obj.system_metadata['clean_attempts'] = str(attempts + 1) if success: inst_obj.cleaned = True inst_obj.save(context) def delete_instance_files(self, instance): target = libvirt_utils.get_instance_path(instance) if os.path.exists(target): LOG.info(_('Deleting instance files %s'), target, instance=instance) try: shutil.rmtree(target) except OSError as e: LOG.error(_('Failed to cleanup directory %(target)s: ' '%(e)s'), {'target': target, 'e': e}, instance=instance) # It is possible that the delete failed, if so don't mark the instance # as cleaned. if os.path.exists(target): LOG.info(_('Deletion of %s failed'), target, instance=instance) return False LOG.info(_('Deletion of %s complete'), target, instance=instance) return True @property def need_legacy_block_device_info(self): return False def default_root_device_name(self, instance, image_meta, root_bdm): disk_bus = blockinfo.get_disk_bus_for_device_type( CONF.libvirt.virt_type, image_meta, "disk") cdrom_bus = blockinfo.get_disk_bus_for_device_type( CONF.libvirt.virt_type, image_meta, "cdrom") root_info = blockinfo.get_root_info( CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus, cdrom_bus) return block_device.prepend_dev(root_info['dev']) def default_device_names_for_instance(self, instance, root_device_name, *block_device_lists): ephemerals, swap, block_device_mapping = block_device_lists[:3] blockinfo.default_device_names(CONF.libvirt.virt_type, nova_context.get_admin_context(), instance, root_device_name, ephemerals, swap, block_device_mapping) class HostState(object): """Manages information about the compute node through libvirt.""" def __init__(self, driver): super(HostState, self).__init__() self._stats = {} self.driver = driver self.update_status() def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run update the stats first. 
""" if refresh or not self._stats: self.update_status() return self._stats def update_status(self): """Retrieve status info from libvirt.""" def _get_disk_available_least(): """Return total real disk available least size. The size of available disk, when block_migration command given disk_over_commit param is FALSE. The size that deducted real instance disk size from the total size of the virtual disk of all instances. """ disk_free_gb = disk_info_dict['free'] disk_over_committed = (self.driver. get_disk_over_committed_size_total()) # Disk available least size available_least = disk_free_gb * units.Gi - disk_over_committed return (available_least / units.Gi) LOG.debug(_("Updating host stats")) disk_info_dict = self.driver.get_local_gb_info() data = {} #NOTE(dprince): calling capabilities before getVersion works around # an initialization issue with some versions of Libvirt (1.0.5.5). # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116 # See: https://bugs.launchpad.net/nova/+bug/1215593 data["supported_instances"] = \ self.driver.get_instance_capabilities() data["vcpus"] = self.driver.get_vcpu_total() data["memory_mb"] = self.driver.get_memory_mb_total() data["local_gb"] = disk_info_dict['total'] data["vcpus_used"] = self.driver.get_vcpu_used() data["memory_mb_used"] = self.driver.get_memory_mb_used() data["local_gb_used"] = disk_info_dict['used'] data["hypervisor_type"] = self.driver.get_hypervisor_type() data["hypervisor_version"] = self.driver.get_hypervisor_version() data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname() data["cpu_info"] = self.driver.get_cpu_info() data['disk_available_least'] = _get_disk_available_least() data['pci_passthrough_devices'] = \ self.driver.get_pci_passthrough_devices() self._stats = data return data
apache-2.0
a0c/odoo
addons/product_extended/__init__.py
374
1068
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import product_extended import wizard # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
SimpleTimeTracker/simpletimetracker
source/jsonpickle/util.py
4
13473
# -*- coding: utf-8 -*- # # Copyright (C) 2008 John Paulett (john -at- paulett.org) # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. """Helper functions for pickling and unpickling. Most functions assist in determining the type of an object. """ from __future__ import absolute_import, division, unicode_literals import base64 import collections import io import operator import time import types import inspect from . import tags from .compat import set, unicode, long, bytes, PY3 if not PY3: import __builtin__ SEQUENCES = (list, set, tuple) SEQUENCES_SET = set(SEQUENCES) PRIMITIVES = set((unicode, bool, float, int, long)) def is_type(obj): """Returns True is obj is a reference to a type. >>> is_type(1) False >>> is_type(object) True >>> class Klass: pass >>> is_type(Klass) True """ # use "isinstance" and not "is" to allow for metaclasses if PY3: return isinstance(obj, type) else: return isinstance(obj, (type, types.ClassType)) def has_method(obj, name): # false if attribute doesn't exist if not hasattr(obj, name): return False func = getattr(obj, name) # builtin descriptors like __getnewargs__ if isinstance(func, types.BuiltinMethodType): return True # note that FunctionType has a different meaning in py2/py3 if not isinstance(func, (types.MethodType, types.FunctionType)): return False # need to go through __dict__'s since in py3 methods are essentially descriptors base_type = obj if is_type(obj) else obj.__class__ # __class__ for old-style classes original = None for subtype in inspect.getmro(base_type): # there is no .mro() for old-style classes original = vars(subtype).get(name) if original is not None: break # name not found in the mro if original is None: return False # static methods are always fine if isinstance(original, staticmethod): return True # at this point, the method has to be an instancemthod or a classmethod self_attr = '__self__' if PY3 else 
'im_self' if not hasattr(func, self_attr): return False bound_to = getattr(func, self_attr) # class methods if isinstance(original, classmethod): return issubclass(base_type, bound_to) # bound methods return isinstance(obj, type(bound_to)) def is_object(obj): """Returns True is obj is a reference to an object instance. >>> is_object(1) True >>> is_object(object()) True >>> is_object(lambda x: 1) False """ return (isinstance(obj, object) and not isinstance(obj, (type, types.FunctionType))) def is_primitive(obj): """Helper method to see if the object is a basic data type. Unicode strings, integers, longs, floats, booleans, and None are considered primitive and will return True when passed into *is_primitive()* >>> is_primitive(3) True >>> is_primitive([4,4]) False """ if obj is None: return True elif type(obj) in PRIMITIVES: return True return False def is_dictionary(obj): """Helper method for testing if the object is a dictionary. >>> is_dictionary({'key':'value'}) True """ return type(obj) is dict def is_sequence(obj): """Helper method to see if the object is a sequence (list, set, or tuple). >>> is_sequence([4]) True """ return type(obj) in SEQUENCES_SET def is_list(obj): """Helper method to see if the object is a Python list. >>> is_list([4]) True """ return type(obj) is list def is_set(obj): """Helper method to see if the object is a Python set. >>> is_set(set()) True """ return type(obj) is set def is_bytes(obj): """Helper method to see if the object is a bytestring. >>> is_bytes(b'foo') True """ return type(obj) is bytes def is_unicode(obj): """Helper method to see if the object is a unicode string""" return type(obj) is unicode def is_tuple(obj): """Helper method to see if the object is a Python tuple. >>> is_tuple((1,)) True """ return type(obj) is tuple def is_dictionary_subclass(obj): """Returns True if *obj* is a subclass of the dict type. *obj* must be a subclass and not the actual builtin dict. 
>>> class Temp(dict): pass >>> is_dictionary_subclass(Temp()) True """ # TODO: add UserDict return (hasattr(obj, '__class__') and issubclass(obj.__class__, dict) and not is_dictionary(obj)) def is_sequence_subclass(obj): """Returns True if *obj* is a subclass of list, set or tuple. *obj* must be a subclass and not the actual builtin, such as list, set, tuple, etc.. >>> class Temp(list): pass >>> is_sequence_subclass(Temp()) True """ return (hasattr(obj, '__class__') and (issubclass(obj.__class__, SEQUENCES) or is_list_like(obj)) and not is_sequence(obj)) def is_noncomplex(obj): """Returns True if *obj* is a special (weird) class, that is more complex than primitive data types, but is not a full object. Including: * :class:`~time.struct_time` """ if type(obj) is time.struct_time: return True return False def is_function(obj): """Returns true if passed a function >>> is_function(lambda x: 1) True >>> is_function(locals) True >>> def method(): pass >>> is_function(method) True >>> is_function(1) False """ if type(obj) in (types.FunctionType, types.MethodType, types.LambdaType, types.BuiltinFunctionType, types.BuiltinMethodType): return True if not hasattr(obj, '__class__'): return False module = translate_module_name(obj.__class__.__module__) name = obj.__class__.__name__ return (module == '__builtin__' and name in ('function', 'builtin_function_or_method', 'instancemethod', 'method-wrapper')) def is_module_function(obj): """Return True if `obj` is a module-global function >>> import os >>> is_module_function(os.path.exists) True >>> is_module_function(lambda: None) False """ return (hasattr(obj, '__class__') and isinstance(obj, types.FunctionType) and hasattr(obj, '__module__') and hasattr(obj, '__name__') and obj.__name__ != '<lambda>') def is_module(obj): """Returns True if passed a module >>> import os >>> is_module(os) True """ return isinstance(obj, types.ModuleType) def is_picklable(name, value): """Return True if an object can be pickled >>> import os >>> 
is_picklable('os', os) True >>> def foo(): pass >>> is_picklable('foo', foo) True >>> is_picklable('foo', lambda: None) False """ if name in tags.RESERVED: return False return is_module_function(value) or not is_function(value) def is_installed(module): """Tests to see if ``module`` is available on the sys.path >>> is_installed('sys') True >>> is_installed('hopefullythisisnotarealmodule') False """ try: __import__(module) return True except ImportError: return False def is_list_like(obj): return hasattr(obj, '__getitem__') and hasattr(obj, 'append') def is_iterator(obj): is_file = False if not PY3: is_file = isinstance(obj, __builtin__.file) return (isinstance(obj, collections.Iterator) and not isinstance(obj, io.IOBase) and not is_file) def is_collections(obj): try: return type(obj).__module__ == 'collections' except: return False IteratorType = type(iter('')) def is_reducible(obj): """ Returns false if of a type which have special casing, and should not have their __reduce__ methods used """ # defaultdicts may contain functions which we cannot serialise if is_collections(obj) and not isinstance(obj, collections.defaultdict): return True return (not (is_list(obj) or is_list_like(obj) or is_primitive(obj) or is_bytes(obj) or is_unicode(obj) or is_dictionary(obj) or is_sequence(obj) or is_set(obj) or is_tuple(obj) or is_dictionary_subclass(obj) or is_sequence_subclass(obj) or is_function(obj) or is_module(obj) or is_iterator(obj) or type(getattr(obj, '__slots__', None)) is IteratorType or type(obj) is object or obj is object or (is_type(obj) and obj.__module__ == 'datetime') )) def in_dict(obj, key, default=False): """ Returns true if key exists in obj.__dict__; false if not in. If obj.__dict__ is absent, return default """ return (key in obj.__dict__) if getattr(obj, '__dict__', None) else default def in_slots(obj, key, default=False): """ Returns true if key exists in obj.__slots__; false if not in. 
If obj.__slots__ is absent, return default """ return (key in obj.__slots__) if getattr(obj, '__slots__', None) else default def has_reduce(obj): """ Tests if __reduce__ or __reduce_ex__ exists in the object dict or in the class dicts of every class in the MRO *except object*. Returns a tuple of booleans (has_reduce, has_reduce_ex) """ if not is_reducible(obj) or is_type(obj): return (False, False) # in this case, reduce works and is desired # notwithstanding depending on default object # reduce if is_noncomplex(obj): return (False, True) has_reduce = False has_reduce_ex = False REDUCE = '__reduce__' REDUCE_EX = '__reduce_ex__' # For object instance has_reduce = in_dict(obj, REDUCE) or in_slots(obj, REDUCE) has_reduce_ex = in_dict(obj, REDUCE_EX) or in_slots(obj, REDUCE_EX) # turn to the MRO for base in type(obj).__mro__: if is_reducible(base): has_reduce = has_reduce or in_dict(base, REDUCE) has_reduce_ex = has_reduce_ex or in_dict(base, REDUCE_EX) if has_reduce and has_reduce_ex: return (has_reduce, has_reduce_ex) # for things that don't have a proper dict but can be getattred (rare, but includes some # builtins) cls = type(obj) object_reduce = getattr(object, REDUCE) object_reduce_ex = getattr(object, REDUCE_EX) if not has_reduce: has_reduce_cls = getattr(cls, REDUCE, False) if not has_reduce_cls is object_reduce: has_reduce = has_reduce_cls if not has_reduce_ex: has_reduce_ex_cls = getattr(cls, REDUCE_EX, False) if not has_reduce_ex_cls is object_reduce_ex: has_reduce_ex = has_reduce_ex_cls return (has_reduce, has_reduce_ex) def translate_module_name(module): """Rename builtin modules to a consistent (Python2) module name This is used so that references to Python's `builtins` module can be loaded in both Python 2 and 3. We remap to the "__builtin__" name and unmap it when importing. See untranslate_module_name() for the reverse operation. 
""" if (PY3 and module == 'builtins') or module == 'exceptions': # We map the Python2 `exceptions` module to `__builtin__` because # `__builtin__` is a superset and contains everything that is # available in `exceptions`, which makes the translation simpler. return '__builtin__' else: return module def untranslate_module_name(module): """Rename module names mention in JSON to names that we can import This reverses the translation applied by translate_module_name() to a module name available to the current version of Python. """ if PY3: # remap `__builtin__` and `exceptions` to the `builtins` module if module == '__builtin__': module = 'builtins' elif module == 'exceptions': module = 'builtins' return module def importable_name(cls): """ >>> class Example(object): ... pass >>> ex = Example() >>> importable_name(ex.__class__) == 'jsonpickle.util.Example' True >>> importable_name(type(25)) == '__builtin__.int' True >>> importable_name(None.__class__) == '__builtin__.NoneType' True >>> importable_name(False.__class__) == '__builtin__.bool' True >>> importable_name(AttributeError) == '__builtin__.AttributeError' True """ name = cls.__name__ module = translate_module_name(cls.__module__) return '%s.%s' % (module, name) def b64encode(data): payload = base64.b64encode(data) if PY3 and type(payload) is bytes: payload = payload.decode('ascii') return payload def b64decode(payload): if PY3 and type(payload) is not bytes: payload = bytes(payload, 'ascii') return base64.b64decode(payload) def itemgetter(obj, getter=operator.itemgetter(0)): return unicode(getter(obj))
gpl-3.0
Adel-Magebinary/odoo
addons/sale_crm/__init__.py
353
1076
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import wizard import sale_crm # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
nikodtbVf/aima-si
tests/test_csp.py
26
1067
import pytest
from csp import *  # noqa


def test_backtracking_search():
    """Backtracking search finds a solution for the map-coloring CSPs under
    every combination of variable/value-ordering heuristics and inference.

    FIX: the original asserted ``(expr is not None) == True`` -- comparing a
    boolean to True is redundant (flake8 E712); assert the expression directly.
    """
    assert backtracking_search(australia) is not None
    assert backtracking_search(australia,
                               select_unassigned_variable=mrv) is not None
    assert backtracking_search(australia, order_domain_values=lcv) is not None
    assert backtracking_search(australia, select_unassigned_variable=mrv,
                               order_domain_values=lcv) is not None
    assert backtracking_search(australia, inference=forward_checking) is not None
    assert backtracking_search(australia, inference=mac) is not None
    assert backtracking_search(usa, select_unassigned_variable=mrv,
                               order_domain_values=lcv, inference=mac) is not None


def test_universal_dict():
    """A UniversalDict maps every key to the same constant value."""
    d = UniversalDict(42)
    assert d['life'] == 42


def test_parse_neighbours():
    """parse_neighbors builds a symmetric adjacency mapping."""
    assert parse_neighbors('X: Y Z; Y: Z') == {'Y': ['X', 'Z'],
                                               'X': ['Y', 'Z'],
                                               'Z': ['X', 'Y']}


if __name__ == "__main__":
    pytest.main()
mit
meee1/pymavlink
pymavlink/examples/sigloss.py
29
1902
#!/usr/bin/env python

'''
show times when signal is lost
'''

import sys, time, os

# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))

from optparse import OptionParser
parser = OptionParser("sigloss.py [options]")
parser.add_option("--no-timestamps", dest="notimestamps", action='store_true',
                  help="Log doesn't have timestamps")
parser.add_option("--planner", dest="planner", action='store_true',
                  help="use planner file format")
parser.add_option("--robust", dest="robust", action='store_true',
                  help="Enable robust parsing (skip over bad data)")
parser.add_option("--mav10", action='store_true', default=False,
                  help="Use MAVLink protocol 1.0")
parser.add_option("--deltat", type='float', default=1.0,
                  help="loss threshold in seconds")

(opts, args) = parser.parse_args()

# MAVLINK10 must be exported before mavutil is imported.
if opts.mav10:
    os.environ['MAVLINK10'] = '1'
import mavutil

if len(args) < 1:
    print("Usage: sigloss.py [options] <LOGFILE...>")
    sys.exit(1)


def sigloss(logfile):
    '''work out signal loss times for a log file

    BUG FIX: the original body ignored the ``logfile`` parameter and read the
    module-global ``filename`` instead -- it only worked by accident because
    the caller's loop variable happened to be named ``filename``.  The
    parameter is now used throughout.
    '''
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile,
                                      planner_format=opts.planner,
                                      notimestamps=opts.notimestamps,
                                      robust_parsing=opts.robust)

    last_t = 0
    while True:
        m = mlog.recv_match()
        if m is None:
            return
        if opts.notimestamps:
            # skip messages without a usable timestamp field
            if 'usec' not in m._fieldnames:
                continue
            t = m.usec / 1.0e6
        else:
            t = m._timestamp
        # report any gap longer than the configured threshold
        if last_t != 0:
            if t - last_t > opts.deltat:
                print("Sig lost for %.1fs at %s" % (t - last_t, time.asctime(time.localtime(t))))
        last_t = t


total = 0.0
for filename in args:
    sigloss(filename)
lgpl-3.0
alienity/three.js
utils/converters/ctm/join_ctm.py
399
2521
"""Join multiple binary files into single file and generate JSON snippet with offsets ------------------------------------- How to use ------------------------------------- python join_ctm.py -i "part_*.ctm" -o joined.ctm [-j offsets.js] Will read multiple files following wildcard pattern (ordered lexicographically): part_000.ctm part_001.ctm part_002.ctm ... part_XXX.ctm And generate single concatenated files: joined.ctm offsets.js (optional, offsets are also dumped to standard output) """ import getopt import glob import sys import os # ##################################################### # Templates # ##################################################### TEMPLATE_JSON = u"""\ "offsets": [ %(offsets)s ], """ # ############################################################################# # Helpers # ############################################################################# def usage(): print 'Usage: %s -i "filename_*.ctm" -o filename.ctm [-j offsets.js]' % os.path.basename(sys.argv[0]) # ##################################################### # Main # ##################################################### if __name__ == "__main__": # get parameters from the command line try: opts, args = getopt.getopt(sys.argv[1:], "hi:o:j:", ["help", "input=", "output=", "json="]) except getopt.GetoptError: usage() sys.exit(2) inpattern = "" outname = "" jsonname = "" for o, a in opts: if o in ("-h", "--help"): usage() sys.exit() elif o in ("-i", "--input"): inpattern = a elif o in ("-o", "--output"): outname = a elif o in ("-j", "--json"): jsonname = a # quit if required parameters are missing if inpattern == "" or outname == "": usage() sys.exit(2) outfile = open(outname, "wb") matches = glob.glob(inpattern) matches.sort() total = 0 offsets = [] for filename in matches: filesize = os.path.getsize(filename) offsets.append(total) total += filesize print filename, filesize infile = open(filename, "rb") buffer = infile.read() outfile.write(buffer) infile.close() outfile.close() 
json_str = TEMPLATE_JSON % { "offsets" : ", ".join(["%d" % o for o in offsets]) } print json_str if jsonname: jsonfile = open(jsonname, "w") jsonfile.write(json_str) jsonfile.close()
mit
Big-B702/python-for-android
python-modules/twisted/twisted/web/client.py
49
24649
# -*- test-case-name: twisted.web.test.test_webclient -*- # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ HTTP client. """ import os, types from urlparse import urlunparse from twisted.python import log from twisted.web import http from twisted.internet import defer, protocol, reactor from twisted.python import failure from twisted.python.util import InsensitiveDict from twisted.web import error from twisted.web.http_headers import Headers from twisted.python.compat import set class PartialDownloadError(error.Error): """ Page was only partially downloaded, we got disconnected in middle. @ivar response: All of the response body which was downloaded. """ class HTTPPageGetter(http.HTTPClient): """ Gets a resource via HTTP, then quits. Typically used with L{HTTPClientFactory}. Note that this class does not, by itself, do anything with the response. If you want to download a resource into a file, use L{HTTPPageDownloader} instead. """ quietLoss = 0 followRedirect = True failed = 0 _specialHeaders = set(('host', 'user-agent', 'cookie', 'content-length')) def connectionMade(self): method = getattr(self.factory, 'method', 'GET') self.sendCommand(method, self.factory.path) self.sendHeader('Host', self.factory.headers.get("host", self.factory.host)) self.sendHeader('User-Agent', self.factory.agent) data = getattr(self.factory, 'postdata', None) if data is not None: self.sendHeader("Content-Length", str(len(data))) cookieData = [] for (key, value) in self.factory.headers.items(): if key.lower() not in self._specialHeaders: # we calculated it on our own self.sendHeader(key, value) if key.lower() == 'cookie': cookieData.append(value) for cookie, cookval in self.factory.cookies.items(): cookieData.append('%s=%s' % (cookie, cookval)) if cookieData: self.sendHeader('Cookie', '; '.join(cookieData)) self.endHeaders() self.headers = {} if data is not None: self.transport.write(data) def handleHeader(self, key, value): """ Called every time a 
header is received. Stores the header information as key-value pairs in the C{headers} attribute. @type key: C{str} @param key: An HTTP header field name. @type value: C{str} @param value: An HTTP header field value. """ key = key.lower() l = self.headers.setdefault(key, []) l.append(value) def handleStatus(self, version, status, message): self.version, self.status, self.message = version, status, message self.factory.gotStatus(version, status, message) def handleEndHeaders(self): self.factory.gotHeaders(self.headers) m = getattr(self, 'handleStatus_'+self.status, self.handleStatusDefault) m() def handleStatus_200(self): pass handleStatus_201 = lambda self: self.handleStatus_200() handleStatus_202 = lambda self: self.handleStatus_200() def handleStatusDefault(self): self.failed = 1 def handleStatus_301(self): l = self.headers.get('location') if not l: self.handleStatusDefault() return url = l[0] if self.followRedirect: scheme, host, port, path = \ _parse(url, defaultPort=self.transport.getPeer().port) self.factory._redirectCount += 1 if self.factory._redirectCount >= self.factory.redirectLimit: err = error.InfiniteRedirection( self.status, 'Infinite redirection detected', location=url) self.factory.noPage(failure.Failure(err)) self.quietLoss = True self.transport.loseConnection() return self.factory.setURL(url) if self.factory.scheme == 'https': from twisted.internet import ssl contextFactory = ssl.ClientContextFactory() reactor.connectSSL(self.factory.host, self.factory.port, self.factory, contextFactory) else: reactor.connectTCP(self.factory.host, self.factory.port, self.factory) else: self.handleStatusDefault() self.factory.noPage( failure.Failure( error.PageRedirect( self.status, self.message, location = url))) self.quietLoss = True self.transport.loseConnection() def handleStatus_302(self): if self.afterFoundGet: self.handleStatus_303() self.handleStatus_301() def handleStatus_303(self): self.factory.method = 'GET' self.handleStatus_301() def 
connectionLost(self, reason): if not self.quietLoss: http.HTTPClient.connectionLost(self, reason) self.factory.noPage(reason) def handleResponse(self, response): if self.quietLoss: return if self.failed: self.factory.noPage( failure.Failure( error.Error( self.status, self.message, response))) if self.factory.method == 'HEAD': # Callback with empty string, since there is never a response # body for HEAD requests. self.factory.page('') elif self.length != None and self.length != 0: self.factory.noPage(failure.Failure( PartialDownloadError(self.status, self.message, response))) else: self.factory.page(response) # server might be stupid and not close connection. admittedly # the fact we do only one request per connection is also # stupid... self.transport.loseConnection() def timeout(self): self.quietLoss = True self.transport.loseConnection() self.factory.noPage(defer.TimeoutError("Getting %s took longer than %s seconds." % (self.factory.url, self.factory.timeout))) class HTTPPageDownloader(HTTPPageGetter): transmittingPage = 0 def handleStatus_200(self, partialContent=0): HTTPPageGetter.handleStatus_200(self) self.transmittingPage = 1 self.factory.pageStart(partialContent) def handleStatus_206(self): self.handleStatus_200(partialContent=1) def handleResponsePart(self, data): if self.transmittingPage: self.factory.pagePart(data) def handleResponseEnd(self): if self.length: self.transmittingPage = 0 self.factory.noPage( failure.Failure( PartialDownloadError(self.status))) if self.transmittingPage: self.factory.pageEnd() self.transmittingPage = 0 if self.failed: self.factory.noPage( failure.Failure( error.Error( self.status, self.message, None))) self.transport.loseConnection() class HTTPClientFactory(protocol.ClientFactory): """Download a given URL. @type deferred: Deferred @ivar deferred: A Deferred that will fire when the content has been retrieved. Once this is fired, the ivars `status', `version', and `message' will be set. 
@type status: str @ivar status: The status of the response. @type version: str @ivar version: The version of the response. @type message: str @ivar message: The text message returned with the status. @type response_headers: dict @ivar response_headers: The headers that were specified in the response from the server. @type method: str @ivar method: The HTTP method to use in the request. This should be one of OPTIONS, GET, HEAD, POST, PUT, DELETE, TRACE, or CONNECT (case matters). Other values may be specified if the server being contacted supports them. @type redirectLimit: int @ivar redirectLimit: The maximum number of HTTP redirects that can occur before it is assumed that the redirection is endless. @type afterFoundGet: C{bool} @ivar afterFoundGet: Deviate from the HTTP 1.1 RFC by handling redirects the same way as most web browsers; if the request method is POST and a 302 status is encountered, the redirect is followed with a GET method @type _redirectCount: int @ivar _redirectCount: The current number of HTTP redirects encountered. 
""" protocol = HTTPPageGetter url = None scheme = None host = '' port = None path = None def __init__(self, url, method='GET', postdata=None, headers=None, agent="Twisted PageGetter", timeout=0, cookies=None, followRedirect=True, redirectLimit=20, afterFoundGet=False): self.followRedirect = followRedirect self.redirectLimit = redirectLimit self._redirectCount = 0 self.timeout = timeout self.agent = agent self.afterFoundGet = afterFoundGet if cookies is None: cookies = {} self.cookies = cookies if headers is not None: self.headers = InsensitiveDict(headers) else: self.headers = InsensitiveDict() if postdata is not None: self.headers.setdefault('Content-Length', len(postdata)) # just in case a broken http/1.1 decides to keep connection alive self.headers.setdefault("connection", "close") self.postdata = postdata self.method = method self.setURL(url) self.waiting = 1 self.deferred = defer.Deferred() self.response_headers = None def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.url) def setURL(self, url): self.url = url scheme, host, port, path = _parse(url) if scheme and host: self.scheme = scheme self.host = host self.port = port self.path = path def buildProtocol(self, addr): p = protocol.ClientFactory.buildProtocol(self, addr) p.followRedirect = self.followRedirect p.afterFoundGet = self.afterFoundGet if self.timeout: timeoutCall = reactor.callLater(self.timeout, p.timeout) self.deferred.addBoth(self._cancelTimeout, timeoutCall) return p def _cancelTimeout(self, result, timeoutCall): if timeoutCall.active(): timeoutCall.cancel() return result def gotHeaders(self, headers): self.response_headers = headers if headers.has_key('set-cookie'): for cookie in headers['set-cookie']: cookparts = cookie.split(';') cook = cookparts[0] cook.lstrip() k, v = cook.split('=', 1) self.cookies[k.lstrip()] = v.lstrip() def gotStatus(self, version, status, message): self.version, self.status, self.message = version, status, message def page(self, page): if 
self.waiting: self.waiting = 0 self.deferred.callback(page) def noPage(self, reason): if self.waiting: self.waiting = 0 self.deferred.errback(reason) def clientConnectionFailed(self, _, reason): if self.waiting: self.waiting = 0 self.deferred.errback(reason) class HTTPDownloader(HTTPClientFactory): """Download to a file.""" protocol = HTTPPageDownloader value = None def __init__(self, url, fileOrName, method='GET', postdata=None, headers=None, agent="Twisted client", supportPartial=0, timeout=0, cookies=None, followRedirect=1, redirectLimit=20): self.requestedPartial = 0 if isinstance(fileOrName, types.StringTypes): self.fileName = fileOrName self.file = None if supportPartial and os.path.exists(self.fileName): fileLength = os.path.getsize(self.fileName) if fileLength: self.requestedPartial = fileLength if headers == None: headers = {} headers["range"] = "bytes=%d-" % fileLength else: self.file = fileOrName HTTPClientFactory.__init__( self, url, method=method, postdata=postdata, headers=headers, agent=agent, timeout=timeout, cookies=cookies, followRedirect=followRedirect, redirectLimit=redirectLimit) def gotHeaders(self, headers): HTTPClientFactory.gotHeaders(self, headers) if self.requestedPartial: contentRange = headers.get("content-range", None) if not contentRange: # server doesn't support partial requests, oh well self.requestedPartial = 0 return start, end, realLength = http.parseContentRange(contentRange[0]) if start != self.requestedPartial: # server is acting wierdly self.requestedPartial = 0 def openFile(self, partialContent): if partialContent: file = open(self.fileName, 'rb+') file.seek(0, 2) else: file = open(self.fileName, 'wb') return file def pageStart(self, partialContent): """Called on page download start. @param partialContent: tells us if the download is partial download we requested. """ if partialContent and not self.requestedPartial: raise ValueError, "we shouldn't get partial content response if we didn't want it!" 
if self.waiting: try: if not self.file: self.file = self.openFile(partialContent) except IOError: #raise self.deferred.errback(failure.Failure()) def pagePart(self, data): if not self.file: return try: self.file.write(data) except IOError: #raise self.file = None self.deferred.errback(failure.Failure()) def noPage(self, reason): """ Close the storage file and errback the waiting L{Deferred} with the given reason. """ if self.waiting: self.waiting = 0 if self.file: try: self.file.close() except: log.err(None, "Error closing HTTPDownloader file") self.deferred.errback(reason) def pageEnd(self): self.waiting = 0 if not self.file: return try: self.file.close() except IOError: self.deferred.errback(failure.Failure()) return self.deferred.callback(self.value) def _parse(url, defaultPort=None): """ Split the given URL into the scheme, host, port, and path. @type url: C{str} @param url: An URL to parse. @type defaultPort: C{int} or C{None} @param defaultPort: An alternate value to use as the port if the URL does not include one. @return: A four-tuple of the scheme, host, port, and path of the URL. All of these are C{str} instances except for port, which is an C{int}. """ url = url.strip() parsed = http.urlparse(url) scheme = parsed[0] path = urlunparse(('', '') + parsed[2:]) if defaultPort is None: if scheme == 'https': defaultPort = 443 else: defaultPort = 80 host, port = parsed[1], defaultPort if ':' in host: host, port = host.split(':') try: port = int(port) except ValueError: port = defaultPort if path == '': path = '/' return scheme, host, port, path def _makeGetterFactory(url, factoryFactory, contextFactory=None, *args, **kwargs): """ Create and connect an HTTP page getting factory. Any additional positional or keyword arguments are used when calling C{factoryFactory}. 
@param factoryFactory: Factory factory that is called with C{url}, C{args} and C{kwargs} to produce the getter @param contextFactory: Context factory to use when creating a secure connection, defaulting to C{None} @return: The factory created by C{factoryFactory} """ scheme, host, port, path = _parse(url) factory = factoryFactory(url, *args, **kwargs) if scheme == 'https': from twisted.internet import ssl if contextFactory is None: contextFactory = ssl.ClientContextFactory() reactor.connectSSL(host, port, factory, contextFactory) else: reactor.connectTCP(host, port, factory) return factory def getPage(url, contextFactory=None, *args, **kwargs): """ Download a web page as a string. Download a page. Return a deferred, which will callback with a page (as a string) or errback with a description of the error. See L{HTTPClientFactory} to see what extra arguments can be passed. """ return _makeGetterFactory( url, HTTPClientFactory, contextFactory=contextFactory, *args, **kwargs).deferred def downloadPage(url, file, contextFactory=None, *args, **kwargs): """ Download a web page to a file. @param file: path to file on filesystem, or file-like object. See HTTPDownloader to see what extra args can be passed. """ factoryFactory = lambda url, *a, **kw: HTTPDownloader(url, file, *a, **kw) return _makeGetterFactory( url, factoryFactory, contextFactory=contextFactory, *args, **kwargs).deferred # The code which follows is based on the new HTTP client implementation. It # should be significantly better than anything above, though it is not yet # feature equivalent. 
from twisted.internet.protocol import ClientCreator
from twisted.web.error import SchemeNotSupported
from twisted.web._newclient import ResponseDone, Request, HTTP11ClientProtocol
from twisted.web._newclient import Response

# If SSL support is unavailable, substitute a context factory whose getContext
# raises, so HTTPS requests fail loudly instead of at import time.
try:
    from twisted.internet.ssl import ClientContextFactory
except ImportError:
    class WebClientContextFactory(object):
        """
        A web context factory which doesn't work because the necessary SSL
        support is missing.
        """
        def getContext(self, hostname, port):
            raise NotImplementedError("SSL support unavailable")
else:
    class WebClientContextFactory(ClientContextFactory):
        """
        A web context factory which ignores the hostname and port and does no
        certificate verification.
        """
        def getContext(self, hostname, port):
            # hostname/port are accepted for interface compatibility but are
            # deliberately unused -- no per-host certificate checking is done.
            return ClientContextFactory.getContext(self)


class _WebToNormalContextFactory(object):
    """
    Adapt a web context factory to a normal context factory.

    @ivar _webContext: A web context factory which accepts a hostname and port
        number to its C{getContext} method.

    @ivar _hostname: The hostname which will be passed to
        C{_webContext.getContext}.

    @ivar _port: The port number which will be passed to
        C{_webContext.getContext}.
    """
    def __init__(self, webContext, hostname, port):
        self._webContext = webContext
        self._hostname = hostname
        self._port = port

    def getContext(self):
        """
        Call the wrapped web context factory's C{getContext} method with a
        hostname and port number and return the resulting context object.
        """
        return self._webContext.getContext(self._hostname, self._port)


class Agent(object):
    """
    L{Agent} is a very basic HTTP client.  It supports I{HTTP} and I{HTTPS}
    scheme URIs (but performs no certificate checking by default).  It does
    not support persistent connections.

    @ivar _reactor: The L{IReactorTCP} and L{IReactorSSL} implementation which
        will be used to set up connections over which to issue requests.

    @ivar _contextFactory: A web context factory which will be used to create
        SSL context objects for any SSL connections the agent needs to make.

    @since: 9.0
    """
    _protocol = HTTP11ClientProtocol

    # NOTE(review): the default contextFactory is instantiated once at class
    # definition time and shared by every Agent that does not supply its own.
    # Harmless while WebClientContextFactory is stateless, but this is the
    # classic mutable-default-argument pitfall -- confirm before adding state.
    def __init__(self, reactor, contextFactory=WebClientContextFactory()):
        self._reactor = reactor
        self._contextFactory = contextFactory

    def _wrapContextFactory(self, host, port):
        """
        Create and return a normal context factory wrapped around
        C{self._contextFactory} in such a way that C{self._contextFactory} will
        have the host and port information passed to it.

        @param host: A C{str} giving the hostname which will be connected to in
            order to issue a request.

        @param port: An C{int} giving the port number the connection will be
            on.

        @return: A context factory suitable to be passed to
            C{reactor.connectSSL}.
        """
        return _WebToNormalContextFactory(self._contextFactory, host, port)

    def _connect(self, scheme, host, port):
        """
        Connect to the given host and port, using a transport selected based on
        scheme.

        @param scheme: A string like C{'http'} or C{'https'} (the only two
            supported values) to use to determine how to establish the
            connection.

        @param host: A C{str} giving the hostname which will be connected to in
            order to issue a request.

        @param port: An C{int} giving the port number the connection will be
            on.

        @return: A L{Deferred} which fires with a connected instance of
            C{self._protocol}.
        """
        cc = ClientCreator(self._reactor, self._protocol)
        if scheme == 'http':
            d = cc.connectTCP(host, port)
        elif scheme == 'https':
            d = cc.connectSSL(host, port, self._wrapContextFactory(host, port))
        else:
            # Unknown scheme: fail the Deferred rather than raising, so all
            # errors flow through the same errback path.
            d = defer.fail(SchemeNotSupported(
                    "Unsupported scheme: %r" % (scheme,)))
        return d

    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Issue a new request.

        @param method: The request method to send.
        @type method: C{str}

        @param uri: The request URI send.
        @type uri: C{str}

        @param headers: The request headers to send.  If no I{Host} header is
            included, one will be added based on the request URI.
        @type headers: L{Headers}

        @param bodyProducer: An object which will produce the request body or,
            if the request body is to be empty, L{None}.
        @type bodyProducer: L{IBodyProducer} provider

        @return: A L{Deferred} which fires with the result of the request (a
            L{Response} instance), or fails if there is a problem setting up a
            connection over which to issue the request.  It may also fail with
            L{SchemeNotSupported} if the scheme of the given URI is not
            supported.
        @rtype: L{Deferred}
        """
        scheme, host, port, path = _parse(uri)
        # The connection attempt starts immediately; the Host header is
        # computed while the connection is being set up.
        d = self._connect(scheme, host, port)
        if headers is None:
            headers = Headers()
        if not headers.hasHeader('host'):
            # This is a lot of copying.  It might be nice if there were a bit
            # less.
            headers = Headers(dict(headers.getAllRawHeaders()))
            headers.addRawHeader(
                'host', self._computeHostValue(scheme, host, port))
        def cbConnected(proto):
            return proto.request(Request(method, path, headers, bodyProducer))
        d.addCallback(cbConnected)
        return d

    def _computeHostValue(self, scheme, host, port):
        """
        Compute the string to use for the value of the I{Host} header, based on
        the given scheme, host name, and port number.
        """
        # Default ports are omitted from the Host header, per HTTP convention.
        if (scheme, port) in (('http', 80), ('https', 443)):
            return host
        return '%s:%d' % (host, port)


__all__ = [
    'PartialDownloadError', 'HTTPPageGetter', 'HTTPPageDownloader',
    'HTTPClientFactory', 'HTTPDownloader', 'getPage', 'downloadPage',

    'ResponseDone', 'Response', 'Agent']
apache-2.0
slyphon/pants
tests/python/pants_test/backend/jvm/subsystems/test_shader.py
8
7720
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import tempfile import unittest from pants.backend.jvm.subsystems.shader import Shader, Shading from pants.java.distribution.distribution import DistributionLocator from pants.java.executor import SubprocessExecutor from pants.util.contextutil import open_zip from pants.util.dirutil import safe_delete from pants_test.subsystem.subsystem_util import subsystem_instance class ShaderTest(unittest.TestCase): def setUp(self): self.jarjar = '/not/really/jarjar.jar' with subsystem_instance(DistributionLocator): executor = SubprocessExecutor(DistributionLocator.cached()) self.shader = Shader(jarjar_classpath=[self.jarjar], executor=executor) self.output_jar = '/not/really/shaded.jar' def populate_input_jar(self, *entries): fd, input_jar_path = tempfile.mkstemp() os.close(fd) self.addCleanup(safe_delete, input_jar_path) with open_zip(input_jar_path, 'w') as jar: for entry in entries: jar.writestr(entry, '0xCAFEBABE') return input_jar_path def test_assemble_default_rules(self): input_jar = self.populate_input_jar('org/pantsbuild/tools/fake/Main.class', 'com/google/common/base/Function.class') rules = self.shader.assemble_binary_rules('org.pantsbuild.tools.fake.Main', input_jar) self.assertEqual(Shader.exclude_package('org.pantsbuild.tools.fake'), rules[0]) self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1]) self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1]) def test_assemble_default_rules_default_package(self): input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class') rules = self.shader.assemble_binary_rules('main', input_jar) self.assertEqual(Shader.exclude_package(), rules[0]) 
self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1]) self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1]) def test_assemble_custom_rules(self): input_jar = self.populate_input_jar('main.class') rules = self.shader.assemble_binary_rules('main', input_jar, custom_rules=[Shader.shade_class('bob'), Shader.exclude_class('fred')]) self.assertEqual(Shader.shade_class('bob'), rules[0]) self.assertEqual(Shader.exclude_class('fred'), rules[1]) self.assertEqual(Shader.exclude_package(), rules[2]) self.assertIn(Shader.exclude_package('javax.annotation'), rules[3:]) def test_runner_command(self): input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class') custom_rules = [Shader.exclude_package('log4j', recursive=True)] with self.shader.binary_shader(self.output_jar, 'main', input_jar, custom_rules=custom_rules) as shader: command = shader.command self.assertTrue(command.pop(0).endswith('java')) jar_or_cp = command.pop(0) self.assertIn(jar_or_cp, {'-cp', 'classpath', '-jar'}) self.assertEqual(self.jarjar, os.path.abspath(command.pop(0))) if jar_or_cp != '-jar': # We don't really care what the name of the jarjar main class is - shader.command[2] command.pop(0) self.assertEqual('process', command.pop(0)) rules_file = command.pop(0) self.assertTrue(os.path.exists(rules_file)) with open(rules_file) as fp: lines = fp.read().splitlines() self.assertEqual('rule log4j.** log4j.@1', lines[0]) # The custom rule. self.assertEqual('rule * @1', lines[1]) # Exclude main's package. self.assertIn('rule javax.annotation.* javax.annotation.@1', lines) # Exclude system. self.assertEqual('rule com.google.common.base.* {}com.google.common.base.@1' .format(Shading.SHADE_PREFIX), lines[-1]) # Shade the rest. 
self.assertEqual(input_jar, command.pop(0)) self.assertEqual(self.output_jar, command.pop(0)) def test_sanitize_package_name(self): def assert_sanitize(name, sanitized): self.assertEqual(sanitized, Shading.Relocate._sanitize_package_name(name)) assert_sanitize('hello', 'hello') assert_sanitize('hello.goodbye', 'hello.goodbye') assert_sanitize('.hello.goodbye', 'hello.goodbye') assert_sanitize('hello.goodbye.', 'hello.goodbye') assert_sanitize('123', '_123') assert_sanitize('123.456', '_123._456') assert_sanitize('123.v2', '_123.v2') assert_sanitize('hello-goodbye', 'hello_goodbye') assert_sanitize('hello-/.goodbye.?', 'hello__.goodbye._') assert_sanitize('one.two..three....four.', 'one.two.three.four') def test_infer_shaded_pattern(self): def assert_inference(from_pattern, prefix, to_pattern): result = ''.join(Shading.Relocate._infer_shaded_pattern_iter(from_pattern, prefix)) self.assertEqual(to_pattern, result) assert_inference('com.foo.bar.Main', None, 'com.foo.bar.Main') assert_inference('com.foo.bar.', None, 'com.foo.bar.') assert_inference('com.foo.bar.', '__prefix__.', '__prefix__.com.foo.bar.') assert_inference('com.*.bar.', None, 'com.@1.bar.') assert_inference('com.*.bar.*.', None, 'com.@1.bar.@2.') assert_inference('com.*.bar.**', None, 'com.@1.bar.@2') assert_inference('*', None, '@1') assert_inference('**', None, '@1') assert_inference('**', '__prefix__.', '__prefix__.@1') def test_shading_exclude(self): def assert_exclude(from_pattern, to_pattern): self.assertEqual((from_pattern, to_pattern), Shading.Exclude.new(from_pattern).rule()) assert_exclude('com.foo.bar.Main', 'com.foo.bar.Main') assert_exclude('com.foo.bar.**', 'com.foo.bar.@1') assert_exclude('com.*.bar.**', 'com.@1.bar.@2') def test_shading_exclude_package(self): self.assertEqual(('com.foo.bar.**', 'com.foo.bar.@1'), Shading.ExcludePackage.new('com.foo.bar').rule()) self.assertEqual(('com.foo.bar.*', 'com.foo.bar.@1'), Shading.ExcludePackage.new('com.foo.bar', recursive=False).rule()) def 
test_relocate(self): self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)), Shading.Relocate.new(from_pattern='com.foo.bar.**').rule()) self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format('__my_prefix__.')), Shading.Relocate.new(from_pattern='com.foo.bar.**', shade_prefix='__my_prefix__.').rule()) self.assertEqual(('com.foo.bar.**', 'org.biz.baz.@1'.format('__my_prefix__.')), Shading.Relocate.new(from_pattern='com.foo.bar.**', shade_prefix='__my_prefix__.', shade_pattern='org.biz.baz.@1').rule()) def test_relocate_package(self): self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)), Shading.RelocatePackage.new('com.foo.bar').rule()) self.assertEqual(('com.foo.bar.*', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)), Shading.RelocatePackage.new('com.foo.bar', recursive=False).rule()) self.assertEqual(('com.foo.bar.**', '__p__.com.foo.bar.@1'), Shading.RelocatePackage.new('com.foo.bar', shade_prefix='__p__.').rule())
apache-2.0
sjsafranek/pomegranate
app/urls.py
1
1034
#!/usr/bin/python # Import the utility functions from the URL handling library from django.conf.urls import url # Import auth_views for login and logout methods from django.contrib.auth import views as auth_views from . import views from . import api_imports from . import api_exports from . import apiV2 urlpatterns = [ url(r'^$', auth_views.login, { "template_name" : "login.html"}), url(r'^logout/$', auth_views.logout, { "next_page" : '/'}), url(r'^map$', views.map), url(r'^api/v1/zone$', api_imports.zone_info), url(r'^api/v1/room$', api_imports.room_info), url(r'^api/v1/furniture$', api_imports.furniture_info), url(r'^api/v1/person$', api_imports.person_info), url(r'^api/v1/zones/csv$', api_exports.zone_export_csv), url(r'^api/v1/persons/csv$', api_exports.person_export_csv), url(r'^api/v1/furniture/geojson$', api_exports.furniture_export_geojson), url(r'^api/v1/furniture/csv$', api_exports.furniture_export_csv), url(r'^api/v2/zones/export$', apiV2.zone_export_multithread) ]
mit
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/SQLAlchemy-0.9.7-py2.7-linux-x86_64.egg/sqlalchemy/util/_collections.py
11
26052
# util/_collections.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Collection classes and helpers.""" from __future__ import absolute_import import weakref import operator from .compat import threading, itertools_filterfalse from . import py2k import types EMPTY_SET = frozenset() class KeyedTuple(tuple): """``tuple`` subclass that adds labeled names. E.g.:: >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"]) >>> k.one 1 >>> k.two 2 Result rows returned by :class:`.Query` that contain multiple ORM entities and/or column expressions make use of this class to return rows. The :class:`.KeyedTuple` exhibits similar behavior to the ``collections.namedtuple()`` construct provided in the Python standard library, however is architected very differently. Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` is does not rely on creation of custom subtypes in order to represent a new series of keys, instead each :class:`.KeyedTuple` instance receives its list of keys in place. The subtype approach of ``collections.namedtuple()`` introduces significant complexity and performance overhead, which is not necessary for the :class:`.Query` object's use case. .. versionchanged:: 0.8 Compatibility methods with ``collections.namedtuple()`` have been added including :attr:`.KeyedTuple._fields` and :meth:`.KeyedTuple._asdict`. .. seealso:: :ref:`ormtutorial_querying` """ def __new__(cls, vals, labels=None): t = tuple.__new__(cls, vals) t._labels = [] if labels: t.__dict__.update(zip(labels, vals)) t._labels = labels return t def keys(self): """Return a list of string key names for this :class:`.KeyedTuple`. .. seealso:: :attr:`.KeyedTuple._fields` """ return [l for l in self._labels if l is not None] @property def _fields(self): """Return a tuple of string key names for this :class:`.KeyedTuple`. 
This method provides compatibility with ``collections.namedtuple()``. .. versionadded:: 0.8 .. seealso:: :meth:`.KeyedTuple.keys` """ return tuple(self.keys()) def _asdict(self): """Return the contents of this :class:`.KeyedTuple` as a dictionary. This method provides compatibility with ``collections.namedtuple()``, with the exception that the dictionary returned is **not** ordered. .. versionadded:: 0.8 """ return dict((key, self.__dict__[key]) for key in self.keys()) class ImmutableContainer(object): def _immutable(self, *arg, **kw): raise TypeError("%s object is immutable" % self.__class__.__name__) __delitem__ = __setitem__ = __setattr__ = _immutable class immutabledict(ImmutableContainer, dict): clear = pop = popitem = setdefault = \ update = ImmutableContainer._immutable def __new__(cls, *args): new = dict.__new__(cls) dict.__init__(new, *args) return new def __init__(self, *args): pass def __reduce__(self): return immutabledict, (dict(self), ) def union(self, d): if not self: return immutabledict(d) else: d2 = immutabledict(self) dict.update(d2, d) return d2 def __repr__(self): return "immutabledict(%s)" % dict.__repr__(self) class Properties(object): """Provide a __getattr__/__setattr__ interface over a dict.""" def __init__(self, data): self.__dict__['_data'] = data def __len__(self): return len(self._data) def __iter__(self): return iter(list(self._data.values())) def __add__(self, other): return list(self) + list(other) def __setitem__(self, key, object): self._data[key] = object def __getitem__(self, key): return self._data[key] def __delitem__(self, key): del self._data[key] def __setattr__(self, key, object): self._data[key] = object def __getstate__(self): return {'_data': self.__dict__['_data']} def __setstate__(self, state): self.__dict__['_data'] = state['_data'] def __getattr__(self, key): try: return self._data[key] except KeyError: raise AttributeError(key) def __contains__(self, key): return key in self._data def as_immutable(self): """Return 
an immutable proxy for this :class:`.Properties`.""" return ImmutableProperties(self._data) def update(self, value): self._data.update(value) def get(self, key, default=None): if key in self: return self[key] else: return default def keys(self): return list(self._data) def values(self): return list(self._data.values()) def items(self): return list(self._data.items()) def has_key(self, key): return key in self._data def clear(self): self._data.clear() class OrderedProperties(Properties): """Provide a __getattr__/__setattr__ interface with an OrderedDict as backing store.""" def __init__(self): Properties.__init__(self, OrderedDict()) class ImmutableProperties(ImmutableContainer, Properties): """Provide immutable dict/object attribute to an underlying dictionary.""" class OrderedDict(dict): """A dict that returns keys/values/items in the order they were added.""" def __init__(self, ____sequence=None, **kwargs): self._list = [] if ____sequence is None: if kwargs: self.update(**kwargs) else: self.update(____sequence, **kwargs) def clear(self): self._list = [] dict.clear(self) def copy(self): return self.__copy__() def __copy__(self): return OrderedDict(self) def sort(self, *arg, **kw): self._list.sort(*arg, **kw) def update(self, ____sequence=None, **kwargs): if ____sequence is not None: if hasattr(____sequence, 'keys'): for key in ____sequence.keys(): self.__setitem__(key, ____sequence[key]) else: for key, value in ____sequence: self[key] = value if kwargs: self.update(kwargs) def setdefault(self, key, value): if key not in self: self.__setitem__(key, value) return value else: return self.__getitem__(key) def __iter__(self): return iter(self._list) if py2k: def values(self): return [self[key] for key in self._list] def keys(self): return self._list def itervalues(self): return iter([self[key] for key in self._list]) def iterkeys(self): return iter(self) def iteritems(self): return iter(self.items()) def items(self): return [(key, self[key]) for key in self._list] 
else: def values(self): # return (self[key] for key in self) return (self[key] for key in self._list) def keys(self): # return iter(self) return iter(self._list) def items(self): # return ((key, self[key]) for key in self) return ((key, self[key]) for key in self._list) _debug_iter = False if _debug_iter: # normally disabled to reduce function call # overhead def __iter__(self): len_ = len(self._list) for item in self._list: yield item assert len_ == len(self._list), \ "Dictionary changed size during iteration" def values(self): return (self[key] for key in self) def keys(self): return iter(self) def items(self): return ((key, self[key]) for key in self) def __setitem__(self, key, object): if key not in self: try: self._list.append(key) except AttributeError: # work around Python pickle loads() with # dict subclass (seems to ignore __setstate__?) self._list = [key] dict.__setitem__(self, key, object) def __delitem__(self, key): dict.__delitem__(self, key) self._list.remove(key) def pop(self, key, *default): present = key in self value = dict.pop(self, key, *default) if present: self._list.remove(key) return value def popitem(self): item = dict.popitem(self) self._list.remove(item[0]) return item class OrderedSet(set): def __init__(self, d=None): set.__init__(self) self._list = [] if d is not None: self.update(d) def add(self, element): if element not in self: self._list.append(element) set.add(self, element) def remove(self, element): set.remove(self, element) self._list.remove(element) def insert(self, pos, element): if element not in self: self._list.insert(pos, element) set.add(self, element) def discard(self, element): if element in self: self._list.remove(element) set.remove(self, element) def clear(self): set.clear(self) self._list = [] def __getitem__(self, key): return self._list[key] def __iter__(self): return iter(self._list) def __add__(self, other): return self.union(other) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._list) 
__str__ = __repr__ def update(self, iterable): for e in iterable: if e not in self: self._list.append(e) set.add(self, e) return self __ior__ = update def union(self, other): result = self.__class__(self) result.update(other) return result __or__ = union def intersection(self, other): other = set(other) return self.__class__(a for a in self if a in other) __and__ = intersection def symmetric_difference(self, other): other = set(other) result = self.__class__(a for a in self if a not in other) result.update(a for a in other if a not in self) return result __xor__ = symmetric_difference def difference(self, other): other = set(other) return self.__class__(a for a in self if a not in other) __sub__ = difference def intersection_update(self, other): other = set(other) set.intersection_update(self, other) self._list = [a for a in self._list if a in other] return self __iand__ = intersection_update def symmetric_difference_update(self, other): set.symmetric_difference_update(self, other) self._list = [a for a in self._list if a in self] self._list += [a for a in other._list if a in self] return self __ixor__ = symmetric_difference_update def difference_update(self, other): set.difference_update(self, other) self._list = [a for a in self._list if a in self] return self __isub__ = difference_update class IdentitySet(object): """A set that considers only object id() for uniqueness. This strategy has edge cases for builtin types- it's possible to have two 'foo' strings in one of these sets, for example. Use sparingly. 
""" _working_set = set def __init__(self, iterable=None): self._members = dict() if iterable: for o in iterable: self.add(o) def add(self, value): self._members[id(value)] = value def __contains__(self, value): return id(value) in self._members def remove(self, value): del self._members[id(value)] def discard(self, value): try: self.remove(value) except KeyError: pass def pop(self): try: pair = self._members.popitem() return pair[1] except KeyError: raise KeyError('pop from an empty set') def clear(self): self._members.clear() def __cmp__(self, other): raise TypeError('cannot compare sets using cmp()') def __eq__(self, other): if isinstance(other, IdentitySet): return self._members == other._members else: return False def __ne__(self, other): if isinstance(other, IdentitySet): return self._members != other._members else: return True def issubset(self, iterable): other = type(self)(iterable) if len(self) > len(other): return False for m in itertools_filterfalse(other._members.__contains__, iter(self._members.keys())): return False return True def __le__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issubset(other) def __lt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) < len(other) and self.issubset(other) def issuperset(self, iterable): other = type(self)(iterable) if len(self) < len(other): return False for m in itertools_filterfalse(self._members.__contains__, iter(other._members.keys())): return False return True def __ge__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issuperset(other) def __gt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) > len(other) and self.issuperset(other) def union(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).union(other)) 
return result def __or__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.union(other) def update(self, iterable): self._members = self.union(iterable)._members def __ior__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.update(other) return self def difference(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).difference(other)) return result def __sub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.difference(other) def difference_update(self, iterable): self._members = self.difference(iterable)._members def __isub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.difference_update(other) return self def intersection(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).intersection(other)) return result def __and__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.intersection(other) def intersection_update(self, iterable): self._members = self.intersection(iterable)._members def __iand__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.intersection_update(other) return self def symmetric_difference(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update( self._working_set(members).symmetric_difference(other)) return result def _member_id_tuples(self): return ((id(v), v) for v in self._members.values()) def __xor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.symmetric_difference(other) def symmetric_difference_update(self, iterable): self._members = 
self.symmetric_difference(iterable)._members def __ixor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.symmetric_difference(other) return self def copy(self): return type(self)(iter(self._members.values())) __copy__ = copy def __len__(self): return len(self._members) def __iter__(self): return iter(self._members.values()) def __hash__(self): raise TypeError('set objects are unhashable') def __repr__(self): return '%s(%r)' % (type(self).__name__, list(self._members.values())) class WeakSequence(object): def __init__(self, __elements=()): self._storage = [ weakref.ref(element, self._remove) for element in __elements ] def append(self, item): self._storage.append(weakref.ref(item, self._remove)) def _remove(self, ref): self._storage.remove(ref) def __len__(self): return len(self._storage) def __iter__(self): return (obj for obj in (ref() for ref in self._storage) if obj is not None) def __getitem__(self, index): try: obj = self._storage[index] except KeyError: raise IndexError("Index %s out of range" % index) else: return obj() class OrderedIdentitySet(IdentitySet): class _working_set(OrderedSet): # a testing pragma: exempt the OIDS working set from the test suite's # "never call the user's __hash__" assertions. this is a big hammer, # but it's safe here: IDS operates on (id, instance) tuples in the # working set. __sa_hash_exempt__ = True def __init__(self, iterable=None): IdentitySet.__init__(self) self._members = OrderedDict() if iterable: for o in iterable: self.add(o) class PopulateDict(dict): """A dict which populates missing values via a creation function. Note the creation function takes a key, unlike collections.defaultdict. """ def __init__(self, creator): self.creator = creator def __missing__(self, key): self[key] = val = self.creator(key) return val # Define collections that are capable of storing # ColumnElement objects as hashable keys/elements. 
# At this point, these are mostly historical, things # used to be more complicated. column_set = set column_dict = dict ordered_column_set = OrderedSet populate_column_dict = PopulateDict def unique_list(seq, hashfunc=None): seen = {} if not hashfunc: return [x for x in seq if x not in seen and not seen.__setitem__(x, True)] else: return [x for x in seq if hashfunc(x) not in seen and not seen.__setitem__(hashfunc(x), True)] class UniqueAppender(object): """Appends items to a collection ensuring uniqueness. Additional appends() of the same object are ignored. Membership is determined by identity (``is a``) not equality (``==``). """ def __init__(self, data, via=None): self.data = data self._unique = {} if via: self._data_appender = getattr(data, via) elif hasattr(data, 'append'): self._data_appender = data.append elif hasattr(data, 'add'): self._data_appender = data.add def append(self, item): id_ = id(item) if id_ not in self._unique: self._data_appender(item) self._unique[id_] = True def __iter__(self): return iter(self.data) def coerce_generator_arg(arg): if len(arg) == 1 and isinstance(arg[0], types.GeneratorType): return list(arg[0]) else: return arg def to_list(x, default=None): if x is None: return default if not isinstance(x, (list, tuple)): return [x] else: return x def to_set(x): if x is None: return set() if not isinstance(x, set): return set(to_list(x)) else: return x def to_column_set(x): if x is None: return column_set() if not isinstance(x, column_set): return column_set(to_list(x)) else: return x def update_copy(d, _new=None, **kw): """Copy the given dict and update with the given values.""" d = d.copy() if _new: d.update(_new) d.update(**kw) return d def flatten_iterator(x): """Given an iterator of which further sub-elements may also be iterators, flatten the sub-elements into a single iterator. 
""" for elem in x: if not isinstance(elem, str) and hasattr(elem, '__iter__'): for y in flatten_iterator(elem): yield y else: yield elem class LRUCache(dict): """Dictionary with 'squishy' removal of least recently used items. """ def __init__(self, capacity=100, threshold=.5): self.capacity = capacity self.threshold = threshold self._counter = 0 def _inc_counter(self): self._counter += 1 return self._counter def __getitem__(self, key): item = dict.__getitem__(self, key) item[2] = self._inc_counter() return item[1] def values(self): return [i[1] for i in dict.values(self)] def setdefault(self, key, value): if key in self: return self[key] else: self[key] = value return value def __setitem__(self, key, value): item = dict.get(self, key) if item is None: item = [key, value, self._inc_counter()] dict.__setitem__(self, key, item) else: item[1] = value self._manage_size() def _manage_size(self): while len(self) > self.capacity + self.capacity * self.threshold: by_counter = sorted(dict.values(self), key=operator.itemgetter(2), reverse=True) for item in by_counter[self.capacity:]: try: del self[item[0]] except KeyError: # if we couldn't find a key, most # likely some other thread broke in # on us. loop around and try again break class ScopedRegistry(object): """A Registry that can store one or multiple instances of a single class on the basis of a "scope" function. The object implements ``__call__`` as the "getter", so by calling ``myregistry()`` the contained object is returned for the current scope. :param createfunc: a callable that returns a new object to be placed in the registry :param scopefunc: a callable that will return a key to store/retrieve an object. """ def __init__(self, createfunc, scopefunc): """Construct a new :class:`.ScopedRegistry`. :param createfunc: A creation function that will generate a new value for the current scope, if none is present. 
:param scopefunc: A function that returns a hashable token representing the current scope (such as, current thread identifier). """ self.createfunc = createfunc self.scopefunc = scopefunc self.registry = {} def __call__(self): key = self.scopefunc() try: return self.registry[key] except KeyError: return self.registry.setdefault(key, self.createfunc()) def has(self): """Return True if an object is present in the current scope.""" return self.scopefunc() in self.registry def set(self, obj): """Set the value for the current scope.""" self.registry[self.scopefunc()] = obj def clear(self): """Clear the current scope, if any.""" try: del self.registry[self.scopefunc()] except KeyError: pass class ThreadLocalRegistry(ScopedRegistry): """A :class:`.ScopedRegistry` that uses a ``threading.local()`` variable for storage. """ def __init__(self, createfunc): self.createfunc = createfunc self.registry = threading.local() def __call__(self): try: return self.registry.value except AttributeError: val = self.registry.value = self.createfunc() return val def has(self): return hasattr(self.registry, "value") def set(self, obj): self.registry.value = obj def clear(self): try: del self.registry.value except AttributeError: pass def _iter_id(iterable): """Generator: ((id(o), o) for o in iterable).""" for item in iterable: yield id(item), item
apache-2.0
girving/tensorflow
tensorflow/contrib/cloud/python/ops/gcs_config_ops.py
23
6964
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """GCS file system configuration for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from tensorflow.contrib.cloud.python.ops import gen_gcs_config_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.training import training # @tf_export('contrib.cloud.BlockCacheParams') class BlockCacheParams(object): """BlockCacheParams is a struct used for configuring the GCS Block Cache.""" def __init__(self, block_size=None, max_bytes=None, max_staleness=None): self._block_size = block_size or 128 * 1024 * 1024 self._max_bytes = max_bytes or 2 * self._block_size self._max_staleness = max_staleness or 0 @property def block_size(self): return self._block_size @property def max_bytes(self): return self._max_bytes @property def max_staleness(self): return self._max_staleness # @tf_export('contrib.cloud.ConfigureGcsHook') class ConfigureGcsHook(training.SessionRunHook): """ConfigureGcsHook configures GCS when used with Estimator/TPUEstimator. Warning: GCS `credentials` may be transmitted over the network unencrypted. Please ensure that the network is trusted before using this function. 
For users running code entirely within Google Cloud, your data is protected by encryption in between data centers. For more information, please take a look at https://cloud.google.com/security/encryption-in-transit/. Example: ``` sess = tf.Session() refresh_token = raw_input("Refresh token: ") client_secret = raw_input("Client secret: ") client_id = "<REDACTED>" creds = { "client_id": client_id, "refresh_token": refresh_token, "client_secret": client_secret, "type": "authorized_user", } tf.contrib.cloud.configure_gcs(sess, credentials=creds) ``` """ def _verify_dictionary(self, creds_dict): if 'refresh_token' in creds_dict or 'private_key' in creds_dict: return True return False def __init__(self, credentials=None, block_cache=None): """Constructs a ConfigureGcsHook. Args: credentials: A json-formatted string. block_cache: A `BlockCacheParams` Raises: ValueError: If credentials is improperly formatted or block_cache is not a BlockCacheParams. """ if credentials is not None: if isinstance(credentials, str): try: data = json.loads(credentials) except ValueError as e: raise ValueError('credentials was not a well formed JSON string.', e) if not self._verify_dictionary(data): raise ValueError( 'credentials has neither a "refresh_token" nor a "private_key" ' 'field.') elif isinstance(credentials, dict): if not self._verify_dictionary(credentials): raise ValueError('credentials has neither a "refresh_token" nor a ' '"private_key" field.') credentials = json.dumps(credentials) else: raise ValueError('credentials is of an unknown type') self._credentials = credentials if block_cache and not isinstance(block_cache, BlockCacheParams): raise ValueError('block_cache must be an instance of BlockCacheParams.') self._block_cache = block_cache def begin(self): if self._credentials: self._credentials_placeholder = array_ops.placeholder(dtypes.string) self._credentials_op = gen_gcs_config_ops.gcs_configure_credentials( self._credentials_placeholder) else: self._credentials_op = None 
if self._block_cache: self._block_cache_op = gen_gcs_config_ops.gcs_configure_block_cache( max_cache_size=self._block_cache.max_bytes, block_size=self._block_cache.block_size, max_staleness=self._block_cache.max_staleness) else: self._block_cache_op = None def after_create_session(self, session, coord): del coord if self._credentials_op: session.run( self._credentials_op, feed_dict={self._credentials_placeholder: self._credentials}) if self._block_cache_op: session.run(self._block_cache_op) def configure_gcs(session, credentials=None, block_cache=None, device=None): """Configures the GCS file system for a given a session. Warning: GCS `credentials` may be transmitted over the network unencrypted. Please ensure that the network is trusted before using this function. For users running code entirely within Google Cloud, your data is protected by encryption in between data centers. For more information, please take a look at https://cloud.google.com/security/encryption-in-transit/. Args: session: A `tf.Session` session that should be used to configure the GCS file system. credentials: [Optional.] A JSON string block_cache: [Optional.] A BlockCacheParams to configure the block cache . device: [Optional.] The device to place the configure ops. 
""" def configure(credentials, block_cache): """Helper function to actually configure GCS.""" if credentials: if isinstance(credentials, dict): credentials = json.dumps(credentials) placeholder = array_ops.placeholder(dtypes.string) op = gen_gcs_config_ops.gcs_configure_credentials(placeholder) session.run(op, feed_dict={placeholder: credentials}) if block_cache: op = gen_gcs_config_ops.gcs_configure_block_cache( max_cache_size=block_cache.max_bytes, block_size=block_cache.block_size, max_staleness=block_cache.max_staleness) session.run(op) if device: with ops.device(device): return configure(credentials, block_cache) return configure(credentials, block_cache) def configure_colab_session(session): """ConfigureColabSession configures the GCS file system in Colab. Args: session: A `tf.Session` session. """ # Read from the application default credentials (adc). adc_filename = os.environ.get( 'GOOGLE_APPLICATION_CREDENTIALS', '/content/adc.json') with open(adc_filename) as f: data = json.load(f) configure_gcs(session, credentials=data)
apache-2.0
DiptoDas8/Biponi
lib/python2.7/site-packages/django/conf/locale/nn/formats.py
197
1810
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
"""Norwegian Nynorsk (nn) locale format definitions.

Module-level constants only, consumed by Django's localization
framework (display formats and accepted input formats).
"""
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
    # '%d. %b %Y', '%d %b %Y',      # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y',    # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y',      # '25. oktober 2006', '25 oktober 2006'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25' (fix: entry was listed twice)
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y',              # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
mit
jgcaaprom/android_external_chromium_org
third_party/protobuf/python/google/protobuf/internal/generator_test.py
253
11418
#! /usr/bin/python # # Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # TODO(robinson): Flesh this out considerably. We focused on reflection_test.py # first, since it's testing the subtler code, and since it provides decent # indirect testing of the protocol compiler output. """Unittest that directly tests the output of the pure-Python protocol compiler. 
See //google/protobuf/reflection_test.py for a test which further ensures that we can use Python protocol message objects as we expect. """ __author__ = 'robinson@google.com (Will Robinson)' import unittest from google.protobuf.internal import test_bad_identifiers_pb2 from google.protobuf import unittest_custom_options_pb2 from google.protobuf import unittest_import_pb2 from google.protobuf import unittest_import_public_pb2 from google.protobuf import unittest_mset_pb2 from google.protobuf import unittest_pb2 from google.protobuf import unittest_no_generic_services_pb2 from google.protobuf import service MAX_EXTENSION = 536870912 class GeneratorTest(unittest.TestCase): def testNestedMessageDescriptor(self): field_name = 'optional_nested_message' proto_type = unittest_pb2.TestAllTypes self.assertEqual( proto_type.NestedMessage.DESCRIPTOR, proto_type.DESCRIPTOR.fields_by_name[field_name].message_type) def testEnums(self): # We test only module-level enums here. # TODO(robinson): Examine descriptors directly to check # enum descriptor output. self.assertEqual(4, unittest_pb2.FOREIGN_FOO) self.assertEqual(5, unittest_pb2.FOREIGN_BAR) self.assertEqual(6, unittest_pb2.FOREIGN_BAZ) proto = unittest_pb2.TestAllTypes() self.assertEqual(1, proto.FOO) self.assertEqual(1, unittest_pb2.TestAllTypes.FOO) self.assertEqual(2, proto.BAR) self.assertEqual(2, unittest_pb2.TestAllTypes.BAR) self.assertEqual(3, proto.BAZ) self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ) def testExtremeDefaultValues(self): message = unittest_pb2.TestExtremeDefaultValues() # Python pre-2.6 does not have isinf() or isnan() functions, so we have # to provide our own. def isnan(val): # NaN is never equal to itself. return val != val def isinf(val): # Infinity times zero equals NaN. 
return not isnan(val) and isnan(val * 0) self.assertTrue(isinf(message.inf_double)) self.assertTrue(message.inf_double > 0) self.assertTrue(isinf(message.neg_inf_double)) self.assertTrue(message.neg_inf_double < 0) self.assertTrue(isnan(message.nan_double)) self.assertTrue(isinf(message.inf_float)) self.assertTrue(message.inf_float > 0) self.assertTrue(isinf(message.neg_inf_float)) self.assertTrue(message.neg_inf_float < 0) self.assertTrue(isnan(message.nan_float)) self.assertEqual("? ? ?? ?? ??? ??/ ??-", message.cpp_trigraph) def testHasDefaultValues(self): desc = unittest_pb2.TestAllTypes.DESCRIPTOR expected_has_default_by_name = { 'optional_int32': False, 'repeated_int32': False, 'optional_nested_message': False, 'default_int32': True, } has_default_by_name = dict( [(f.name, f.has_default_value) for f in desc.fields if f.name in expected_has_default_by_name]) self.assertEqual(expected_has_default_by_name, has_default_by_name) def testContainingTypeBehaviorForExtensions(self): self.assertEqual(unittest_pb2.optional_int32_extension.containing_type, unittest_pb2.TestAllExtensions.DESCRIPTOR) self.assertEqual(unittest_pb2.TestRequired.single.containing_type, unittest_pb2.TestAllExtensions.DESCRIPTOR) def testExtensionScope(self): self.assertEqual(unittest_pb2.optional_int32_extension.extension_scope, None) self.assertEqual(unittest_pb2.TestRequired.single.extension_scope, unittest_pb2.TestRequired.DESCRIPTOR) def testIsExtension(self): self.assertTrue(unittest_pb2.optional_int32_extension.is_extension) self.assertTrue(unittest_pb2.TestRequired.single.is_extension) message_descriptor = unittest_pb2.TestRequired.DESCRIPTOR non_extension_descriptor = message_descriptor.fields_by_name['a'] self.assertTrue(not non_extension_descriptor.is_extension) def testOptions(self): proto = unittest_mset_pb2.TestMessageSet() self.assertTrue(proto.DESCRIPTOR.GetOptions().message_set_wire_format) def testMessageWithCustomOptions(self): proto = 
unittest_custom_options_pb2.TestMessageWithCustomOptions() enum_options = proto.DESCRIPTOR.enum_types_by_name['AnEnum'].GetOptions() self.assertTrue(enum_options is not None) # TODO(gps): We really should test for the presense of the enum_opt1 # extension and for its value to be set to -789. def testNestedTypes(self): self.assertEquals( set(unittest_pb2.TestAllTypes.DESCRIPTOR.nested_types), set([ unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR, unittest_pb2.TestAllTypes.OptionalGroup.DESCRIPTOR, unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR, ])) self.assertEqual(unittest_pb2.TestEmptyMessage.DESCRIPTOR.nested_types, []) self.assertEqual( unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.nested_types, []) def testContainingType(self): self.assertTrue( unittest_pb2.TestEmptyMessage.DESCRIPTOR.containing_type is None) self.assertTrue( unittest_pb2.TestAllTypes.DESCRIPTOR.containing_type is None) self.assertEqual( unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type, unittest_pb2.TestAllTypes.DESCRIPTOR) self.assertEqual( unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type, unittest_pb2.TestAllTypes.DESCRIPTOR) self.assertEqual( unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR.containing_type, unittest_pb2.TestAllTypes.DESCRIPTOR) def testContainingTypeInEnumDescriptor(self): self.assertTrue(unittest_pb2._FOREIGNENUM.containing_type is None) self.assertEqual(unittest_pb2._TESTALLTYPES_NESTEDENUM.containing_type, unittest_pb2.TestAllTypes.DESCRIPTOR) def testPackage(self): self.assertEqual( unittest_pb2.TestAllTypes.DESCRIPTOR.file.package, 'protobuf_unittest') desc = unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR self.assertEqual(desc.file.package, 'protobuf_unittest') self.assertEqual( unittest_import_pb2.ImportMessage.DESCRIPTOR.file.package, 'protobuf_unittest_import') self.assertEqual( unittest_pb2._FOREIGNENUM.file.package, 'protobuf_unittest') self.assertEqual( unittest_pb2._TESTALLTYPES_NESTEDENUM.file.package, 
'protobuf_unittest') self.assertEqual( unittest_import_pb2._IMPORTENUM.file.package, 'protobuf_unittest_import') def testExtensionRange(self): self.assertEqual( unittest_pb2.TestAllTypes.DESCRIPTOR.extension_ranges, []) self.assertEqual( unittest_pb2.TestAllExtensions.DESCRIPTOR.extension_ranges, [(1, MAX_EXTENSION)]) self.assertEqual( unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR.extension_ranges, [(42, 43), (4143, 4244), (65536, MAX_EXTENSION)]) def testFileDescriptor(self): self.assertEqual(unittest_pb2.DESCRIPTOR.name, 'google/protobuf/unittest.proto') self.assertEqual(unittest_pb2.DESCRIPTOR.package, 'protobuf_unittest') self.assertFalse(unittest_pb2.DESCRIPTOR.serialized_pb is None) def testNoGenericServices(self): self.assertTrue(hasattr(unittest_no_generic_services_pb2, "TestMessage")) self.assertTrue(hasattr(unittest_no_generic_services_pb2, "FOO")) self.assertTrue(hasattr(unittest_no_generic_services_pb2, "test_extension")) # Make sure unittest_no_generic_services_pb2 has no services subclassing # Proto2 Service class. if hasattr(unittest_no_generic_services_pb2, "TestService"): self.assertFalse(issubclass(unittest_no_generic_services_pb2.TestService, service.Service)) def testMessageTypesByName(self): file_type = unittest_pb2.DESCRIPTOR self.assertEqual( unittest_pb2._TESTALLTYPES, file_type.message_types_by_name[unittest_pb2._TESTALLTYPES.name]) # Nested messages shouldn't be included in the message_types_by_name # dictionary (like in the C++ API). self.assertFalse( unittest_pb2._TESTALLTYPES_NESTEDMESSAGE.name in file_type.message_types_by_name) def testPublicImports(self): # Test public imports as embedded message. all_type_proto = unittest_pb2.TestAllTypes() self.assertEqual(0, all_type_proto.optional_public_import_message.e) # PublicImportMessage is actually defined in unittest_import_public_pb2 # module, and is public imported by unittest_import_pb2 module. 
public_import_proto = unittest_import_pb2.PublicImportMessage() self.assertEqual(0, public_import_proto.e) self.assertTrue(unittest_import_public_pb2.PublicImportMessage is unittest_import_pb2.PublicImportMessage) def testBadIdentifiers(self): # We're just testing that the code was imported without problems. message = test_bad_identifiers_pb2.TestBadIdentifiers() self.assertEqual(message.Extensions[test_bad_identifiers_pb2.message], "foo") self.assertEqual(message.Extensions[test_bad_identifiers_pb2.descriptor], "bar") self.assertEqual(message.Extensions[test_bad_identifiers_pb2.reflection], "baz") self.assertEqual(message.Extensions[test_bad_identifiers_pb2.service], "qux") if __name__ == '__main__': unittest.main()
bsd-3-clause
randyzingle/tools
kub/services/archive/cdk/python/sample-app/.env/lib/python3.6/site-packages/pip/_internal/commands/configuration.py
10
7226
# The following comment should be removed at some point in the future. # mypy: disallow-untyped-defs=False import logging import os import subprocess from pip._internal.cli.base_command import Command from pip._internal.cli.status_codes import ERROR, SUCCESS from pip._internal.configuration import ( Configuration, get_configuration_files, kinds, ) from pip._internal.exceptions import PipError from pip._internal.utils.misc import get_prog, write_output logger = logging.getLogger(__name__) class ConfigurationCommand(Command): """Manage local and global configuration. Subcommands: list: List the active configuration (or from the file specified) edit: Edit the configuration file in an editor get: Get the value associated with name set: Set the name=value unset: Unset the value associated with name If none of --user, --global and --site are passed, a virtual environment configuration file is used if one is active and the file exists. Otherwise, all modifications happen on the to the user file by default. """ ignore_require_venv = True usage = """ %prog [<file-option>] list %prog [<file-option>] [--editor <editor-path>] edit %prog [<file-option>] get name %prog [<file-option>] set name value %prog [<file-option>] unset name """ def __init__(self, *args, **kwargs): super(ConfigurationCommand, self).__init__(*args, **kwargs) self.configuration = None self.cmd_opts.add_option( '--editor', dest='editor', action='store', default=None, help=( 'Editor to use to edit the file. Uses VISUAL or EDITOR ' 'environment variables if not provided.' 
) ) self.cmd_opts.add_option( '--global', dest='global_file', action='store_true', default=False, help='Use the system-wide configuration file only' ) self.cmd_opts.add_option( '--user', dest='user_file', action='store_true', default=False, help='Use the user configuration file only' ) self.cmd_opts.add_option( '--site', dest='site_file', action='store_true', default=False, help='Use the current environment configuration file only' ) self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): handlers = { "list": self.list_values, "edit": self.open_in_editor, "get": self.get_name, "set": self.set_name_value, "unset": self.unset_name } # Determine action if not args or args[0] not in handlers: logger.error("Need an action ({}) to perform.".format( ", ".join(sorted(handlers))) ) return ERROR action = args[0] # Determine which configuration files are to be loaded # Depends on whether the command is modifying. try: load_only = self._determine_file( options, need_value=(action in ["get", "set", "unset", "edit"]) ) except PipError as e: logger.error(e.args[0]) return ERROR # Load a new configuration self.configuration = Configuration( isolated=options.isolated_mode, load_only=load_only ) self.configuration.load() # Error handling happens here, not in the action-handlers. try: handlers[action](options, args[1:]) except PipError as e: logger.error(e.args[0]) return ERROR return SUCCESS def _determine_file(self, options, need_value): file_options = [key for key, value in ( (kinds.USER, options.user_file), (kinds.GLOBAL, options.global_file), (kinds.SITE, options.site_file), ) if value] if not file_options: if not need_value: return None # Default to user, unless there's a site file. 
elif any( os.path.exists(site_config_file) for site_config_file in get_configuration_files()[kinds.SITE] ): return kinds.SITE else: return kinds.USER elif len(file_options) == 1: return file_options[0] raise PipError( "Need exactly one file to operate upon " "(--user, --site, --global) to perform." ) def list_values(self, options, args): self._get_n_args(args, "list", n=0) for key, value in sorted(self.configuration.items()): write_output("%s=%r", key, value) def get_name(self, options, args): key = self._get_n_args(args, "get [name]", n=1) value = self.configuration.get_value(key) write_output("%s", value) def set_name_value(self, options, args): key, value = self._get_n_args(args, "set [name] [value]", n=2) self.configuration.set_value(key, value) self._save_configuration() def unset_name(self, options, args): key = self._get_n_args(args, "unset [name]", n=1) self.configuration.unset_value(key) self._save_configuration() def open_in_editor(self, options, args): editor = self._determine_editor(options) fname = self.configuration.get_file_to_edit() if fname is None: raise PipError("Could not determine appropriate file.") try: subprocess.check_call([editor, fname]) except subprocess.CalledProcessError as e: raise PipError( "Editor Subprocess exited with exit code {}" .format(e.returncode) ) def _get_n_args(self, args, example, n): """Helper to make sure the command got the right number of arguments """ if len(args) != n: msg = ( 'Got unexpected number of arguments, expected {}. ' '(example: "{} config {}")' ).format(n, get_prog(), example) raise PipError(msg) if n == 1: return args[0] else: return args def _save_configuration(self): # We successfully ran a modifying command. Need to save the # configuration. try: self.configuration.save() except Exception: logger.error( "Unable to save configuration. 
Please report this as a bug.", exc_info=1 ) raise PipError("Internal Error.") def _determine_editor(self, options): if options.editor is not None: return options.editor elif "VISUAL" in os.environ: return os.environ["VISUAL"] elif "EDITOR" in os.environ: return os.environ["EDITOR"] else: raise PipError("Could not determine editor to use.")
apache-2.0
dennisss/sympy
sympy/physics/quantum/tests/test_qexpr.py
120
1457
from sympy import Symbol, Integer
from sympy.physics.quantum.qexpr import QExpr, _qsympify_sequence
from sympy.physics.quantum.hilbert import HilbertSpace
from sympy.core.containers import Tuple

# Ordinary (commutative) symbols used as QExpr labels throughout the tests;
# QExpr itself is expected to be noncommutative regardless of its labels.
x = Symbol('x')
y = Symbol('y')


def test_qexpr_new():
    """QExpr construction sympifies labels into a tuple, defaults the
    Hilbert space to HilbertSpace(), and yields a noncommutative object."""
    q = QExpr(0)
    assert q.label == (0,)
    assert q.hilbert_space == HilbertSpace()
    assert q.is_commutative is False

    # Multiple label arguments are each sympified.
    q = QExpr(0, 1)
    assert q.label == (Integer(0), Integer(1))

    # _new_rawargs takes the Hilbert space first, then the raw label args.
    q = QExpr._new_rawargs(HilbertSpace(), Integer(0), Integer(1))
    assert q.label == (Integer(0), Integer(1))
    assert q.hilbert_space == HilbertSpace()


def test_qexpr_commutative():
    """QExpr instances must not commute under multiplication."""
    q1 = QExpr(x)
    q2 = QExpr(y)
    assert q1.is_commutative is False
    assert q2.is_commutative is False
    assert q1*q2 != q2*q1

    # Even when built from raw args, the result stays noncommutative.
    q = QExpr._new_rawargs(0, 1, HilbertSpace())
    assert q.is_commutative is False


def test_qexpr_commutative_free_symbols():
    """Free symbols extracted from a QExpr are themselves noncommutative."""
    q1 = QExpr(x)
    assert q1.free_symbols.pop().is_commutative is False

    # String labels are turned into symbols with the same property.
    q2 = QExpr('q2')
    assert q2.free_symbols.pop().is_commutative is False


def test_qexpr_subs():
    """Substitution reaches into QExpr labels, in both call forms."""
    q1 = QExpr(x, y)
    assert q1.subs(x, y) == QExpr(y, y)
    assert q1.subs({x: 1, y: 2}) == QExpr(1, 2)


def test_qsympify():
    """_qsympify_sequence converts nested lists/tuples into nested Tuples,
    sympifying the leaves and preserving the outer tuple structure."""
    assert _qsympify_sequence([[1, 2], [1, 3]]) == (Tuple(1, 2), Tuple(1, 3))
    assert _qsympify_sequence(([1, 2, [3, 4, [2, ]], 1], 3)) == \
        (Tuple(1, 2, Tuple(3, 4, Tuple(2,)), 1), 3)
    assert _qsympify_sequence((1,)) == (1,)
bsd-3-clause
Altaf-Mahdi/i9505
scripts/build-all.py
1182
9486
#! /usr/bin/env python # Copyright (c) 2009-2011, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. # # TODO: Accept arguments to indicate what to build. 
import glob from optparse import OptionParser import subprocess import os import os.path import shutil import sys version = 'build-all.py, version 0.01' build_dir = '../all-kernels' make_command = ["vmlinux", "modules"] make_env = os.environ make_env.update({ 'ARCH': 'arm', 'CROSS_COMPILE': 'arm-none-linux-gnueabi-', 'KCONFIG_NOTIMESTAMP': 'true' }) all_options = {} def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) defconfig = open(file, 'a') defconfig.write(str + '\n') defconfig.close() def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = {} for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/qsd*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/apq*_defconfig'): names[os.path.basename(n)[:-10]] = n return names class Builder: def __init__(self, logname): self.logname = logname self.fd = open(logname, 'w') def run(self, args): devnull = open('/dev/null', 'r') proc = subprocess.Popen(args, stdin=devnull, env=make_env, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) count = 0 # for line in proc.stdout: rawfd = proc.stdout.fileno() while True: line = os.read(rawfd, 1024) if not line: break self.fd.write(line) self.fd.flush() if all_options.verbose: sys.stdout.write(line) sys.stdout.flush() else: for i in range(line.count('\n')): count += 1 
if count == 64: count = 0 print sys.stdout.write('.') sys.stdout.flush() print result = proc.wait() self.fd.close() return result failed_targets = [] def build(target): dest_dir = os.path.join(build_dir, target) log_name = '%s/log-%s.log' % (build_dir, target) print 'Building %s in %s log %s' % (target, dest_dir, log_name) if not os.path.isdir(dest_dir): os.mkdir(dest_dir) defconfig = 'arch/arm/configs/%s_defconfig' % target dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir shutil.copyfile(defconfig, dotconfig) devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, '%s_defconfig' % target], env=make_env, stdin=devnull) devnull.close() if not all_options.updateconfigs: build = Builder(log_name) result = build.run(['make', 'O=%s' % dest_dir] + make_command) if result != 0: if all_options.keep_going: failed_targets.append(target) fail_or_error = error else: fail_or_error = fail fail_or_error("Failed to build %s, see %s" % (target, build.logname)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=make_env, stdin=devnull) devnull.close() shutil.copyfile(savedefconfig, defconfig) def build_many(allconf, targets): print "Building %d target(s)" % len(targets) for target in targets: if all_options.updateconfigs: update_config(allconf[target], all_options.updateconfigs) build(target) if failed_targets: fail('\n '.join(["Failed targets:"] + [target for target in failed_targets])) def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... 
-- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs.keys(): print " %s" % target sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if options.jobs: make_command.append("-j%d" % options.jobs) if options.load_average: make_command.append("-l%d" % options.load_average) if args == ['all']: build_many(configs, configs.keys()) elif args == ['perf']: targets = [] for t in configs.keys(): if "perf" in t: targets.append(t) build_many(configs, targets) elif args == ['noperf']: targets = [] for t in configs.keys(): if "perf" not in t: 
targets.append(t) build_many(configs, targets) elif len(args) > 0: targets = [] for t in args: if t not in configs.keys(): parser.error("Target '%s' not one of %s" % (t, configs.keys())) targets.append(t) build_many(configs, targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
gpl-2.0
samuelhavron/heroku-buildpack-python
Python-3.4.3/Lib/test/test_ensurepip.py
75
11305
import unittest import unittest.mock import test.support import os import os.path import contextlib import sys import ensurepip import ensurepip._uninstall # pip currently requires ssl support, so we ensure we handle # it being missing (http://bugs.python.org/issue19744) ensurepip_no_ssl = test.support.import_fresh_module("ensurepip", blocked=["ssl"]) try: import ssl except ImportError: def requires_usable_pip(f): deco = unittest.skip(ensurepip._MISSING_SSL_MESSAGE) return deco(f) else: def requires_usable_pip(f): return f class TestEnsurePipVersion(unittest.TestCase): def test_returns_version(self): self.assertEqual(ensurepip._PIP_VERSION, ensurepip.version()) class EnsurepipMixin: def setUp(self): run_pip_patch = unittest.mock.patch("ensurepip._run_pip") self.run_pip = run_pip_patch.start() self.addCleanup(run_pip_patch.stop) # Avoid side effects on the actual os module real_devnull = os.devnull os_patch = unittest.mock.patch("ensurepip.os") patched_os = os_patch.start() self.addCleanup(os_patch.stop) patched_os.devnull = real_devnull patched_os.path = os.path self.os_environ = patched_os.environ = os.environ.copy() class TestBootstrap(EnsurepipMixin, unittest.TestCase): @requires_usable_pip def test_basic_bootstrapping(self): ensurepip.bootstrap() self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "setuptools", "pip", ], unittest.mock.ANY, ) additional_paths = self.run_pip.call_args[0][1] self.assertEqual(len(additional_paths), 2) @requires_usable_pip def test_bootstrapping_with_root(self): ensurepip.bootstrap(root="/foo/bar/") self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "--root", "/foo/bar/", "setuptools", "pip", ], unittest.mock.ANY, ) @requires_usable_pip def test_bootstrapping_with_user(self): ensurepip.bootstrap(user=True) self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "--user", "setuptools", "pip", ], 
unittest.mock.ANY, ) @requires_usable_pip def test_bootstrapping_with_upgrade(self): ensurepip.bootstrap(upgrade=True) self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "--upgrade", "setuptools", "pip", ], unittest.mock.ANY, ) @requires_usable_pip def test_bootstrapping_with_verbosity_1(self): ensurepip.bootstrap(verbosity=1) self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "-v", "setuptools", "pip", ], unittest.mock.ANY, ) @requires_usable_pip def test_bootstrapping_with_verbosity_2(self): ensurepip.bootstrap(verbosity=2) self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "-vv", "setuptools", "pip", ], unittest.mock.ANY, ) @requires_usable_pip def test_bootstrapping_with_verbosity_3(self): ensurepip.bootstrap(verbosity=3) self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "-vvv", "setuptools", "pip", ], unittest.mock.ANY, ) @requires_usable_pip def test_bootstrapping_with_regular_install(self): ensurepip.bootstrap() self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "install") @requires_usable_pip def test_bootstrapping_with_alt_install(self): ensurepip.bootstrap(altinstall=True) self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "altinstall") @requires_usable_pip def test_bootstrapping_with_default_pip(self): ensurepip.bootstrap(default_pip=True) self.assertNotIn("ENSUREPIP_OPTIONS", self.os_environ) def test_altinstall_default_pip_conflict(self): with self.assertRaises(ValueError): ensurepip.bootstrap(altinstall=True, default_pip=True) self.assertFalse(self.run_pip.called) @requires_usable_pip def test_pip_environment_variables_removed(self): # ensurepip deliberately ignores all pip environment variables # See http://bugs.python.org/issue19734 for details self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "test fodder" ensurepip.bootstrap() 
self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ) @requires_usable_pip def test_pip_config_file_disabled(self): # ensurepip deliberately ignores the pip config file # See http://bugs.python.org/issue20053 for details ensurepip.bootstrap() self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull) @contextlib.contextmanager def fake_pip(version=ensurepip._PIP_VERSION): if version is None: pip = None else: class FakePip(): __version__ = version pip = FakePip() sentinel = object() orig_pip = sys.modules.get("pip", sentinel) sys.modules["pip"] = pip try: yield pip finally: if orig_pip is sentinel: del sys.modules["pip"] else: sys.modules["pip"] = orig_pip class TestUninstall(EnsurepipMixin, unittest.TestCase): def test_uninstall_skipped_when_not_installed(self): with fake_pip(None): ensurepip._uninstall_helper() self.assertFalse(self.run_pip.called) def test_uninstall_skipped_with_warning_for_wrong_version(self): with fake_pip("not a valid version"): with test.support.captured_stderr() as stderr: ensurepip._uninstall_helper() warning = stderr.getvalue().strip() self.assertIn("only uninstall a matching version", warning) self.assertFalse(self.run_pip.called) @requires_usable_pip def test_uninstall(self): with fake_pip(): ensurepip._uninstall_helper() self.run_pip.assert_called_once_with( ["uninstall", "-y", "pip", "setuptools"] ) @requires_usable_pip def test_uninstall_with_verbosity_1(self): with fake_pip(): ensurepip._uninstall_helper(verbosity=1) self.run_pip.assert_called_once_with( ["uninstall", "-y", "-v", "pip", "setuptools"] ) @requires_usable_pip def test_uninstall_with_verbosity_2(self): with fake_pip(): ensurepip._uninstall_helper(verbosity=2) self.run_pip.assert_called_once_with( ["uninstall", "-y", "-vv", "pip", "setuptools"] ) @requires_usable_pip def test_uninstall_with_verbosity_3(self): with fake_pip(): ensurepip._uninstall_helper(verbosity=3) self.run_pip.assert_called_once_with( ["uninstall", "-y", "-vvv", "pip", "setuptools"] ) 
@requires_usable_pip def test_pip_environment_variables_removed(self): # ensurepip deliberately ignores all pip environment variables # See http://bugs.python.org/issue19734 for details self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "test fodder" with fake_pip(): ensurepip._uninstall_helper() self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ) @requires_usable_pip def test_pip_config_file_disabled(self): # ensurepip deliberately ignores the pip config file # See http://bugs.python.org/issue20053 for details with fake_pip(): ensurepip._uninstall_helper() self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull) class TestMissingSSL(EnsurepipMixin, unittest.TestCase): def setUp(self): sys.modules["ensurepip"] = ensurepip_no_ssl @self.addCleanup def restore_module(): sys.modules["ensurepip"] = ensurepip super().setUp() def test_bootstrap_requires_ssl(self): self.os_environ["PIP_THIS_SHOULD_STAY"] = "test fodder" with self.assertRaisesRegex(RuntimeError, "requires SSL/TLS"): ensurepip_no_ssl.bootstrap() self.assertFalse(self.run_pip.called) self.assertIn("PIP_THIS_SHOULD_STAY", self.os_environ) def test_uninstall_requires_ssl(self): self.os_environ["PIP_THIS_SHOULD_STAY"] = "test fodder" with self.assertRaisesRegex(RuntimeError, "requires SSL/TLS"): with fake_pip(): ensurepip_no_ssl._uninstall_helper() self.assertFalse(self.run_pip.called) self.assertIn("PIP_THIS_SHOULD_STAY", self.os_environ) def test_main_exits_early_with_warning(self): with test.support.captured_stderr() as stderr: ensurepip_no_ssl._main(["--version"]) warning = stderr.getvalue().strip() self.assertTrue(warning.endswith("requires SSL/TLS"), warning) self.assertFalse(self.run_pip.called) # Basic testing of the main functions and their argument parsing EXPECTED_VERSION_OUTPUT = "pip " + ensurepip._PIP_VERSION class TestBootstrappingMainFunction(EnsurepipMixin, unittest.TestCase): @requires_usable_pip def test_bootstrap_version(self): with test.support.captured_stdout() as stdout: with 
self.assertRaises(SystemExit): ensurepip._main(["--version"]) result = stdout.getvalue().strip() self.assertEqual(result, EXPECTED_VERSION_OUTPUT) self.assertFalse(self.run_pip.called) @requires_usable_pip def test_basic_bootstrapping(self): ensurepip._main([]) self.run_pip.assert_called_once_with( [ "install", "--no-index", "--find-links", unittest.mock.ANY, "setuptools", "pip", ], unittest.mock.ANY, ) additional_paths = self.run_pip.call_args[0][1] self.assertEqual(len(additional_paths), 2) class TestUninstallationMainFunction(EnsurepipMixin, unittest.TestCase): def test_uninstall_version(self): with test.support.captured_stdout() as stdout: with self.assertRaises(SystemExit): ensurepip._uninstall._main(["--version"]) result = stdout.getvalue().strip() self.assertEqual(result, EXPECTED_VERSION_OUTPUT) self.assertFalse(self.run_pip.called) @requires_usable_pip def test_basic_uninstall(self): with fake_pip(): ensurepip._uninstall._main([]) self.run_pip.assert_called_once_with( ["uninstall", "-y", "pip", "setuptools"] ) if __name__ == "__main__": test.support.run_unittest(__name__)
mit
bitglue/shinysdr
shinysdr/i/blocks.py
1
17157
# Copyright 2013, 2014, 2015, 2016, 2017 Kevin Reid <kpreid@switchb.org> # # This file is part of ShinySDR. # # ShinySDR is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ShinySDR is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ShinySDR. If not, see <http://www.gnu.org/licenses/>. """ GNU Radio flowgraph blocks for use by ShinySDR. This module is not an external API and not guaranteed to have a stable interface. """ from __future__ import absolute_import, division import math import os from zope.interface import Interface, implementer from gnuradio import gr from gnuradio import blocks from gnuradio.fft import fft_vcc, window as windows from shinysdr.filters import make_resampler from shinysdr.math import to_dB from shinysdr.signals import SignalType from shinysdr.types import BulkDataT, RangeT from shinysdr import units from shinysdr.values import ExportedState, LooseCell, StreamCell, exported_value, setter class RecursiveLockBlockMixin(object): """ For top blocks needing recursive locking and/or a notification to restart parts. 
""" __lock_count = 0 def _recursive_lock_hook(self): """To override.""" def _recursive_lock(self): # gnuradio uses a non-recursive lock, which is not adequate for our purposes because we want to make changes locally or globally without worrying about having a single lock entry point if self.__lock_count == 0: self.lock() self._recursive_lock_hook() self.__lock_count += 1 def _recursive_unlock(self): self.__lock_count -= 1 if self.__lock_count == 0: self.unlock() class Context(object): """ Client facet for RecursiveLockBlockMixin. """ def __init__(self, top): self.__top = top def lock(self): self.__top._recursive_lock() def unlock(self): self.__top._recursive_unlock() # TODO: This function is used by plugins. Put it in an appropriate module. def make_sink_to_process_stdin(process, itemsize=gr.sizeof_char): """Given a twisted Process, connect a sink to its stdin.""" fd_owned_by_twisted = process.pipes[0].fileno() # TODO: More public way to do this? fd_owned_by_sink = os.dup(fd_owned_by_twisted) process.closeStdin() return blocks.file_descriptor_sink(itemsize, fd_owned_by_sink) class _NoContext(object): def lock(self): pass def unlock(self): pass class MessageDistributorSink(gr.hier_block2): """Like gnuradio.blocks.message_sink, but copies its messages to a dynamic set of queues and saves the most recent item. 
Never blocks.""" def __init__(self, itemsize, context, migrate=None, notify=None): gr.hier_block2.__init__( self, type(self).__name__, gr.io_signature(1, 1, itemsize), gr.io_signature(0, 0, 0), ) self.__itemsize = itemsize self.__context = _NoContext() self.__peek = blocks.probe_signal_vb(itemsize) self.__subscriptions = {} self.__notify = None self.connect(self, self.__peek) if migrate is not None: assert isinstance(migrate, MessageDistributorSink) # sanity check for queue in migrate.__subscriptions.keys(): migrate.unsubscribe(queue) self.subscribe(queue) # set now, not earlier, so as not to trigger anything while migrating self.__context = context self.__notify = notify def get(self): return self.__peek.level() def get_subscription_count(self): return len(self.__subscriptions) def subscribe(self, queue): assert queue not in self.__subscriptions sink = blocks.message_sink(self.__itemsize, queue, True) self.__subscriptions[queue] = sink try: self.__context.lock() self.connect(self, sink) finally: self.__context.unlock() if self.__notify: self.__notify() def unsubscribe(self, queue): sink = self.__subscriptions[queue] del self.__subscriptions[queue] try: self.__context.lock() self.disconnect(self, sink) finally: self.__context.unlock() if self.__notify: self.__notify() _maximum_fft_rate = 500 class _OverlappedStreamToVector(gr.hier_block2): """ Block which is like gnuradio.blocks.stream_to_vector, but generates vectors which are overlapping segments of the input, multiplying the overall number of samples by a specified factor. """ # A disadvantage of our implementation strategy is that, because blocks.interleave does, we will always generate output vectors in bursts (of size = factor) rather than smoothly. def __init__(self, size, factor, itemsize=gr.sizeof_gr_complex): """ size: (int) vector size (FFT size) of next block factor: (int) output will have this many more samples than input If size is not divisible by factor, then the output will necessarily have jitter. 
""" size = int(size) factor = int(factor) # assert size % factor == 0 offset = size // factor gr.hier_block2.__init__( self, type(self).__name__, gr.io_signature(1, 1, itemsize), gr.io_signature(1, 1, itemsize * size), ) if factor == 1: # No duplication needed; simplify flowgraph self.connect(self, blocks.stream_to_vector(itemsize, size), self) else: interleave = blocks.interleave(itemsize * size) self.connect(interleave, self) for i in xrange(0, factor): self.connect( self, blocks.delay(itemsize, (factor - 1 - i) * offset), blocks.stream_to_vector(itemsize, size), (interleave, i)) class IMonitor(Interface): """Marker interface for client UI. Note that this is also implemented on the client for the local audio monitor. """ @implementer(IMonitor) class MonitorSink(gr.hier_block2, ExportedState): """Convenience wrapper around all the bits and pieces to display the signal spectrum to the client. The units of the FFT output are dB power/Hz (power spectral density) relative to unit amplitude (i.e. dBFS assuming the source clips at +/-1). Note this is different from the standard logpwrfft result of power _per bin_, which would be undesirably dependent on the sample rate and bin size. 
""" def __init__(self, signal_type=None, enable_scope=False, freq_resolution=4096, time_length=2048, frame_rate=30.0, input_center_freq=0.0, paused=False, context=None): assert isinstance(signal_type, SignalType) assert context is not None itemsize = signal_type.get_itemsize() gr.hier_block2.__init__( self, type(self).__name__, gr.io_signature(1, 1, itemsize), gr.io_signature(0, 0, 0), ) # constant parameters self.__power_offset = 40 # TODO autoset or controllable self.__itemsize = itemsize self.__context = context self.__enable_scope = enable_scope # settable parameters self.__signal_type = signal_type self.__freq_resolution = int(freq_resolution) self.__time_length = int(time_length) self.__frame_rate = float(frame_rate) self.__input_center_freq = float(input_center_freq) self.__paused = bool(paused) self.__interested_cell = LooseCell(key='interested', type=bool, value=False, writable=False, persists=False) # stuff created by __do_connect self.__gate = None self.__fft_sink = None self.__scope_sink = None self.__frame_dec = None self.__frame_rate_to_decimation_conversion = 0.0 self.__do_connect() def state_def(self): for d in super(MonitorSink, self).state_def(): yield d # TODO make this possible to be decorator style yield 'fft', StreamCell(self, 'fft', type=BulkDataT(array_format='b', info_format='dff'), label='Spectrum') yield 'scope', StreamCell(self, 'scope', type=BulkDataT(array_format='f', info_format='d'), label='Scope') def __do_connect(self): if self.__signal_type.is_analytic(): input_length = self.__freq_resolution output_length = self.__freq_resolution self.__after_fft = None else: # use vector_to_streams to cut the output in half and discard the redundant part input_length = self.__freq_resolution * 2 output_length = self.__freq_resolution self.__after_fft = blocks.vector_to_streams(itemsize=output_length * gr.sizeof_float, nstreams=2) sample_rate = self.__signal_type.get_sample_rate() overlap_factor = int(math.ceil(_maximum_fft_rate * input_length / 
sample_rate)) # sanity limit -- OverlapGimmick is not free overlap_factor = min(16, overlap_factor) self.__frame_rate_to_decimation_conversion = sample_rate * overlap_factor / input_length self.__gate = blocks.copy(gr.sizeof_gr_complex) self.__gate.set_enabled(not self.__paused) overlapper = _OverlappedStreamToVector( size=input_length, factor=overlap_factor, itemsize=self.__itemsize) self.__frame_dec = blocks.keep_one_in_n( itemsize=gr.sizeof_gr_complex * input_length, n=int(round(self.__frame_rate_to_decimation_conversion / self.__frame_rate))) # the actual FFT logic, which is similar to GR's logpwrfft_c window = windows.blackmanharris(input_length) window_power = sum(x * x for x in window) # TODO: use fft_vfc when applicable fft_block = fft_vcc( fft_size=input_length, forward=True, window=window) mag_squared = blocks.complex_to_mag_squared(input_length) logarithmizer = blocks.nlog10_ff( n=10, # the "deci" in "decibel" vlen=input_length, k=( -to_dB(window_power) + # compensate for window -to_dB(sample_rate) + # convert from power-per-sample to power-per-Hz self.__power_offset # offset for packing into bytes )) # It would make slightly more sense to use unsigned chars, but blocks.float_to_uchar does not support vlen. 
self.__fft_converter = blocks.float_to_char(vlen=self.__freq_resolution, scale=1.0) self.__fft_sink = MessageDistributorSink( itemsize=output_length * gr.sizeof_char, context=self.__context, migrate=self.__fft_sink, notify=self.__update_interested) self.__scope_sink = MessageDistributorSink( itemsize=self.__time_length * gr.sizeof_gr_complex, context=self.__context, migrate=self.__scope_sink, notify=self.__update_interested) scope_chunker = blocks.stream_to_vector_decimator( item_size=gr.sizeof_gr_complex, sample_rate=sample_rate, vec_rate=self.__frame_rate, # TODO doesn't need to be coupled vec_len=self.__time_length) # connect everything self.__context.lock() try: self.disconnect_all() self.connect( self, self.__gate, overlapper, self.__frame_dec, fft_block, mag_squared, logarithmizer) if self.__after_fft is not None: self.connect(logarithmizer, self.__after_fft) self.connect(self.__after_fft, self.__fft_converter, self.__fft_sink) self.connect((self.__after_fft, 1), blocks.null_sink(gr.sizeof_float * self.__freq_resolution)) else: self.connect(logarithmizer, self.__fft_converter, self.__fft_sink) if self.__enable_scope: self.connect( self.__gate, scope_chunker, self.__scope_sink) finally: self.__context.unlock() # non-exported def get_interested_cell(self): return self.__interested_cell def __update_interested(self): self.__interested_cell.set_internal(not self.__paused and ( self.__fft_sink.get_subscription_count() > 0 or self.__scope_sink.get_subscription_count() > 0)) @exported_value(type=SignalType, changes='explicit') def get_signal_type(self): return self.__signal_type # non-exported def set_signal_type(self, value): # TODO: don't rebuild if the rate did not change and the spectrum-sidedness of the type did not change assert self.__signal_type.compatible_items(value) self.__signal_type = value self.__do_connect() self.state_changed('signal_type') # non-exported def set_input_center_freq(self, value): self.__input_center_freq = float(value) @exported_value( 
type=RangeT([(2, 4096)], logarithmic=True, integer=True), changes='this_setter', label='Resolution', description='Frequency domain resolution; number of FFT bins.') def get_freq_resolution(self): return self.__freq_resolution @setter def set_freq_resolution(self, freq_resolution): self.__freq_resolution = freq_resolution self.__do_connect() @exported_value(type=RangeT([(1, 4096)], logarithmic=True, integer=True), changes='this_setter') def get_time_length(self): return self.__time_length @setter def set_time_length(self, value): self.__time_length = value self.__do_connect() @exported_value( type=RangeT([(1, _maximum_fft_rate)], unit=units.Hz, logarithmic=True, integer=False), changes='this_setter', label='Rate', description='Number of FFT frames per second.') def get_frame_rate(self): return self.__frame_rate @setter def set_frame_rate(self, value): n = int(round(self.__frame_rate_to_decimation_conversion / value)) self.__frame_dec.set_n(n) # derive effective value by calculating inverse self.__frame_rate = self.__frame_rate_to_decimation_conversion / n @exported_value(type=bool, changes='this_setter', label='Pause') def get_paused(self): return self.__paused @setter def set_paused(self, value): self.__paused = value self.__gate.set_enabled(not value) self.__update_interested() # exported via state_def def get_fft_info(self): return (self.__input_center_freq, self.__signal_type.get_sample_rate(), self.__power_offset) def get_fft_distributor(self): return self.__fft_sink # exported via state_def def get_scope_info(self): return (self.__signal_type.get_sample_rate(),) def get_scope_distributor(self): return self.__scope_sink # this is in shinysdr.i.blocks rather than shinysdr.filters because I don't consider it public (yet?) 
class VectorResampler(gr.hier_block2): def __init__(self, in_rate, out_rate, vlen, complex=False): # pylint: disable=redefined-builtin vitemsize = gr.sizeof_gr_complex if complex else gr.sizeof_float itemsize = vitemsize * vlen gr.hier_block2.__init__( self, type(self).__name__, gr.io_signature(1, 1, itemsize), gr.io_signature(1, 1, itemsize)) if vlen == 1: self.connect(self, make_resampler(in_rate, out_rate, complex=complex), self) else: splitter = blocks.vector_to_streams(vitemsize, vlen) joiner = blocks.streams_to_vector(vitemsize, vlen) self.connect(self, splitter) for ch in xrange(vlen): self.connect( (splitter, ch), make_resampler(in_rate, out_rate, complex=complex), (joiner, ch)) self.connect(joiner, self)
gpl-3.0
rlindner81/pyload
module/lib/simplejson/scanner.py
674
2560
"""JSON token scanner """ import re def _import_c_make_scanner(): try: from simplejson._speedups import make_scanner return make_scanner except ImportError: return None c_make_scanner = _import_c_make_scanner() __all__ = ['make_scanner'] NUMBER_RE = re.compile( r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', (re.VERBOSE | re.MULTILINE | re.DOTALL)) def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match encoding = context.encoding strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook object_pairs_hook = context.object_pairs_hook memo = context.memo def _scan_once(string, idx): try: nextchar = string[idx] except IndexError: raise StopIteration if nextchar == '"': return parse_string(string, idx + 1, encoding, strict) elif nextchar == '{': return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook, object_pairs_hook, memo) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise StopIteration def scan_once(string, idx): try: return _scan_once(string, idx) finally: memo.clear() return 
scan_once make_scanner = c_make_scanner or py_make_scanner
gpl-3.0