hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72cc8541c4c9a534a37b3d44f316d2620756960 | 973 | py | Python | nengo/copter.py | simondlevy/gym-copter | 7236769b7586b92026d4b47f12363258c84d9508 | [
"MIT"
] | 14 | 2019-11-03T05:17:46.000Z | 2022-02-26T05:37:32.000Z | nengo/copter.py | simondlevy/gym-copter | 7236769b7586b92026d4b47f12363258c84d9508 | [
"MIT"
] | 77 | 2020-05-17T01:56:29.000Z | 2021-06-19T02:46:52.000Z | nengo/copter.py | simondlevy/gym-copter | 7236769b7586b92026d4b47f12363258c84d9508 | [
"MIT"
] | 6 | 2020-01-01T07:22:15.000Z | 2021-05-11T17:45:33.000Z | '''
Quadcopter class for Nengo adaptive controller
Copyright (C) 2021 Xuan Choo, Simon D. Levy
MIT License
'''
import nengo
import gym
import numpy as np
from adaptive import run
class Copter:
    """Adapter around the gym-copter 1D hover environment, exposing the
    small interface the Nengo adaptive-control runner expects."""

    def __init__(self, seed=None):
        self.env = gym.make('gym_copter:Hover1D-v0')
        self.reset(seed)

    def reset(self, seed):
        # Restart an episode; the seed argument is accepted but not
        # forwarded to the environment.
        self.state = self.env.reset()

    def step(self, u):
        # Throttle command is constrained to the unit interval.
        u = np.clip(u, 0, 1)
        self.env.render()
        altitude, climb_rate = self.state
        # Negate for NED => ENU
        altitude, climb_rate = -altitude, -climb_rate
        print('%f | %+3.3f %+3.3f' % (u, altitude, climb_rate))
        self.state, _, _, _ = self.env.step((u,))
        return altitude, climb_rate

    def set_extra_force(self, force):
        # Stored for the adaptive controller's disturbance input.
        self.extra_mass = force

    def generate_html(self, desired):
        '''
        No HTML view is produced; the copter is simulated/rendered
        externally by the gym environment.
        '''
        return None
# Build the Nengo network and hand control to the adaptive-control runner
# imported from `adaptive`; the string arguments are presumably display
# labels for the runner's UI — TODO confirm against adaptive.run.
with nengo.Network(seed=3) as model:
    run(Copter, 'Copter', 'Position', 'Wind Force')
| 16.775862 | 63 | 0.572456 |
import nengo
import gym
import numpy as np
from adaptive import run
class Copter:
def __init__(self, seed=None):
self.env = gym.make('gym_copter:Hover1D-v0')
self.reset(seed)
def reset(self, seed):
self.state = self.env.reset()
def step(self, u):
u = np.clip(u, 0, 1)
self.env.render()
z, dz, = self.state
z, dz = -z, -dz
print('%f | %+3.3f %+3.3f' % (u, z, dz))
self.state, _reward, _done, _info = self.env.step((u,))
return z, dz
def set_extra_force(self, force):
self.extra_mass = force
def generate_html(self, desired):
return None
with nengo.Network(seed=3) as model:
run(Copter, 'Copter', 'Position', 'Wind Force')
| true | true |
f72cc9142cb85a864de5beb77fe66a359cf63d16 | 18,754 | py | Python | tools/sr_mapping/bfast_wrapper.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | null | null | null | tools/sr_mapping/bfast_wrapper.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 7 | 2016-12-07T22:19:37.000Z | 2019-01-30T15:04:26.000Z | tools/sr_mapping/bfast_wrapper.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
"""
Runs BFAST on single-end or paired-end data.
TODO: more documentation
TODO:
- auto-detect gzip or bz2
- split options (?)
- queue lengths (?)
- assumes reference always has been indexed
- main and secondary indexes
- scoring matrix file ?
- read group file ?
usage: bfast_wrapper.py [options]
-r, --ref=r: The reference genome to use or index
-f, --fastq=f: The fastq file to use for the mapping
-F, --output=u: The file to save the output (SAM format)
-s, --fileSource=s: Whether to use a previously indexed reference sequence or one from history (indexed or history)
-p, --params=p: Parameter setting to use (pre_set or full)
-n, --numThreads=n: The number of threads to use
-A, --space=A: The encoding space (0: base 1: color)
-o, --offsets=o: The offsets for 'match'
-l, --loadAllIndexes=l: Load all indexes into memory
-k, --keySize=k: truncate key size in 'match'
-K, --maxKeyMatches=K: the maximum number of matches to allow before a key is ignored
-M, --maxNumMatches=M: the maximum number of matches to allow before the read is discarded
-w, --whichStrand=w: the strands to consider (0: both 1: forward 2: reverse)
-t, --timing=t: output timing information to stderr
-u, --ungapped=u: performed ungapped local alignment
-U, --unconstrained=U: performed local alignment without mask constraints
-O, --offset=O: the number of bases before and after each hit to consider in local alignment
-q, --avgMismatchQuality=q: average mismatch quality
-a, --algorithm=a: post processing algorithm (0: no filtering, 1: all passing filters, 2: unique, 3: best scoring unique, 4: best score all)
-P, --disallowPairing=P: do not choose alignments based on pairing
-R, --reverse=R: paired end reads are given on reverse strands
-z, --random=z: output a random best scoring alignment
-D, --dbkey=D: Dbkey for reference genome
-H, --suppressHeader=H: Suppress the sam header
"""
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
def stop_err( msg ):
    """Report *msg* on standard error (with a trailing newline) and
    terminate the script."""
    sys.stderr.write( '{}\n'.format( msg ) )
    sys.exit()
def _run_command( cmd, tmp_dir, buffsize=1048576 ):
    """Run *cmd* through the shell with cwd=tmp_dir, capturing stderr to a
    temporary file.

    On a non-zero exit status, raises Exception carrying the captured
    stderr text. stderr is re-read in *buffsize* chunks so that very large
    outputs are tolerated.
    """
    tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
    tmp_stderr = open( tmp, 'wb' )
    proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
    returncode = proc.wait()
    tmp_stderr.close()
    # get stderr, allowing for case where it's very large
    # (text mode: the original opened 'rb' and concatenated bytes to str,
    # which fails on Python 3)
    stderr = ''
    tmp_stderr = open( tmp, 'r' )
    try:
        while True:
            chunk = tmp_stderr.read( buffsize )
            if not chunk:
                break
            stderr += chunk
    except OverflowError:
        pass
    tmp_stderr.close()
    if returncode != 0:
        raise Exception( stderr )


def __main__():
    """Parse command-line options, optionally build BFAST indexes for the
    reference, then run the bfast match -> localalign -> postprocess
    pipeline, writing SAM output to the requested file.

    All intermediate files live in a private temp directory that is
    removed on exit, success or failure.
    """
    parser = optparse.OptionParser()
    parser.add_option( '-r', '--ref', dest='ref', help='The reference genome to index and use' )
    parser.add_option( '-f', '--fastq', dest='fastq', help='The fastq file to use for the mapping' )
    parser.add_option( '-F', '--output', dest='output', help='The file to save the output (SAM format)' )
    parser.add_option( '-A', '--space', dest='space', type="choice", default='0', choices=('0', '1'), help='The encoding space (0: base 1: color)' )
    parser.add_option( '-H', '--suppressHeader', action="store_true", dest='suppressHeader', default=False, help='Suppress header' )
    parser.add_option( '-n', '--numThreads', dest='numThreads', type="int", default="1", help='The number of threads to use' )
    parser.add_option( '-t', '--timing', action="store_true", default=False, dest='timing', help='output timing information to stderr' )
    parser.add_option( '-l', '--loadAllIndexes', action="store_true", default=False, dest='loadAllIndexes', help='Load all indexes into memory' )
    parser.add_option( '-m', '--indexMask', dest='indexMask', help='String containing info on how to build custom indexes' )
    parser.add_option( "-b", "--buildIndex", action="store_true", dest="buildIndex", default=False, help='String containing info on how to build custom indexes' )
    parser.add_option( "--indexRepeatMasker", action="store_true", dest="indexRepeatMasker", default=False, help='Do not index lower case sequences. Such as those created by RepeatMasker' )
    parser.add_option( '--indexContigOptions', dest='indexContigOptions', default="", help='The contig range options to use for the indexing' )
    parser.add_option( '--indexExonsFileName', dest='indexExonsFileName', default="", help='The exons file to use for the indexing' )
    parser.add_option( '-o', '--offsets', dest='offsets', default="", help='The offsets for \'match\'' )
    parser.add_option( '-k', '--keySize', dest='keySize', type="int", default="-1", help='truncate key size in \'match\'' )
    parser.add_option( '-K', '--maxKeyMatches', dest='maxKeyMatches', type="int", default="-1", help='the maximum number of matches to allow before a key is ignored' )
    parser.add_option( '-M', '--maxNumMatches', dest='maxNumMatches', type="int", default="-1", help='the maximum number of matches to allow before the read is discarded' )
    parser.add_option( '-w', '--whichStrand', dest='whichStrand', type="choice", default='0', choices=('0', '1', '2'), help='the strands to consider (0: both 1: forward 2: reverse)' )
    parser.add_option( '--scoringMatrixFileName', dest='scoringMatrixFileName', help='Scoring Matrix file used to score the alignments' )
    parser.add_option( '-u', '--ungapped', dest='ungapped', action="store_true", default=False, help='performed ungapped local alignment' )
    parser.add_option( '-U', '--unconstrained', dest='unconstrained', action="store_true", default=False, help='performed local alignment without mask constraints' )
    parser.add_option( '-O', '--offset', dest='offset', type="int", default="0", help='the number of bases before and after each hit to consider in local alignment' )
    parser.add_option( '-q', '--avgMismatchQuality', type="int", default="-1", dest='avgMismatchQuality', help='average mismatch quality' )
    parser.add_option( '-a', '--algorithm', dest='algorithm', default='0', type="choice", choices=('0', '1', '2', '3', '4'), help='post processing algorithm (0: no filtering, 1: all passing filters, 2: unique, 3: best scoring unique, 4: best score all' )
    parser.add_option( '--unpaired', dest='unpaired', action="store_true", default=False, help='do not choose alignments based on pairing' )
    parser.add_option( '--reverseStrand', dest='reverseStrand', action="store_true", default=False, help='paired end reads are given on reverse strands' )
    parser.add_option( '--pairedEndInfer', dest='pairedEndInfer', action="store_true", default=False, help='break ties when one end of a paired end read by estimating the insert size distribution' )
    parser.add_option( '--randomBest', dest='randomBest', action="store_true", default=False, help='output a random best scoring alignment' )
    (options, args) = parser.parse_args()

    # output version # of tool (best effort: bfast prints it on its usage screen)
    try:
        tmp = tempfile.NamedTemporaryFile().name
        tmp_stdout = open( tmp, 'wb' )
        proc = subprocess.Popen( args='bfast 2>&1', shell=True, stdout=tmp_stdout )
        tmp_stdout.close()
        proc.wait()
        stdout = None
        for line in open( tmp_stdout.name, 'r' ):
            if line.lower().find( 'version' ) >= 0:
                stdout = line.strip()
                break
        if stdout:
            sys.stdout.write( '%s\n' % stdout )
        else:
            raise Exception
    except Exception:
        sys.stdout.write( 'Could not determine BFAST version\n' )

    buffsize = 1048576

    # make temp directory for bfast, requires trailing slash
    tmp_dir = '%s/' % tempfile.mkdtemp()

    # 'generic' options used in all bfast commands here
    all_cmd_options = "-t" if options.timing else ""

    try:
        if options.buildIndex:
            reference_filepath = tempfile.NamedTemporaryFile( dir=tmp_dir, suffix='.fa' ).name
            # build bfast indexes against a symlinked copy of the reference
            os.symlink( options.ref, reference_filepath )
            # bfast fasta2brg
            try:
                nuc_space = [ "0" ]
                if options.space == "1":
                    # color space localalign appears to require nuc space brg
                    nuc_space.append( "1" )
                for space in nuc_space:
                    cmd = 'bfast fasta2brg -f "%s" -A "%s" %s' % ( reference_filepath, space, all_cmd_options )
                    _run_command( cmd, tmp_dir, buffsize )
            except Exception as e:
                raise Exception('Error in \'bfast fasta2brg\'.\n' + str( e ))
            # bfast index: one run per mask:hash_width pair in --indexMask
            try:
                all_index_cmds = 'bfast index %s -f "%s" -A "%s" -n "%s"' % ( all_cmd_options, reference_filepath, options.space, options.numThreads )
                if options.indexRepeatMasker:
                    all_index_cmds += " -R"
                if options.indexContigOptions:
                    # four comma-separated values: start contig/pos, end contig/pos;
                    # negative values mean "not specified"
                    index_contig_options = [ int(_) for _ in options.indexContigOptions.split( ',' ) ]
                    if index_contig_options[0] >= 0:
                        all_index_cmds += ' -s "%s"' % index_contig_options[0]
                    if index_contig_options[1] >= 0:
                        all_index_cmds += ' -S "%s"' % index_contig_options[1]
                    if index_contig_options[2] >= 0:
                        all_index_cmds += ' -e "%s"' % index_contig_options[2]
                    if index_contig_options[3] >= 0:
                        all_index_cmds += ' -E "%s"' % index_contig_options[3]
                elif options.indexExonsFileName:
                    all_index_cmds += ' -x "%s"' % options.indexExonsFileName
                index_count = 1
                for mask, hash_width in [ mask.split( ':' ) for mask in options.indexMask.split( ',' ) ]:
                    cmd = '%s -m "%s" -w "%s" -i "%i"' % ( all_index_cmds, mask, hash_width, index_count )
                    _run_command( cmd, tmp_dir, buffsize )
                    index_count += 1
            except Exception as e:
                raise Exception('Error in \'bfast index\'.\n' + str( e ))
        else:
            # use a pre-built index; the reference path itself must exist
            reference_filepath = options.ref
        assert reference_filepath and os.path.exists( reference_filepath ), 'A valid genome reference was not provided.'

        # set up aligning and generate aligning command options
        # set up temp output files (NamedTemporaryFile is closed immediately;
        # only the reserved name is used, bfast writes the actual files)
        tmp_bmf = tempfile.NamedTemporaryFile( dir=tmp_dir )
        tmp_bmf_name = tmp_bmf.name
        tmp_bmf.close()
        tmp_baf = tempfile.NamedTemporaryFile( dir=tmp_dir )
        tmp_baf_name = tmp_baf.name
        tmp_baf.close()
        bfast_match_cmd = 'bfast match -f "%s" -r "%s" -n "%s" -A "%s" -T "%s" -w "%s" %s' % ( reference_filepath, options.fastq, options.numThreads, options.space, tmp_dir, options.whichStrand, all_cmd_options )
        bfast_localalign_cmd = 'bfast localalign -f "%s" -m "%s" -n "%s" -A "%s" -o "%s" %s' % ( reference_filepath, tmp_bmf_name, options.numThreads, options.space, options.offset, all_cmd_options )
        bfast_postprocess_cmd = 'bfast postprocess -O 1 -f "%s" -i "%s" -n "%s" -A "%s" -a "%s" %s' % ( reference_filepath, tmp_baf_name, options.numThreads, options.space, options.algorithm, all_cmd_options )
        if options.offsets:
            bfast_match_cmd += ' -o "%s"' % options.offsets
        if options.keySize >= 0:
            bfast_match_cmd += ' -k "%s"' % options.keySize
        if options.maxKeyMatches >= 0:
            bfast_match_cmd += ' -K "%s"' % options.maxKeyMatches
        if options.maxNumMatches >= 0:
            bfast_match_cmd += ' -M "%s"' % options.maxNumMatches
            bfast_localalign_cmd += ' -M "%s"' % options.maxNumMatches
        if options.scoringMatrixFileName:
            bfast_localalign_cmd += ' -x "%s"' % options.scoringMatrixFileName
            bfast_postprocess_cmd += ' -x "%s"' % options.scoringMatrixFileName
        if options.ungapped:
            bfast_localalign_cmd += ' -u'
        if options.unconstrained:
            bfast_localalign_cmd += ' -U'
        if options.avgMismatchQuality >= 0:
            bfast_localalign_cmd += ' -q "%s"' % options.avgMismatchQuality
            bfast_postprocess_cmd += ' -q "%s"' % options.avgMismatchQuality
        # optparse "choice" values are strings, so compare against '3'
        # (the original compared against the int 3, which never matched,
        # silently dropping -P/-z for the best-scoring-unique algorithm)
        if options.algorithm == '3':
            if options.pairedEndInfer:
                bfast_postprocess_cmd += ' -P'
            if options.randomBest:
                bfast_postprocess_cmd += ' -z'
        if options.unpaired:
            bfast_postprocess_cmd += ' -U'
        if options.reverseStrand:
            bfast_postprocess_cmd += ' -R'

        # instead of using temp files, should we stream through pipes?
        bfast_match_cmd += " > %s" % tmp_bmf_name
        bfast_localalign_cmd += " > %s" % tmp_baf_name
        bfast_postprocess_cmd += " > %s" % options.output

        try:
            # bfast 'match'
            try:
                _run_command( bfast_match_cmd, tmp_dir, buffsize )
            except Exception as e:
                raise Exception('Error in \'bfast match\'. \n' + str( e ))
            # bfast 'localalign'
            try:
                _run_command( bfast_localalign_cmd, tmp_dir, buffsize )
            except Exception as e:
                raise Exception('Error in \'bfast localalign\'. \n' + str( e ))
            # bfast 'postprocess'
            try:
                _run_command( bfast_postprocess_cmd, tmp_dir, buffsize )
            except Exception as e:
                raise Exception('Error in \'bfast postprocess\'. \n' + str( e ))
            # remove SAM header lines if requested
            if options.suppressHeader:
                tmp_out = tempfile.NamedTemporaryFile( dir=tmp_dir )
                tmp_out_name = tmp_out.name
                tmp_out.close()
                try:
                    shutil.move( options.output, tmp_out_name )
                except Exception as e:
                    raise Exception('Error moving output file before removing headers. \n' + str( e ))
                fout = open( options.output, 'w' )
                for line in open( tmp_out_name, 'r' ):
                    if len( line ) < 3 or line[0:3] not in [ '@HD', '@SQ', '@RG', '@PG', '@CO' ]:
                        fout.write( line )
                fout.close()
            # check that there are results in the output file
            if os.path.getsize( options.output ) > 0:
                if "0" == options.space:
                    sys.stdout.write( 'BFAST run on Base Space data' )
                else:
                    sys.stdout.write( 'BFAST run on Color Space data' )
            else:
                raise Exception('The output file is empty. You may simply have no matches, or there may be an error with your input file or settings.')
        except Exception as e:
            stop_err( 'The alignment failed.\n' + str( e ) )
    finally:
        # clean up temp dir
        if os.path.exists( tmp_dir ):
            shutil.rmtree( tmp_dir )
# Script entry point: only run the pipeline when executed directly,
# not when imported.
if __name__ == "__main__":
    __main__()
| 53.430199 | 254 | 0.571878 |
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def __main__():
parser = optparse.OptionParser()
parser.add_option( '-r', '--ref', dest='ref', help='The reference genome to index and use' )
parser.add_option( '-f', '--fastq', dest='fastq', help='The fastq file to use for the mapping' )
parser.add_option( '-F', '--output', dest='output', help='The file to save the output (SAM format)' )
parser.add_option( '-A', '--space', dest='space', type="choice", default='0', choices=('0', '1'), help='The encoding space (0: base 1: color)' )
parser.add_option( '-H', '--suppressHeader', action="store_true", dest='suppressHeader', default=False, help='Suppress header' )
parser.add_option( '-n', '--numThreads', dest='numThreads', type="int", default="1", help='The number of threads to use' )
parser.add_option( '-t', '--timing', action="store_true", default=False, dest='timing', help='output timming information to stderr' )
parser.add_option( '-l', '--loadAllIndexes', action="store_true", default=False, dest='loadAllIndexes', help='Load all indexes into memory' )
parser.add_option( '-m', '--indexMask', dest='indexMask', help='String containing info on how to build custom indexes' )
parser.add_option( "-b", "--buildIndex", action="store_true", dest="buildIndex", default=False, help='String containing info on how to build custom indexes' )
parser.add_option( "--indexRepeatMasker", action="store_true", dest="indexRepeatMasker", default=False, help='Do not index lower case sequences. Such as those created by RepeatMasker' )
parser.add_option( '--indexContigOptions', dest='indexContigOptions', default="", help='The contig range options to use for the indexing' )
parser.add_option( '--indexExonsFileName', dest='indexExonsFileName', default="", help='The exons file to use for the indexing' )
parser.add_option( '-o', '--offsets', dest='offsets', default="", help='The offsets for \'match\'' )
parser.add_option( '-k', '--keySize', dest='keySize', type="int", default="-1", help='truncate key size in \'match\'' )
parser.add_option( '-K', '--maxKeyMatches', dest='maxKeyMatches', type="int", default="-1", help='the maximum number of matches to allow before a key is ignored' )
parser.add_option( '-M', '--maxNumMatches', dest='maxNumMatches', type="int", default="-1", help='the maximum number of matches to allow bfore the read is discarded' )
parser.add_option( '-w', '--whichStrand', dest='whichStrand', type="choice", default='0', choices=('0', '1', '2'), help='the strands to consider (0: both 1: forward 2: reverse)' )
parser.add_option( '--scoringMatrixFileName', dest='scoringMatrixFileName', help='Scoring Matrix file used to score the alignments' )
parser.add_option( '-u', '--ungapped', dest='ungapped', action="store_true", default=False, help='performed ungapped local alignment' )
parser.add_option( '-U', '--unconstrained', dest='unconstrained', action="store_true", default=False, help='performed local alignment without mask constraints' )
parser.add_option( '-O', '--offset', dest='offset', type="int", default="0", help='the number of bases before and after each hit to consider in local alignment' )
parser.add_option( '-q', '--avgMismatchQuality', type="int", default="-1", dest='avgMismatchQuality', help='average mismatch quality' )
parser.add_option( '-a', '--algorithm', dest='algorithm', default='0', type="choice", choices=('0', '1', '2', '3', '4'), help='post processing algorithm (0: no filtering, 1: all passing filters, 2: unique, 3: best scoring unique, 4: best score all' )
parser.add_option( '--unpaired', dest='unpaired', action="store_true", default=False, help='do not choose alignments based on pairing' )
parser.add_option( '--reverseStrand', dest='reverseStrand', action="store_true", default=False, help='paired end reads are given on reverse strands' )
parser.add_option( '--pairedEndInfer', dest='pairedEndInfer', action="store_true", default=False, help='break ties when one end of a paired end read by estimating the insert size distribution' )
parser.add_option( '--randomBest', dest='randomBest', action="store_true", default=False, help='output a random best scoring alignment' )
(options, args) = parser.parse_args()
tmp = tempfile.NamedTemporaryFile().name
tmp_stdout = open( tmp, 'wb' )
proc = subprocess.Popen( args='bfast 2>&1', shell=True, stdout=tmp_stdout )
tmp_stdout.close()
returncode = proc.wait()
stdout = None
for line in open( tmp_stdout.name, 'rb' ):
if line.lower().find( 'version' ) >= 0:
stdout = line.strip()
break
if stdout:
sys.stdout.write( '%s\n' % stdout )
else:
raise Exception
except:
sys.stdout.write( 'Could not determine BFAST version\n' )
buffsize = 1048576
tmp_dir = '%s/' % tempfile.mkdtemp()
if options.timing:
all_cmd_options = "-t"
else:
all_cmd_options = ""
try:
if options.buildIndex:
reference_filepath = tempfile.NamedTemporaryFile( dir=tmp_dir, suffix='.fa' ).name
os.symlink( options.ref, reference_filepath )
try:
nuc_space = [ "0" ]
if options.space == "1":
nuc_space.append( "1" )
for space in nuc_space:
cmd = 'bfast fasta2brg -f "%s" -A "%s" %s' % ( reference_filepath, space, all_cmd_options )
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
tmp_stderr = open( tmp, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception(stderr)
except Exception as e:
raise Exception('Error in \'bfast fasta2brg\'.\n' + str( e ))
# bfast index
try:
all_index_cmds = 'bfast index %s -f "%s" -A "%s" -n "%s"' % ( all_cmd_options, reference_filepath, options.space, options.numThreads )
if options.indexRepeatMasker:
all_index_cmds += " -R"
if options.indexContigOptions:
index_contig_options = [ int(_) for _ in options.indexContigOptions.split( ',' ) ]
if index_contig_options[0] >= 0:
all_index_cmds += ' -s "%s"' % index_contig_options[0]
if index_contig_options[1] >= 0:
all_index_cmds += ' -S "%s"' % index_contig_options[1]
if index_contig_options[2] >= 0:
all_index_cmds += ' -e "%s"' % index_contig_options[2]
if index_contig_options[3] >= 0:
all_index_cmds += ' -E "%s"' % index_contig_options[3]
elif options.indexExonsFileName:
all_index_cmds += ' -x "%s"' % options.indexExonsFileName
index_count = 1
for mask, hash_width in [ mask.split( ':' ) for mask in options.indexMask.split( ',' ) ]:
cmd = '%s -m "%s" -w "%s" -i "%i"' % ( all_index_cmds, mask, hash_width, index_count )
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception(stderr)
index_count += 1
except Exception as e:
raise Exception('Error in \'bfast index\'.\n' + str( e ))
else:
reference_filepath = options.ref
assert reference_filepath and os.path.exists( reference_filepath ), 'A valid genome reference was not provided.'
tmp_bmf = tempfile.NamedTemporaryFile( dir=tmp_dir )
tmp_bmf_name = tmp_bmf.name
tmp_bmf.close()
tmp_baf = tempfile.NamedTemporaryFile( dir=tmp_dir )
tmp_baf_name = tmp_baf.name
tmp_baf.close()
bfast_match_cmd = 'bfast match -f "%s" -r "%s" -n "%s" -A "%s" -T "%s" -w "%s" %s' % ( reference_filepath, options.fastq, options.numThreads, options.space, tmp_dir, options.whichStrand, all_cmd_options )
bfast_localalign_cmd = 'bfast localalign -f "%s" -m "%s" -n "%s" -A "%s" -o "%s" %s' % ( reference_filepath, tmp_bmf_name, options.numThreads, options.space, options.offset, all_cmd_options )
bfast_postprocess_cmd = 'bfast postprocess -O 1 -f "%s" -i "%s" -n "%s" -A "%s" -a "%s" %s' % ( reference_filepath, tmp_baf_name, options.numThreads, options.space, options.algorithm, all_cmd_options )
if options.offsets:
bfast_match_cmd += ' -o "%s"' % options.offsets
if options.keySize >= 0:
bfast_match_cmd += ' -k "%s"' % options.keySize
if options.maxKeyMatches >= 0:
bfast_match_cmd += ' -K "%s"' % options.maxKeyMatches
if options.maxNumMatches >= 0:
bfast_match_cmd += ' -M "%s"' % options.maxNumMatches
bfast_localalign_cmd += ' -M "%s"' % options.maxNumMatches
if options.scoringMatrixFileName:
bfast_localalign_cmd += ' -x "%s"' % options.scoringMatrixFileName
bfast_postprocess_cmd += ' -x "%s"' % options.scoringMatrixFileName
if options.ungapped:
bfast_localalign_cmd += ' -u'
if options.unconstrained:
bfast_localalign_cmd += ' -U'
if options.avgMismatchQuality >= 0:
bfast_localalign_cmd += ' -q "%s"' % options.avgMismatchQuality
bfast_postprocess_cmd += ' -q "%s"' % options.avgMismatchQuality
if options.algorithm == 3:
if options.pairedEndInfer:
bfast_postprocess_cmd += ' -P'
if options.randomBest:
bfast_postprocess_cmd += ' -z'
if options.unpaired:
bfast_postprocess_cmd += ' -U'
if options.reverseStrand:
bfast_postprocess_cmd += ' -R'
bfast_match_cmd += " > %s" % tmp_bmf_name
bfast_localalign_cmd += " > %s" % tmp_baf_name
bfast_postprocess_cmd += " > %s" % options.output
try:
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=bfast_match_cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
tmp_stderr = open( tmp, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception(stderr)
except Exception as e:
raise Exception('Error in \'bfast match\'. \n' + str( e ))
# bfast 'localalign'
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=bfast_localalign_cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception(stderr)
except Exception as e:
raise Exception('Error in \'bfast localalign\'. \n' + str( e ))
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=bfast_postprocess_cmd, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
tmp_stderr = open( tmp, 'rb' )
stderr = ''
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception(stderr)
except Exception as e:
raise Exception('Error in \'bfast postprocess\'. \n' + str( e ))
# remove header if necessary
if options.suppressHeader:
tmp_out = tempfile.NamedTemporaryFile( dir=tmp_dir)
tmp_out_name = tmp_out.name
tmp_out.close()
try:
shutil.move( options.output, tmp_out_name )
except Exception as e:
raise Exception('Error moving output file before removing headers. \n' + str( e ))
fout = open( options.output, 'w' )
for line in open( tmp_out.name, 'r' ):
if len( line ) < 3 or line[0:3] not in [ '@HD', '@SQ', '@RG', '@PG', '@CO' ]:
fout.write( line )
fout.close()
# check that there are results in the output file
if os.path.getsize( options.output ) > 0:
if "0" == options.space:
sys.stdout.write( 'BFAST run on Base Space data' )
else:
sys.stdout.write( 'BFAST run on Color Space data' )
else:
raise Exception('The output file is empty. You may simply have no matches, or there may be an error with your input file or settings.')
except Exception as e:
stop_err( 'The alignment failed.\n' + str( e ) )
finally:
# clean up temp dir
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
if __name__ == "__main__":
__main__()
| true | true |
f72cc9fd6006f3d37d591e4575e98e027bf672c1 | 11,546 | py | Python | markovify/text.py | iconesalut/markovify | e82077e2733b7613a6a153194ab1b288a9170e4e | [
"MIT"
] | null | null | null | markovify/text.py | iconesalut/markovify | e82077e2733b7613a6a153194ab1b288a9170e4e | [
"MIT"
] | null | null | null | markovify/text.py | iconesalut/markovify | e82077e2733b7613a6a153194ab1b288a9170e4e | [
"MIT"
] | null | null | null | import re
import json
import random
from .splitters import split_into_sentences
from .chain import Chain, BEGIN, END
from unidecode import unidecode
# Module-wide generation defaults. By their names these bound how much a
# generated sentence may overlap the source corpus (as a ratio and as an
# absolute count) and cap retry attempts — the consuming code is outside
# this chunk; verify against the sentence-generation methods.
DEFAULT_MAX_OVERLAP_RATIO = 0.7
DEFAULT_MAX_OVERLAP_TOTAL = 15
DEFAULT_TRIES = 10
class ParamError(Exception):
    """Exception type raised for invalid parameter values."""
class Text(object):
    """Model of a text corpus, backed by a markovify Chain built from
    parsed sentences (see __init__)."""

    # Default rejection pattern: sentences containing unmatched/stray
    # quotes, double quotes, parentheses, or brackets are considered
    # ill-formed. Overridable per instance via the reject_reg argument.
    reject_pat = re.compile(r"(^')|('$)|\s'|'\s|[\"(\(\)\[\])]")
def __init__(self, input_text, state_size=2, chain=None, parsed_sentences=None, retain_original=True, well_formed=True, reject_reg=''):
"""
input_text: A string.
state_size: An integer, indicating the number of words in the model's state.
chain: A trained markovify.Chain instance for this text, if pre-processed.
parsed_sentences: A list of lists, where each outer list is a "run"
of the process (e.g. a single sentence), and each inner list
contains the steps (e.g. words) in the run. If you want to simulate
an infinite process, you can come very close by passing just one, very
long run.
retain_original: Indicates whether to keep the original corpus.
well_formed: Indicates whether sentences should be well-formed, preventing
unmatched quotes, parenthesis by default, or a custom regular expression
can be provided.
reject_reg: If well_formed is True, this can be provided to override the
standard rejection pattern.
"""
self.well_formed = well_formed
if well_formed and reject_reg != '':
self.reject_pat = re.compile(reject_reg)
can_make_sentences = parsed_sentences is not None or input_text is not None
self.retain_original = retain_original and can_make_sentences
self.state_size = state_size
if self.retain_original:
self.parsed_sentences = parsed_sentences or list(self.generate_corpus(input_text))
# Rejoined text lets us assess the novelty of generated sentences
self.rejoined_text = self.sentence_join(map(self.word_join, self.parsed_sentences))
self.chain = chain or Chain(self.parsed_sentences, state_size)
else:
if not chain:
parsed = parsed_sentences or self.generate_corpus(input_text)
self.chain = chain or Chain(parsed, state_size)
def compile(self, inplace = False):
if inplace:
self.chain.compile(inplace = True)
return self
cchain = self.chain.compile(inplace = False)
psent = None
if hasattr(self, 'parsed_sentences'):
psent = self.parsed_sentences
return Text(None, \
state_size = self.state_size, \
chain = cchain, \
parsed_sentences = psent, \
retain_original = self.retain_original, \
well_formed = self.well_formed, \
reject_reg = self.reject_pat)
def to_dict(self):
"""
Returns the underlying data as a Python dict.
"""
return {
"state_size": self.state_size,
"chain": self.chain.to_json(),
"parsed_sentences": self.parsed_sentences if self.retain_original else None
}
def to_json(self):
"""
Returns the underlying data as a JSON string.
"""
return json.dumps(self.to_dict())
@classmethod
def from_dict(cls, obj, **kwargs):
return cls(
None,
state_size=obj["state_size"],
chain=Chain.from_json(obj["chain"]),
parsed_sentences=obj.get("parsed_sentences")
)
@classmethod
def from_json(cls, json_str):
return cls.from_dict(json.loads(json_str))
def sentence_split(self, text):
"""
Splits full-text string into a list of sentences.
"""
return split_into_sentences(text)
def sentence_join(self, sentences):
"""
Re-joins a list of sentences into the full text.
"""
return " ".join(sentences)
word_split_pattern = re.compile(r"\s+")
def word_split(self, sentence):
"""
Splits a sentence into a list of words.
"""
return re.split(self.word_split_pattern, sentence)
def word_join(self, words):
"""
Re-joins a list of words into a sentence.
"""
return " ".join(words)
def test_sentence_input(self, sentence):
"""
A basic sentence filter. The default rejects sentences that contain
the type of punctuation that would look strange on its own
in a randomly-generated sentence.
"""
if len(sentence.strip()) == 0: return False
# Decode unicode, mainly to normalize fancy quotation marks
if sentence.__class__.__name__ == "str": # pragma: no cover
decoded = sentence
else: # pragma: no cover
decoded = unidecode(sentence)
# Sentence shouldn't contain problematic characters
if self.well_formed and self.reject_pat.search(decoded): return False
return True
def generate_corpus(self, text):
"""
Given a text string, returns a list of lists; that is, a list of
"sentences," each of which is a list of words. Before splitting into
words, the sentences are filtered through `self.test_sentence_input`
"""
if isinstance(text, str):
sentences = self.sentence_split(text)
else:
sentences = []
for line in text:
sentences += self.sentence_split(line)
passing = filter(self.test_sentence_input, sentences)
runs = map(self.word_split, passing)
return runs
def test_sentence_output(self, words, max_overlap_ratio, max_overlap_total):
"""
Given a generated list of words, accept or reject it. This one rejects
sentences that too closely match the original text, namely those that
contain any identical sequence of words of X length, where X is the
smaller number of (a) `max_overlap_ratio` (default: 0.7) of the total
number of words, and (b) `max_overlap_total` (default: 15).
"""
# Reject large chunks of similarity
overlap_ratio = int(round(max_overlap_ratio * len(words)))
overlap_max = min(max_overlap_total, overlap_ratio)
overlap_over = overlap_max + 1
gram_count = max((len(words) - overlap_max), 1)
grams = [ words[i:i+overlap_over] for i in range(gram_count) ]
for g in grams:
gram_joined = self.word_join(g)
if gram_joined in self.rejoined_text:
return False
return True
def make_sentence(self, init_state=None, topic=[], minimum_topic_words=0, **kwargs):
"""
Attempts `tries` (default: 10) times to generate a valid sentence,
based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
and `max_overlap_total` to `test_sentence_output`.
If successful, returns the sentence as a string. If not, returns None.
If `init_state` (a tuple of `self.chain.state_size` words) is not specified,
this method chooses a sentence-start at random, in accordance with
the model.
If `test_output` is set as False then the `test_sentence_output` check
will be skipped.
If `max_words` is specified, the word count for the sentence will be
evaluated against the provided limit.
"""
tries = kwargs.get('tries', DEFAULT_TRIES)
mor = kwargs.get('max_overlap_ratio', DEFAULT_MAX_OVERLAP_RATIO)
mot = kwargs.get('max_overlap_total', DEFAULT_MAX_OVERLAP_TOTAL)
test_output = kwargs.get('test_output', True)
max_words = kwargs.get('max_words', None)
if init_state != None:
prefix = list(init_state)
for word in prefix:
if word == BEGIN:
prefix = prefix[1:]
else:
break
else:
prefix = []
for _ in range(tries):
words = prefix + self.chain.walk(init_state, topic)
if max_words != None and len(words) > max_words:
continue
if test_output and hasattr(self, "rejoined_text"):
if self.test_sentence_output(words, mor, mot) and self.chain.topic_match >= minimum_topic_words:
return self.word_join(words)
elif self.chain.topic_match >= minimum_topic_words:
return self.word_join(words)
return None
def make_short_sentence(self, max_chars, min_chars=0, **kwargs):
"""
Tries making a sentence of no more than `max_chars` characters and optionally
no less than `min_chars` characters, passing **kwargs to `self.make_sentence`.
"""
tries = kwargs.get('tries', DEFAULT_TRIES)
for _ in range(tries):
sentence = self.make_sentence(**kwargs)
if sentence and len(sentence) <= max_chars and len(sentence) >= min_chars:
return sentence
def make_sentence_with_start(self, beginning, strict=True, **kwargs):
"""
Tries making a sentence that begins with `beginning` string,
which should be a string of one to `self.state` words known
to exist in the corpus.
If strict == True, then markovify will draw its initial inspiration
only from sentences that start with the specified word/phrase.
If strict == False, then markovify will draw its initial inspiration
from any sentence containing the specified word/phrase.
**kwargs are passed to `self.make_sentence`
"""
split = tuple(self.word_split(beginning))
word_count = len(split)
if word_count == self.state_size:
init_states = [ split ]
elif word_count > 0 and word_count < self.state_size:
if strict:
init_states = [ (BEGIN,) * (self.state_size - word_count) + split ]
else:
init_states = [ key for key in self.chain.model.keys()
# check for starting with begin as well ordered lists
if tuple(filter(lambda x: x != BEGIN, key))[:word_count] == split ]
random.shuffle(init_states)
else:
err_msg = "`make_sentence_with_start` for this model requires a string containing 1 to {0} words. Yours has {1}: {2}".format(self.state_size, word_count, str(split))
raise ParamError(err_msg)
for init_state in init_states:
output = self.make_sentence(init_state, **kwargs)
if output is not None:
return output
return None
@classmethod
def from_chain(cls, chain_json, corpus=None, parsed_sentences=None):
"""
Init a Text class based on an existing chain JSON string or object
If corpus is None, overlap checking won't work.
"""
chain = Chain.from_json(chain_json)
return cls(corpus or None, parsed_sentences=parsed_sentences, state_size=chain.state_size, chain=chain)
class NewlineText(Text):
"""
A (usable) example of subclassing markovify.Text. This one lets you markovify
text where the sentences are separated by newlines instead of ". "
"""
def sentence_split(self, text):
return re.split(r"\s*\n\s*", text)
| 39.406143 | 177 | 0.620908 | import re
import json
import random
from .splitters import split_into_sentences
from .chain import Chain, BEGIN, END
from unidecode import unidecode
DEFAULT_MAX_OVERLAP_RATIO = 0.7
DEFAULT_MAX_OVERLAP_TOTAL = 15
DEFAULT_TRIES = 10
class ParamError(Exception):
pass
class Text(object):
reject_pat = re.compile(r"(^')|('$)|\s'|'\s|[\"(\(\)\[\])]")
def __init__(self, input_text, state_size=2, chain=None, parsed_sentences=None, retain_original=True, well_formed=True, reject_reg=''):
self.well_formed = well_formed
if well_formed and reject_reg != '':
self.reject_pat = re.compile(reject_reg)
can_make_sentences = parsed_sentences is not None or input_text is not None
self.retain_original = retain_original and can_make_sentences
self.state_size = state_size
if self.retain_original:
self.parsed_sentences = parsed_sentences or list(self.generate_corpus(input_text))
# Rejoined text lets us assess the novelty of generated sentences
self.rejoined_text = self.sentence_join(map(self.word_join, self.parsed_sentences))
self.chain = chain or Chain(self.parsed_sentences, state_size)
else:
if not chain:
parsed = parsed_sentences or self.generate_corpus(input_text)
self.chain = chain or Chain(parsed, state_size)
def compile(self, inplace = False):
if inplace:
self.chain.compile(inplace = True)
return self
cchain = self.chain.compile(inplace = False)
psent = None
if hasattr(self, 'parsed_sentences'):
psent = self.parsed_sentences
return Text(None, \
state_size = self.state_size, \
chain = cchain, \
parsed_sentences = psent, \
retain_original = self.retain_original, \
well_formed = self.well_formed, \
reject_reg = self.reject_pat)
def to_dict(self):
return {
"state_size": self.state_size,
"chain": self.chain.to_json(),
"parsed_sentences": self.parsed_sentences if self.retain_original else None
}
def to_json(self):
return json.dumps(self.to_dict())
@classmethod
def from_dict(cls, obj, **kwargs):
return cls(
None,
state_size=obj["state_size"],
chain=Chain.from_json(obj["chain"]),
parsed_sentences=obj.get("parsed_sentences")
)
@classmethod
def from_json(cls, json_str):
return cls.from_dict(json.loads(json_str))
def sentence_split(self, text):
return split_into_sentences(text)
def sentence_join(self, sentences):
return " ".join(sentences)
word_split_pattern = re.compile(r"\s+")
def word_split(self, sentence):
return re.split(self.word_split_pattern, sentence)
def word_join(self, words):
return " ".join(words)
def test_sentence_input(self, sentence):
if len(sentence.strip()) == 0: return False
# Decode unicode, mainly to normalize fancy quotation marks
if sentence.__class__.__name__ == "str": # pragma: no cover
decoded = sentence
else: # pragma: no cover
decoded = unidecode(sentence)
# Sentence shouldn't contain problematic characters
if self.well_formed and self.reject_pat.search(decoded): return False
return True
def generate_corpus(self, text):
if isinstance(text, str):
sentences = self.sentence_split(text)
else:
sentences = []
for line in text:
sentences += self.sentence_split(line)
passing = filter(self.test_sentence_input, sentences)
runs = map(self.word_split, passing)
return runs
def test_sentence_output(self, words, max_overlap_ratio, max_overlap_total):
# Reject large chunks of similarity
overlap_ratio = int(round(max_overlap_ratio * len(words)))
overlap_max = min(max_overlap_total, overlap_ratio)
overlap_over = overlap_max + 1
gram_count = max((len(words) - overlap_max), 1)
grams = [ words[i:i+overlap_over] for i in range(gram_count) ]
for g in grams:
gram_joined = self.word_join(g)
if gram_joined in self.rejoined_text:
return False
return True
def make_sentence(self, init_state=None, topic=[], minimum_topic_words=0, **kwargs):
tries = kwargs.get('tries', DEFAULT_TRIES)
mor = kwargs.get('max_overlap_ratio', DEFAULT_MAX_OVERLAP_RATIO)
mot = kwargs.get('max_overlap_total', DEFAULT_MAX_OVERLAP_TOTAL)
test_output = kwargs.get('test_output', True)
max_words = kwargs.get('max_words', None)
if init_state != None:
prefix = list(init_state)
for word in prefix:
if word == BEGIN:
prefix = prefix[1:]
else:
break
else:
prefix = []
for _ in range(tries):
words = prefix + self.chain.walk(init_state, topic)
if max_words != None and len(words) > max_words:
continue
if test_output and hasattr(self, "rejoined_text"):
if self.test_sentence_output(words, mor, mot) and self.chain.topic_match >= minimum_topic_words:
return self.word_join(words)
elif self.chain.topic_match >= minimum_topic_words:
return self.word_join(words)
return None
def make_short_sentence(self, max_chars, min_chars=0, **kwargs):
tries = kwargs.get('tries', DEFAULT_TRIES)
for _ in range(tries):
sentence = self.make_sentence(**kwargs)
if sentence and len(sentence) <= max_chars and len(sentence) >= min_chars:
return sentence
def make_sentence_with_start(self, beginning, strict=True, **kwargs):
split = tuple(self.word_split(beginning))
word_count = len(split)
if word_count == self.state_size:
init_states = [ split ]
elif word_count > 0 and word_count < self.state_size:
if strict:
init_states = [ (BEGIN,) * (self.state_size - word_count) + split ]
else:
init_states = [ key for key in self.chain.model.keys()
# check for starting with begin as well ordered lists
if tuple(filter(lambda x: x != BEGIN, key))[:word_count] == split ]
random.shuffle(init_states)
else:
err_msg = "`make_sentence_with_start` for this model requires a string containing 1 to {0} words. Yours has {1}: {2}".format(self.state_size, word_count, str(split))
raise ParamError(err_msg)
for init_state in init_states:
output = self.make_sentence(init_state, **kwargs)
if output is not None:
return output
return None
@classmethod
def from_chain(cls, chain_json, corpus=None, parsed_sentences=None):
chain = Chain.from_json(chain_json)
return cls(corpus or None, parsed_sentences=parsed_sentences, state_size=chain.state_size, chain=chain)
class NewlineText(Text):
def sentence_split(self, text):
return re.split(r"\s*\n\s*", text)
| true | true |
f72ccad3cb20f24ab3326c7cf1774f63675af32d | 2,175 | py | Python | django_remote_submission/signals.py | YeemBoi/django-remote-submission | 665daa0701b7da5ac653115712d0c0f0aae041c6 | [
"ISC"
] | null | null | null | django_remote_submission/signals.py | YeemBoi/django-remote-submission | 665daa0701b7da5ac653115712d0c0f0aae041c6 | [
"ISC"
] | null | null | null | django_remote_submission/signals.py | YeemBoi/django-remote-submission | 665daa0701b7da5ac653115712d0c0f0aae041c6 | [
"ISC"
] | null | null | null | """Attach signals to this app's models."""
# -*- coding: utf-8 -*-
import json
import logging
import channels.layers
from asgiref.sync import async_to_sync
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Job, Log
logger = logging.getLogger(__name__) # pylint: disable=C0103
def send_message(event):
'''
Call back function to send message to the browser
'''
message = event['text']
channel_layer = channels.layers.get_channel_layer()
# Send message to WebSocket
async_to_sync(channel_layer.send)(text_data=json.dumps(
message
))
@receiver(post_save, sender=Job, dispatch_uid='update_job_status_listeners')
def update_job_status_listeners(sender, instance, **kwargs):
'''
Sends job status to the browser when a Job is modified
'''
logger.debug("Job modified: {} :: status = {}.".format(
instance, instance.status))
user = instance.owner
group_name = 'job-user-{}'.format(user.username)
message = {
'job_id': instance.id,
'title': instance.title,
'status': instance.status,
'modified': instance.modified.isoformat(),
}
channel_layer = channels.layers.get_channel_layer()
async_to_sync(channel_layer.group_send)(
group_name,
{
'type': 'send_message',
'text': message
}
)
@receiver(post_save, sender=Log, dispatch_uid='update_job_log_listeners')
def update_job_log_listeners(sender, instance, **kwargs):
'''
Sends job status to the browser when a Log is modified
'''
logger.debug("Log modified: {} :: content = {}.".format(
instance, instance.content))
job_pk = instance.job.id
group_name = 'job-log-{}'.format(job_pk)
message = {
'log_id': instance.id,
'time': instance.time.isoformat(),
'content': instance.content,
'stream': instance.stream,
}
channel_layer = channels.layers.get_channel_layer()
async_to_sync(channel_layer.group_send)(
group_name,
{
'type': 'send_message',
'text': message
}
)
| 24.715909 | 76 | 0.643218 |
import json
import logging
import channels.layers
from asgiref.sync import async_to_sync
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Job, Log
logger = logging.getLogger(__name__)
def send_message(event):
message = event['text']
channel_layer = channels.layers.get_channel_layer()
async_to_sync(channel_layer.send)(text_data=json.dumps(
message
))
@receiver(post_save, sender=Job, dispatch_uid='update_job_status_listeners')
def update_job_status_listeners(sender, instance, **kwargs):
logger.debug("Job modified: {} :: status = {}.".format(
instance, instance.status))
user = instance.owner
group_name = 'job-user-{}'.format(user.username)
message = {
'job_id': instance.id,
'title': instance.title,
'status': instance.status,
'modified': instance.modified.isoformat(),
}
channel_layer = channels.layers.get_channel_layer()
async_to_sync(channel_layer.group_send)(
group_name,
{
'type': 'send_message',
'text': message
}
)
@receiver(post_save, sender=Log, dispatch_uid='update_job_log_listeners')
def update_job_log_listeners(sender, instance, **kwargs):
logger.debug("Log modified: {} :: content = {}.".format(
instance, instance.content))
job_pk = instance.job.id
group_name = 'job-log-{}'.format(job_pk)
message = {
'log_id': instance.id,
'time': instance.time.isoformat(),
'content': instance.content,
'stream': instance.stream,
}
channel_layer = channels.layers.get_channel_layer()
async_to_sync(channel_layer.group_send)(
group_name,
{
'type': 'send_message',
'text': message
}
)
| true | true |
f72ccbbf32e6e2e0c7f9b522c887b0a968d79192 | 15,631 | py | Python | guild/external/setuptools/tests/test_wheel.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | 63 | 2016-11-01T13:06:46.000Z | 2018-08-21T08:38:36.000Z | guild/external/setuptools/tests/test_wheel.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | 28 | 2016-11-02T01:41:23.000Z | 2018-10-19T22:57:06.000Z | guild/external/setuptools/tests/test_wheel.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | 8 | 2017-01-15T14:58:43.000Z | 2018-07-27T11:51:39.000Z | # -*- coding: utf-8 -*-
"""wheel tests
"""
from distutils.sysconfig import get_config_var
from distutils.util import get_platform
import contextlib
import glob
import inspect
import os
import shutil
import subprocess
import sys
import zipfile
import pytest
from pkg_resources import Distribution, PathMetadata, PY_MAJOR
from setuptools.extern.packaging.utils import canonicalize_name
from setuptools.extern.packaging.tags import parse_tag
from setuptools.wheel import Wheel
from .contexts import tempdir
from .files import build_files
from .textwrap import DALS
__metaclass__ = type
WHEEL_INFO_TESTS = (
('invalid.whl', ValueError),
('simplewheel-2.0-1-py2.py3-none-any.whl', {
'project_name': 'simplewheel',
'version': '2.0',
'build': '1',
'py_version': 'py2.py3',
'abi': 'none',
'platform': 'any',
}),
('simple.dist-0.1-py2.py3-none-any.whl', {
'project_name': 'simple.dist',
'version': '0.1',
'build': None,
'py_version': 'py2.py3',
'abi': 'none',
'platform': 'any',
}),
('example_pkg_a-1-py3-none-any.whl', {
'project_name': 'example_pkg_a',
'version': '1',
'build': None,
'py_version': 'py3',
'abi': 'none',
'platform': 'any',
}),
('PyQt5-5.9-5.9.1-cp35.cp36.cp37-abi3-manylinux1_x86_64.whl', {
'project_name': 'PyQt5',
'version': '5.9',
'build': '5.9.1',
'py_version': 'cp35.cp36.cp37',
'abi': 'abi3',
'platform': 'manylinux1_x86_64',
}),
)
@pytest.mark.parametrize(
('filename', 'info'), WHEEL_INFO_TESTS,
ids=[t[0] for t in WHEEL_INFO_TESTS]
)
def test_wheel_info(filename, info):
if inspect.isclass(info):
with pytest.raises(info):
Wheel(filename)
return
w = Wheel(filename)
assert {k: getattr(w, k) for k in info.keys()} == info
@contextlib.contextmanager
def build_wheel(extra_file_defs=None, **kwargs):
file_defs = {
'setup.py': (DALS(
'''
# -*- coding: utf-8 -*-
from setuptools import setup
import setuptools
setup(**%r)
'''
) % kwargs).encode('utf-8'),
}
if extra_file_defs:
file_defs.update(extra_file_defs)
with tempdir() as source_dir:
build_files(file_defs, source_dir)
subprocess.check_call((sys.executable, 'setup.py',
'-q', 'bdist_wheel'), cwd=source_dir)
yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
def tree_set(root):
contents = set()
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
contents.add(os.path.join(os.path.relpath(dirpath, root),
filename))
return contents
def flatten_tree(tree):
"""Flatten nested dicts and lists into a full list of paths"""
output = set()
for node, contents in tree.items():
if isinstance(contents, dict):
contents = flatten_tree(contents)
for elem in contents:
if isinstance(elem, dict):
output |= {os.path.join(node, val)
for val in flatten_tree(elem)}
else:
output.add(os.path.join(node, elem))
return output
def format_install_tree(tree):
return {
x.format(
py_version=PY_MAJOR,
platform=get_platform(),
shlib_ext=get_config_var('EXT_SUFFIX') or get_config_var('SO'))
for x in tree}
def _check_wheel_install(filename, install_dir, install_tree_includes,
project_name, version, requires_txt):
w = Wheel(filename)
egg_path = os.path.join(install_dir, w.egg_name())
w.install_as_egg(egg_path)
if install_tree_includes is not None:
install_tree = format_install_tree(install_tree_includes)
exp = tree_set(install_dir)
assert install_tree.issubset(exp), (install_tree - exp)
metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
assert dist.project_name == project_name
assert dist.version == version
if requires_txt is None:
assert not dist.has_metadata('requires.txt')
else:
assert requires_txt == dist.get_metadata('requires.txt').lstrip()
class Record:
def __init__(self, id, **kwargs):
self._id = id
self._fields = kwargs
def __repr__(self):
return '%s(**%r)' % (self._id, self._fields)
WHEEL_INSTALL_TESTS = (
dict(
id='basic',
file_defs={
'foo': {
'__init__.py': ''
}
},
setup_kwargs=dict(
packages=['foo'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt'
],
'foo': ['__init__.py']
}
}),
),
dict(
id='utf-8',
setup_kwargs=dict(
description='Description accentuée',
)
),
dict(
id='data',
file_defs={
'data.txt': DALS(
'''
Some data...
'''
),
},
setup_kwargs=dict(
data_files=[('data_dir', ['data.txt'])],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt'
],
'data_dir': [
'data.txt'
]
}
}),
),
dict(
id='extension',
file_defs={
'extension.c': DALS(
'''
#include "Python.h"
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"extension",
NULL,
0,
NULL,
NULL,
NULL,
NULL,
NULL
};
#define INITERROR return NULL
PyMODINIT_FUNC PyInit_extension(void)
#else
#define INITERROR return
void initextension(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module = PyModule_Create(&moduledef);
#else
PyObject *module = Py_InitModule("extension", NULL);
#endif
if (module == NULL)
INITERROR;
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
'''
),
},
setup_kwargs=dict(
ext_modules=[
Record('setuptools.Extension',
name='extension',
sources=['extension.c'])
],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}-{platform}.egg': [
'extension{shlib_ext}',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
]},
]
}),
),
dict(
id='header',
file_defs={
'header.h': DALS(
'''
'''
),
},
setup_kwargs=dict(
headers=['header.h'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'header.h',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
]},
]
}),
),
dict(
id='script',
file_defs={
'script.py': DALS(
'''
#/usr/bin/python
print('hello world!')
'''
),
'script.sh': DALS(
'''
#/bin/sh
echo 'hello world!'
'''
),
},
setup_kwargs=dict(
scripts=['script.py', 'script.sh'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
{'scripts': [
'script.py',
'script.sh'
]}
]
}
})
),
dict(
id='requires1',
install_requires='foobar==2.0',
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'requires.txt',
'top_level.txt',
]
}
}),
requires_txt=DALS(
'''
foobar==2.0
'''
),
),
dict(
id='requires2',
install_requires='''
bar
foo<=2.0; %r in sys_platform
''' % sys.platform,
requires_txt=DALS(
'''
bar
foo<=2.0
'''
),
),
dict(
id='requires3',
install_requires='''
bar; %r != sys_platform
''' % sys.platform,
),
dict(
id='requires4',
install_requires='''
foo
''',
extras_require={
'extra': 'foobar>3',
},
requires_txt=DALS(
'''
foo
[extra]
foobar>3
'''
),
),
dict(
id='requires5',
extras_require={
'extra': 'foobar; %r != sys_platform' % sys.platform,
},
requires_txt=DALS(
'''
[extra]
'''
),
),
dict(
id='namespace_package',
file_defs={
'foo': {
'bar': {
'__init__.py': ''
},
},
},
setup_kwargs=dict(
namespace_packages=['foo'],
packages=['foo.bar'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'foo-1.0-py{py_version}-nspkg.pth',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'namespace_packages.txt',
'top_level.txt',
]},
{'foo': [
'__init__.py',
{'bar': ['__init__.py']},
]},
]
}),
),
dict(
id='empty_namespace_package',
file_defs={
'foobar': {
'__init__.py':
"__import__('pkg_resources').declare_namespace(__name__)",
},
},
setup_kwargs=dict(
namespace_packages=['foobar'],
packages=['foobar'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'foo-1.0-py{py_version}-nspkg.pth',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'namespace_packages.txt',
'top_level.txt',
]},
{'foobar': [
'__init__.py',
]},
]
}),
),
dict(
id='data_in_package',
file_defs={
'foo': {
'__init__.py': '',
'data_dir': {
'data.txt': DALS(
'''
Some data...
'''
),
}
}
},
setup_kwargs=dict(
packages=['foo'],
data_files=[('foo/data_dir', ['foo/data_dir/data.txt'])],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
],
'foo': [
'__init__.py',
{'data_dir': [
'data.txt',
]}
]
}
}),
),
)
@pytest.mark.parametrize(
'params', WHEEL_INSTALL_TESTS,
ids=list(params['id'] for params in WHEEL_INSTALL_TESTS),
)
def test_wheel_install(params):
project_name = params.get('name', 'foo')
version = params.get('version', '1.0')
install_requires = params.get('install_requires', [])
extras_require = params.get('extras_require', {})
requires_txt = params.get('requires_txt', None)
install_tree = params.get('install_tree')
file_defs = params.get('file_defs', {})
setup_kwargs = params.get('setup_kwargs', {})
with build_wheel(
name=project_name,
version=version,
install_requires=install_requires,
extras_require=extras_require,
extra_file_defs=file_defs,
**setup_kwargs
) as filename, tempdir() as install_dir:
_check_wheel_install(filename, install_dir,
install_tree, project_name,
version, requires_txt)
def test_wheel_install_pep_503():
project_name = 'Foo_Bar' # PEP 503 canonicalized name is "foo-bar"
version = '1.0'
with build_wheel(
name=project_name,
version=version,
) as filename, tempdir() as install_dir:
new_filename = filename.replace(project_name,
canonicalize_name(project_name))
shutil.move(filename, new_filename)
_check_wheel_install(new_filename, install_dir, None,
canonicalize_name(project_name),
version, None)
def test_wheel_no_dist_dir():
project_name = 'nodistinfo'
version = '1.0'
wheel_name = '{0}-{1}-py2.py3-none-any.whl'.format(project_name, version)
with tempdir() as source_dir:
wheel_path = os.path.join(source_dir, wheel_name)
# create an empty zip file
zipfile.ZipFile(wheel_path, 'w').close()
with tempdir() as install_dir:
with pytest.raises(ValueError):
_check_wheel_install(wheel_path, install_dir, None,
project_name,
version, None)
def test_wheel_is_compatible(monkeypatch):
def sys_tags():
for t in parse_tag('cp36-cp36m-manylinux1_x86_64'):
yield t
monkeypatch.setattr('setuptools.wheel.sys_tags', sys_tags)
assert Wheel(
'onnxruntime-0.1.2-cp36-cp36m-manylinux1_x86_64.whl').is_compatible()
| 26.674061 | 78 | 0.448852 |
from distutils.sysconfig import get_config_var
from distutils.util import get_platform
import contextlib
import glob
import inspect
import os
import shutil
import subprocess
import sys
import zipfile
import pytest
from pkg_resources import Distribution, PathMetadata, PY_MAJOR
from setuptools.extern.packaging.utils import canonicalize_name
from setuptools.extern.packaging.tags import parse_tag
from setuptools.wheel import Wheel
from .contexts import tempdir
from .files import build_files
from .textwrap import DALS
__metaclass__ = type
WHEEL_INFO_TESTS = (
('invalid.whl', ValueError),
('simplewheel-2.0-1-py2.py3-none-any.whl', {
'project_name': 'simplewheel',
'version': '2.0',
'build': '1',
'py_version': 'py2.py3',
'abi': 'none',
'platform': 'any',
}),
('simple.dist-0.1-py2.py3-none-any.whl', {
'project_name': 'simple.dist',
'version': '0.1',
'build': None,
'py_version': 'py2.py3',
'abi': 'none',
'platform': 'any',
}),
('example_pkg_a-1-py3-none-any.whl', {
'project_name': 'example_pkg_a',
'version': '1',
'build': None,
'py_version': 'py3',
'abi': 'none',
'platform': 'any',
}),
('PyQt5-5.9-5.9.1-cp35.cp36.cp37-abi3-manylinux1_x86_64.whl', {
'project_name': 'PyQt5',
'version': '5.9',
'build': '5.9.1',
'py_version': 'cp35.cp36.cp37',
'abi': 'abi3',
'platform': 'manylinux1_x86_64',
}),
)
@pytest.mark.parametrize(
('filename', 'info'), WHEEL_INFO_TESTS,
ids=[t[0] for t in WHEEL_INFO_TESTS]
)
def test_wheel_info(filename, info):
if inspect.isclass(info):
with pytest.raises(info):
Wheel(filename)
return
w = Wheel(filename)
assert {k: getattr(w, k) for k in info.keys()} == info
@contextlib.contextmanager
def build_wheel(extra_file_defs=None, **kwargs):
file_defs = {
'setup.py': (DALS(
'''
# -*- coding: utf-8 -*-
from setuptools import setup
import setuptools
setup(**%r)
'''
) % kwargs).encode('utf-8'),
}
if extra_file_defs:
file_defs.update(extra_file_defs)
with tempdir() as source_dir:
build_files(file_defs, source_dir)
subprocess.check_call((sys.executable, 'setup.py',
'-q', 'bdist_wheel'), cwd=source_dir)
yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
def tree_set(root):
contents = set()
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
contents.add(os.path.join(os.path.relpath(dirpath, root),
filename))
return contents
def flatten_tree(tree):
output = set()
for node, contents in tree.items():
if isinstance(contents, dict):
contents = flatten_tree(contents)
for elem in contents:
if isinstance(elem, dict):
output |= {os.path.join(node, val)
for val in flatten_tree(elem)}
else:
output.add(os.path.join(node, elem))
return output
def format_install_tree(tree):
    """Substitute environment placeholders into each expected-tree entry.

    Fills {py_version}, {platform} and {shlib_ext} from the running
    interpreter's build configuration.
    """
    substitutions = dict(
        py_version=PY_MAJOR,
        platform=get_platform(),
        shlib_ext=get_config_var('EXT_SUFFIX') or get_config_var('SO'),
    )
    return {entry.format(**substitutions) for entry in tree}
def _check_wheel_install(filename, install_dir, install_tree_includes,
                         project_name, version, requires_txt):
    """Install wheel `filename` as an egg and verify its tree and metadata.

    `install_tree_includes`, when not None, must be a subset of the files
    actually installed. `requires_txt` is the expected requires.txt body,
    or None if that metadata file should be absent.
    """
    w = Wheel(filename)
    egg_path = os.path.join(install_dir, w.egg_name())
    w.install_as_egg(egg_path)
    if install_tree_includes is not None:
        install_tree = format_install_tree(install_tree_includes)
        exp = tree_set(install_dir)
        # Show the missing entries on failure, not the whole trees.
        assert install_tree.issubset(exp), (install_tree - exp)
    metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
    dist = Distribution.from_filename(egg_path, metadata=metadata)
    assert dist.project_name == project_name
    assert dist.version == version
    if requires_txt is None:
        assert not dist.has_metadata('requires.txt')
    else:
        assert requires_txt == dist.get_metadata('requires.txt').lstrip()
class Record:
    """Capture a constructor call for repr-based comparison.

    Renders as ``<id>(**<kwargs>)`` so an expected setup() argument can be
    compared textually against the real call it stands in for.
    """

    def __init__(self, id, **kwargs):
        self._id = id
        self._fields = kwargs

    def __repr__(self):
        return f'{self._id}(**{self._fields!r})'
WHEEL_INSTALL_TESTS = (
dict(
id='basic',
file_defs={
'foo': {
'__init__.py': ''
}
},
setup_kwargs=dict(
packages=['foo'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt'
],
'foo': ['__init__.py']
}
}),
),
dict(
id='utf-8',
setup_kwargs=dict(
description='Description accentuée',
)
),
dict(
id='data',
file_defs={
'data.txt': DALS(
'''
Some data...
'''
),
},
setup_kwargs=dict(
data_files=[('data_dir', ['data.txt'])],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt'
],
'data_dir': [
'data.txt'
]
}
}),
),
dict(
id='extension',
file_defs={
'extension.c': DALS(
'''
#include "Python.h"
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"extension",
NULL,
0,
NULL,
NULL,
NULL,
NULL,
NULL
};
#define INITERROR return NULL
PyMODINIT_FUNC PyInit_extension(void)
#else
#define INITERROR return
void initextension(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module = PyModule_Create(&moduledef);
#else
PyObject *module = Py_InitModule("extension", NULL);
#endif
if (module == NULL)
INITERROR;
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
'''
),
},
setup_kwargs=dict(
ext_modules=[
Record('setuptools.Extension',
name='extension',
sources=['extension.c'])
],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}-{platform}.egg': [
'extension{shlib_ext}',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
]},
]
}),
),
dict(
id='header',
file_defs={
'header.h': DALS(
'''
'''
),
},
setup_kwargs=dict(
headers=['header.h'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'header.h',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
]},
]
}),
),
dict(
id='script',
file_defs={
'script.py': DALS(
'''
#/usr/bin/python
print('hello world!')
'''
),
'script.sh': DALS(
'''
#/bin/sh
echo 'hello world!'
'''
),
},
setup_kwargs=dict(
scripts=['script.py', 'script.sh'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
{'scripts': [
'script.py',
'script.sh'
]}
]
}
})
),
dict(
id='requires1',
install_requires='foobar==2.0',
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'requires.txt',
'top_level.txt',
]
}
}),
requires_txt=DALS(
'''
foobar==2.0
'''
),
),
dict(
id='requires2',
install_requires='''
bar
foo<=2.0; %r in sys_platform
''' % sys.platform,
requires_txt=DALS(
'''
bar
foo<=2.0
'''
),
),
dict(
id='requires3',
install_requires='''
bar; %r != sys_platform
''' % sys.platform,
),
dict(
id='requires4',
install_requires='''
foo
''',
extras_require={
'extra': 'foobar>3',
},
requires_txt=DALS(
'''
foo
[extra]
foobar>3
'''
),
),
dict(
id='requires5',
extras_require={
'extra': 'foobar; %r != sys_platform' % sys.platform,
},
requires_txt=DALS(
'''
[extra]
'''
),
),
dict(
id='namespace_package',
file_defs={
'foo': {
'bar': {
'__init__.py': ''
},
},
},
setup_kwargs=dict(
namespace_packages=['foo'],
packages=['foo.bar'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'foo-1.0-py{py_version}-nspkg.pth',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'namespace_packages.txt',
'top_level.txt',
]},
{'foo': [
'__init__.py',
{'bar': ['__init__.py']},
]},
]
}),
),
dict(
id='empty_namespace_package',
file_defs={
'foobar': {
'__init__.py':
"__import__('pkg_resources').declare_namespace(__name__)",
},
},
setup_kwargs=dict(
namespace_packages=['foobar'],
packages=['foobar'],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': [
'foo-1.0-py{py_version}-nspkg.pth',
{'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'namespace_packages.txt',
'top_level.txt',
]},
{'foobar': [
'__init__.py',
]},
]
}),
),
dict(
id='data_in_package',
file_defs={
'foo': {
'__init__.py': '',
'data_dir': {
'data.txt': DALS(
'''
Some data...
'''
),
}
}
},
setup_kwargs=dict(
packages=['foo'],
data_files=[('foo/data_dir', ['foo/data_dir/data.txt'])],
),
install_tree=flatten_tree({
'foo-1.0-py{py_version}.egg': {
'EGG-INFO': [
'PKG-INFO',
'RECORD',
'WHEEL',
'top_level.txt',
],
'foo': [
'__init__.py',
{'data_dir': [
'data.txt',
]}
]
}
}),
),
)
@pytest.mark.parametrize(
    'params', WHEEL_INSTALL_TESTS,
    ids=list(params['id'] for params in WHEEL_INSTALL_TESTS),
)
def test_wheel_install(params):
    """Build a wheel from `params`, install it as an egg, verify the result."""
    project_name = params.get('name', 'foo')
    version = params.get('version', '1.0')
    install_requires = params.get('install_requires', [])
    extras_require = params.get('extras_require', {})
    requires_txt = params.get('requires_txt', None)
    install_tree = params.get('install_tree')
    file_defs = params.get('file_defs', {})
    setup_kwargs = params.get('setup_kwargs', {})
    with build_wheel(
        name=project_name,
        version=version,
        install_requires=install_requires,
        extras_require=extras_require,
        extra_file_defs=file_defs,
        **setup_kwargs
    ) as filename, tempdir() as install_dir:
        _check_wheel_install(filename, install_dir,
                             install_tree, project_name,
                             version, requires_txt)
def test_wheel_install_pep_503():
    """A wheel renamed to its PEP 503 normalized name still installs."""
    project_name = 'Foo_Bar'  # normalized by canonicalize_name to 'foo-bar'
    version = '1.0'
    with build_wheel(
        name=project_name,
        version=version,
    ) as filename, tempdir() as install_dir:
        new_filename = filename.replace(project_name,
                                        canonicalize_name(project_name))
        shutil.move(filename, new_filename)
        _check_wheel_install(new_filename, install_dir, None,
                             canonicalize_name(project_name),
                             version, None)
def test_wheel_no_dist_dir():
    """Installing a wheel with no .dist-info directory raises ValueError."""
    project_name = 'nodistinfo'
    version = '1.0'
    wheel_name = '{0}-{1}-py2.py3-none-any.whl'.format(project_name, version)
    with tempdir() as source_dir:
        wheel_path = os.path.join(source_dir, wheel_name)
        # An empty zip: valid wheel filename, but no metadata inside.
        zipfile.ZipFile(wheel_path, 'w').close()
        with tempdir() as install_dir:
            with pytest.raises(ValueError):
                _check_wheel_install(wheel_path, install_dir, None,
                                     project_name,
                                     version, None)
def test_wheel_is_compatible(monkeypatch):
    """A cp36 manylinux1 wheel is compatible when sys_tags reports cp36."""
    def sys_tags():
        # Pretend the interpreter is CPython 3.6 on manylinux1 x86_64.
        for t in parse_tag('cp36-cp36m-manylinux1_x86_64'):
            yield t
    monkeypatch.setattr('setuptools.wheel.sys_tags', sys_tags)
    assert Wheel(
        'onnxruntime-0.1.2-cp36-cp36m-manylinux1_x86_64.whl').is_compatible()
| true | true |
f72cccf7222dd5cd1076530b8dcc103fb3bb156d | 246 | py | Python | board/amebaz_dev/ucube.py | jinlongliu/AliOS-Things | ce051172a775f987183e7aca88bb6f3b809ea7b0 | [
"Apache-2.0"
] | 4 | 2019-03-12T11:04:48.000Z | 2019-10-22T06:06:53.000Z | board/amebaz_dev/ucube.py | IamBaoMouMou/AliOS-Things | 195a9160b871b3d78de6f8cf6c2ab09a71977527 | [
"Apache-2.0"
] | 3 | 2018-12-17T13:06:46.000Z | 2018-12-28T01:40:59.000Z | board/amebaz_dev/ucube.py | IamBaoMouMou/AliOS-Things | 195a9160b871b3d78de6f8cf6c2ab09a71977527 | [
"Apache-2.0"
] | 2 | 2018-01-23T07:54:08.000Z | 2018-01-23T11:38:59.000Z | linux_only_targets="linuxapp helloworld helloworld_nocli linkkitapp alinkapp networkapp tls uDataapp hdlcapp.hdlcserver wifihalapp coapapp nano linkkit_gateway blink linkkit_sched meshapp acapp netmgrapp mqttapp wifimonitor vflashdemo athostapp"
| 123 | 245 | 0.894309 | linux_only_targets="linuxapp helloworld helloworld_nocli linkkitapp alinkapp networkapp tls uDataapp hdlcapp.hdlcserver wifihalapp coapapp nano linkkit_gateway blink linkkit_sched meshapp acapp netmgrapp mqttapp wifimonitor vflashdemo athostapp"
| true | true |
f72ccd62512051e1b39bfbd0414538acd7914a7b | 1,403 | py | Python | Python/textrank/textrank.py | chasingegg/Data_Science | a499866ff92aa1107057b20563564bdd89fc370f | [
"MIT"
] | 1 | 2021-04-03T14:21:14.000Z | 2021-04-03T14:21:14.000Z | Python/textrank/textrank.py | chasingegg/Data_Science | a499866ff92aa1107057b20563564bdd89fc370f | [
"MIT"
] | null | null | null | Python/textrank/textrank.py | chasingegg/Data_Science | a499866ff92aa1107057b20563564bdd89fc370f | [
"MIT"
] | null | null | null | #!/usr/src/env python
# -*- coding: utf-8 -*-
# TextRank 博客 http://xiaosheng.me/2017/04/08/article49/
# 从PageRank转变而来,可以用来做关键字的提取。TextRank的计算公式其实跟PageRank可以认为是一样的
# 只不过就是要考虑权重的因素(算PageRank的时候就是均摊权值)
# 在TextRank构建的图中,节点是句子,权值就是两个句子的相似程度
# 提取关键字的时候,单词作为图的节点,把权值都设成1,此时其实退化成PageRank
# 把文本拆分成单词,将这一些单词设定一个简单的滑动窗口,每个窗口内的任意两个单词之间存在一条边
# 如果是要提取关键句,一般认为所有句子都是相邻的,不需要窗口提取。相似程度的计算公式一般是重合
# 单词数量除以总单词数量
import sys
import pandas as pd
import jieba.analyse
def textrank(data, topK):
    """Extract the top-K TextRank keywords for each document in `data`.

    `data` is a DataFrame with 'id', 'title' and 'abstract' columns; the
    title and abstract are concatenated and fed to jieba's TextRank
    implementation, keeping only content-bearing POS tags.

    Returns a DataFrame with columns ['id', 'title', 'key'] where 'key'
    is a space-separated keyword string per document.
    """
    idList, titleList, abstractList = data['id'], data['title'], data['abstract']
    ids, title, keys = [], [], []
    # The stop-word list is global state inside jieba; setting it once
    # outside the loop is sufficient (it was previously re-set per row).
    jieba.analyse.set_stop_words('data/stopWord.txt')
    for i in range(len(idList)):
        text = '%s。%s' % (titleList[i], abstractList[i])  # title + abstract
        print("\"", titleList[i], "\"", " 10 keywords - TextRank :")
        keywords = jieba.analyse.textrank(
            text, topK=topK,
            allowPOS=('n', 'nz', 'v', 'vd', 'vn', 'l', 'a', 'd'))
        word_split = " ".join(keywords)
        print(word_split)
        # Keep keywords as str: encoding to bytes here made pandas write
        # literal b'...' values into the output CSV under Python 3.
        keys.append(word_split)
        ids.append(idList[i])
        title.append(titleList[i])
    result = pd.DataFrame({"id": ids, "title": title, "key": keys},
                          columns=['id', 'title', 'key'])
    return result
if __name__ == "__main__":
    # Entry point: read the sample CSV, extract 10 keywords per document,
    # and write the results next to the input data.
    dataFile = 'data/sample_data.csv'
    data = pd.read_csv(dataFile)
    result = textrank(data, 10)
    result.to_csv("result/keys_textrank.csv", index=False)
import sys
import pandas as pd
import jieba.analyse
def textrank(data, topK):
idList, titleList, abstractList = data['id'], data['title'], data['abstract']
ids, title, keys = [], [], []
for i in range(len(idList)):
text = '%s。%s' % (titleList[i], abstractList[i])
jieba.analyse.set_stop_words('data/stopWord.txt')
print("\"", titleList[i], "\"", " 10 keywords - TextRank :")
keywords = jieba.analyse.textrank(text, topK = topK, allowPOS=('n','nz','v','vd','vn','l','a','d'))
word_split = " ".join(keywords)
print(word_split)
keys.append(word_split.encode("utf-8"))
ids.append(idList[i])
title.append(titleList[i])
result = pd.DataFrame({"id":ids, "title":title, "key":keys}, columns=['id', 'title', 'key'])
return result
if __name__ == "__main__":
dataFile = 'data/sample_data.csv'
data = pd.read_csv(dataFile)
result = textrank(data, 10)
result.to_csv("result/keys_textrank.csv", index=False) | true | true |
f72ccf21d93a0540ee9ca7edb99c6b9d48f98ad0 | 4,043 | py | Python | StandardDataSets/collada/library_geometries/geometry/mesh/vertices/input/position_texcoord_color_normal/position_texcoord_color_normal.py | KhronosGroup/COLLADA-CTS | 61f2a560cbb2a06ee62da8025241f6b08d06bfd9 | [
"MIT"
] | 20 | 2015-03-19T08:02:57.000Z | 2020-10-16T15:16:11.000Z | StandardDataSets/collada/library_geometries/geometry/mesh/vertices/input/position_texcoord_color_normal/position_texcoord_color_normal.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | [
"MIT"
] | 4 | 2017-04-19T18:42:05.000Z | 2017-06-17T03:03:28.000Z | StandardDataSets/collada/library_geometries/geometry/mesh/vertices/input/position_texcoord_color_normal/position_texcoord_color_normal.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | [
"MIT"
] | 10 | 2015-03-26T02:52:24.000Z | 2022-02-24T08:43:48.000Z | # Copyright (C) 2007 - 2009 Khronos Group
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
#
# This sample judging object does the following:
#
# JudgeBaseline: verifies that app did not crash, the required steps have been performed,
# the rendered images match, and the required element(s) has been preserved
# JudgeExemplary: returns Baseline status.
# JudgeSuperior: returns Baseline status.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
    """Judge a COLLADA conformance test at three badge levels.

    Baseline checks that the tool ran the required steps without crashing;
    Superior simply inherits Baseline; Exemplary additionally compares the
    rendered images against a reference for non-equivalence.
    """

    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        """Baseline: no crashes; Import/Export/Validate pass; Render ran."""
        assistant = self.__assistant
        assistant.CheckCrashes(context)
        assistant.CheckSteps(context, ["Import", "Export", "Validate"],
                             ["Render"])
        self.status_baseline = assistant.GetResults()
        return self.status_baseline

    def JudgeSuperior(self, context):
        """Superior: identical to the Baseline result for this test."""
        self.status_superior = self.status_baseline
        return self.status_superior

    def JudgeExemplary(self, context):
        """Exemplary: Superior plus image comparison against the reference."""
        if self.status_superior == False:
            # Superior failed, so exemplary fails without further checks.
            self.status_exemplary = self.status_superior
            return self.status_exemplary

        # Import/export renders must match each other before comparing
        # against the reference test for non-equivalence.
        if self.__assistant.CompareRenderedImages(context):
            self.__assistant.CompareImagesAgainst(
                context, "position_texcoord_color", None, None, 5, True, False)
            self.status_exemplary = self.__assistant.DeferJudgement(context)
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck); | 51.833333 | 466 | 0.73955 |
from StandardDataSets.scripts import JudgeAssistant
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
self.__assistant.CheckCrashes(context)
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
def JudgeExemplary(self, context):
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
if ( self.__assistant.CompareRenderedImages(context) ):
self.__assistant.CompareImagesAgainst(context, "position_texcoord_color", None, None, 5, True, False)
self.status_exemplary = self.__assistant.DeferJudgement(context)
return self.status_exemplary
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck); | true | true |
f72ccf7bbd22c8e0b01ca0371dd14f7cf18a0601 | 2,792 | py | Python | tests/unit/utils.py | canonical/hotsos | 1960e80a3f7529045c44798b0d3ac27d75036562 | [
"Apache-2.0"
] | 6 | 2021-10-01T19:46:14.000Z | 2022-03-31T17:05:08.000Z | tests/unit/utils.py | canonical/hotsos | 1960e80a3f7529045c44798b0d3ac27d75036562 | [
"Apache-2.0"
] | 111 | 2021-10-01T18:18:17.000Z | 2022-03-29T12:23:20.000Z | tests/unit/utils.py | canonical/hotsos | 1960e80a3f7529045c44798b0d3ac27d75036562 | [
"Apache-2.0"
] | 10 | 2021-09-29T14:47:54.000Z | 2022-03-18T14:52:16.000Z | import os
import shutil
import tempfile
import unittest
# disable for stestr otherwise output is much too verbose
from hotsos.core.log import log, logging, setup_logging
from hotsos.core.config import setup_config
# Must be set prior to other imports
TESTS_DIR = os.environ["TESTS_DIR"]
DEFAULT_FAKE_ROOT = 'fake_data_root/openstack'
setup_config(DATA_ROOT=os.path.join(TESTS_DIR, DEFAULT_FAKE_ROOT))
def is_def_filter(def_filename):
    """Create a predicate that matches only the yaml def file `def_filename`.

    Intended to filter hotsos.core.ycheck.YDefsLoader._is_def so a unit
    test only runs the ydef checks under test.

    Note that in order for directory globals to run, def_filename must be a
    relative path that includes the parent directory name e.g. foo/bar.yaml
    where bar contains the checks and there is also a file called
    foo/foo.yaml that contains directory globals.
    """
    def inner(_inst, abs_path):
        """Ensure we only load/run the yaml def with the given name."""
        # def_filename may optionally carry a parent dir, which allows the
        # directory global (<parent>/<parent>.yaml) to run as well.
        parent_dir = os.path.dirname(def_filename)
        if parent_dir:
            base_dir = os.path.basename(os.path.dirname(abs_path))
            if base_dir != parent_dir:
                return False

            if os.path.basename(abs_path) == "{}.yaml".format(parent_dir):
                return True

        return abs_path.endswith(def_filename)

    return inner
class BaseTestCase(unittest.TestCase):
    """Common test base: a fresh hotsos config and scratch dirs per test."""

    def part_output_to_actual(self, output):
        """Unwrap plugin part output entries into a plain {key: data} dict."""
        actual = {}
        for key, entry in output.items():
            actual[key] = entry.data
        return actual

    def setUp(self):
        """Reset global hotsos config/logging and create per-test tmp dirs."""
        self.maxDiff = None
        # ensure locale consistency wherever tests are run
        os.environ["LANG"] = 'C.UTF-8'
        self.global_tmp_dir = tempfile.mkdtemp()
        self.plugin_tmp_dir = tempfile.mkdtemp(dir=self.global_tmp_dir)
        # Always reset env globals
        # If a test relies on loading info from defs yaml this needs to be set
        # to actual plugin name.
        setup_config(DATA_ROOT=os.path.join(TESTS_DIR, DEFAULT_FAKE_ROOT),
                     PLUGIN_NAME="testplugin",
                     PLUGIN_YAML_DEFS=os.path.join(TESTS_DIR, "defs"),
                     PART_NAME="01part",
                     GLOBAL_TMP_DIR=self.global_tmp_dir,
                     PLUGIN_TMP_DIR=self.plugin_tmp_dir,
                     USE_ALL_LOGS=True)
        setup_logging(debug_mode=True)
        log.setLevel(logging.INFO)

    def tearDown(self):
        """Remove the per-test plugin tmp dir created by setUp."""
        if os.path.isdir(self.plugin_tmp_dir):
            shutil.rmtree(self.plugin_tmp_dir)
| 34.469136 | 79 | 0.655802 | import os
import shutil
import tempfile
import unittest
from hotsos.core.log import log, logging, setup_logging
from hotsos.core.config import setup_config
TESTS_DIR = os.environ["TESTS_DIR"]
DEFAULT_FAKE_ROOT = 'fake_data_root/openstack'
setup_config(DATA_ROOT=os.path.join(TESTS_DIR, DEFAULT_FAKE_ROOT))
def is_def_filter(def_filename):
def inner(_inst, abs_path):
parent_dir = os.path.dirname(def_filename)
if parent_dir:
base_dir = os.path.basename(os.path.dirname(abs_path))
if base_dir != parent_dir:
return False
if os.path.basename(abs_path) == "{}.yaml".format(parent_dir):
return True
if abs_path.endswith(def_filename):
return True
return False
return inner
class BaseTestCase(unittest.TestCase):
def part_output_to_actual(self, output):
actual = {}
for key, entry in output.items():
actual[key] = entry.data
return actual
def setUp(self):
self.maxDiff = None
os.environ["LANG"] = 'C.UTF-8'
self.global_tmp_dir = tempfile.mkdtemp()
self.plugin_tmp_dir = tempfile.mkdtemp(dir=self.global_tmp_dir)
setup_config(DATA_ROOT=os.path.join(TESTS_DIR, DEFAULT_FAKE_ROOT),
PLUGIN_NAME="testplugin",
PLUGIN_YAML_DEFS=os.path.join(TESTS_DIR, "defs"),
PART_NAME="01part",
GLOBAL_TMP_DIR=self.global_tmp_dir,
PLUGIN_TMP_DIR=self.plugin_tmp_dir,
USE_ALL_LOGS=True)
setup_logging(debug_mode=True)
log.setLevel(logging.INFO)
def tearDown(self):
if os.path.isdir(self.plugin_tmp_dir):
shutil.rmtree(self.plugin_tmp_dir)
| true | true |
f72cd1215468a583aa37950b7930fb4d28106380 | 1,428 | py | Python | dm_control/mujoco/wrapper/mjbindings/__init__.py | mhauskn/dm_control | b7944e0ed4392924f40a3e5c65b1a93c027b9718 | [
"Apache-2.0"
] | 2 | 2021-06-21T05:19:01.000Z | 2021-07-02T14:51:16.000Z | dm_control/mujoco/wrapper/mjbindings/__init__.py | akssri/dm_control | 1a0914f8df414685f1b336838e39e36fd378e0b9 | [
"Apache-2.0"
] | 2 | 2021-10-05T16:03:39.000Z | 2022-03-12T01:03:17.000Z | dm_control/mujoco/wrapper/mjbindings/__init__.py | akssri/dm_control | 1a0914f8df414685f1b336838e39e36fd378e0b9 | [
"Apache-2.0"
] | 2 | 2019-12-10T21:38:03.000Z | 2020-12-22T08:42:45.000Z | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Import core names of MuJoCo ctypes bindings."""
from absl import logging
from dm_control.mujoco.wrapper.mjbindings import constants
from dm_control.mujoco.wrapper.mjbindings import enums
from dm_control.mujoco.wrapper.mjbindings import sizes
from dm_control.mujoco.wrapper.mjbindings import types
from dm_control.mujoco.wrapper.mjbindings import wrappers
# pylint: disable=g-import-not-at-top
try:
from dm_control.mujoco.wrapper.mjbindings import functions
from dm_control.mujoco.wrapper.mjbindings.functions import mjlib
logging.info('MuJoCo library version is: %d', mjlib.mj_version())
except (IOError, OSError):
logging.warning('mjbindings failed to import mjlib and other functions. '
'libmujoco.so may not be accessible.')
| 40.8 | 78 | 0.736695 |
from absl import logging
from dm_control.mujoco.wrapper.mjbindings import constants
from dm_control.mujoco.wrapper.mjbindings import enums
from dm_control.mujoco.wrapper.mjbindings import sizes
from dm_control.mujoco.wrapper.mjbindings import types
from dm_control.mujoco.wrapper.mjbindings import wrappers
try:
from dm_control.mujoco.wrapper.mjbindings import functions
from dm_control.mujoco.wrapper.mjbindings.functions import mjlib
logging.info('MuJoCo library version is: %d', mjlib.mj_version())
except (IOError, OSError):
logging.warning('mjbindings failed to import mjlib and other functions. '
'libmujoco.so may not be accessible.')
| true | true |
f72cd2c662bd6709a900fab6f8b72a7287014147 | 16,047 | py | Python | natlas-server/app/admin/routes.py | rosswsnider/natlas | 71482c14213eb6b5e9e18365cde5875ed5441523 | [
"Apache-2.0"
] | null | null | null | natlas-server/app/admin/routes.py | rosswsnider/natlas | 71482c14213eb6b5e9e18365cde5875ed5441523 | [
"Apache-2.0"
] | null | null | null | natlas-server/app/admin/routes.py | rosswsnider/natlas | 71482c14213eb6b5e9e18365cde5875ed5441523 | [
"Apache-2.0"
] | null | null | null | from flask import render_template, redirect, url_for, current_app, flash, Response, abort, request
from flask_login import current_user
from app import db
from app.admin import bp
from app.elastic import Elastic
from app.admin.forms import *
from app.models import User, ScopeItem, ConfigItem, NatlasServices, AgentConfig, Tag
from app.auth.email import send_user_invite_email
from app.auth.wrappers import isAuthenticated, isAdmin
import ipaddress, hashlib
@bp.route('/', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def admin():
    """Render the admin config page and persist submitted config changes."""
    configForm = ConfigForm()
    configItems = current_app.config
    if configForm.validate_on_submit():
        for fieldname, fieldvalue in configForm.data.items():
            # Form bookkeeping fields are not real config items.
            if fieldname.upper() in ["SUBMIT", "CSRF_TOKEN"]:
                continue
            if fieldname.upper() == "ELASTICSEARCH_URL" and fieldvalue != current_app.config["ELASTICSEARCH_URL"]: # if we've got a new elasticsearch address, update our current handle to elastic
                current_app.elastic = Elastic(fieldvalue)
            # Update the live app config and the persisted ConfigItem row.
            current_app.config[fieldname.upper()] = fieldvalue
            confitem = ConfigItem.query.filter_by(name=fieldname.upper()).first()
            confitem.value=str(fieldvalue)
            db.session.add(confitem)
        db.session.commit()

    return render_template("admin/index.html", configForm=configForm, configItems=configItems)
@bp.route('/users', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def users():
    """List all users and handle email invitations for new accounts."""
    users = User.query.all()
    delForm = UserDeleteForm()
    editForm = UserEditForm()
    inviteForm = InviteUserForm()
    if inviteForm.validate_on_submit():
        # validate_email yields a falsy value for undeliverable addresses;
        # presumably it returns the normalized address otherwise — confirm
        # against models.User.
        validemail = User.validate_email(inviteForm.email.data)
        if not validemail:
            flash("%s does not appear to be a valid, deliverable email address." % inviteForm.email.data, "danger")
            return redirect(request.referrer)
        newUser = User(email=validemail)
        db.session.add(newUser)
        db.session.commit()
        send_user_invite_email(newUser)
        flash('Invitation Sent!', 'success')
        return redirect(url_for('admin.users'))

    return render_template("admin/users.html", users=users, delForm=delForm, editForm=editForm, inviteForm=inviteForm)
@bp.route('/users/<int:id>/delete', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteUser(id):
    """Delete the user with the given id (admins cannot delete themselves)."""
    delForm = UserDeleteForm()
    if delForm.validate_on_submit():
        if current_user.id == id:
            flash('You can\'t delete yourself!', 'danger')
            return redirect(url_for('admin.users'))
        user = User.query.filter_by(id=id).first()
        if user is None:
            # Guard against stale/forged ids instead of raising a 500
            # on user.email below.
            flash('User not found!', 'danger')
            return redirect(url_for('admin.users'))
        db.session.delete(user)
        db.session.commit()
        flash('%s deleted!' % user.email, 'success')
        return redirect(url_for('admin.users'))
    else:
        flash("Form couldn't validate!", 'danger')
        return redirect(url_for('admin.users'))
@bp.route('/users/<int:id>/toggle', methods=['POST'])
@isAuthenticated
@isAdmin
def toggleUser(id):
    """Promote or demote a user's admin flag, keeping at least one admin."""
    editForm = UserEditForm()
    if editForm.validate_on_submit():
        user = User.query.filter_by(id=id).first()
        if user is None:
            # Guard against stale/forged ids instead of raising a 500.
            flash('User not found!', 'danger')
            return redirect(url_for('admin.users'))
        if user.is_admin:
            # Refuse to demote the only remaining admin.
            admins = User.query.filter_by(is_admin=True).all()
            if len(admins) == 1:
                flash('Can\'t delete the last admin!', 'danger')
                return redirect(url_for('admin.users'))
            user.is_admin = False
            db.session.commit()
            flash('User demoted!', 'success')
        else:
            user.is_admin = True
            db.session.commit()
            flash('User promoted!', 'success')
        return redirect(url_for('admin.users'))
    else:
        flash("Form couldn't validate!", 'danger')
        return redirect(url_for('admin.users'))
@bp.route('/scope', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def scope():
    """Show the in-scope target list and handle adding a single target."""
    scope = ScopeItem.getScope()
    scopeSize = current_app.ScopeManager.getScopeSize()
    if scopeSize == 0: # if it's zero, let's update the app's scopemanager
        current_app.ScopeManager.update()
        scopeSize = current_app.ScopeManager.getScopeSize() # if it's zero again that's fine, we just had to check
    newForm = NewScopeForm()
    delForm = ScopeDeleteForm()
    editForm = ScopeToggleForm()
    importForm = ImportScopeForm()
    addTagForm = TagScopeForm()
    addTagForm.tagname.choices = [(row.name, row.name) for row in Tag.query.all()]
    if newForm.validate_on_submit():
        # Bare IPs are stored as /32 host networks.
        if '/' not in newForm.target.data:
            newForm.target.data = newForm.target.data + '/32'
        target = ipaddress.ip_network(newForm.target.data, False)
        newTarget = ScopeItem(target=target.with_prefixlen, blacklist=False)
        db.session.add(newTarget)
        db.session.commit()
        current_app.ScopeManager.update()
        flash('%s added!' % newTarget.target, 'success')
        return redirect(url_for('admin.scope'))

    return render_template("admin/scope.html", scope=scope, scopeSize=scopeSize, delForm=delForm, editForm=editForm, newForm=newForm, importForm=importForm, \
        addTagForm=addTagForm)
@bp.route('/blacklist', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def blacklist():
    """Show the blacklist and handle adding a single blacklisted target.

    Mirrors scope() but creates ScopeItem rows with blacklist=True.
    """
    scope = ScopeItem.getBlacklist()
    blacklistSize = current_app.ScopeManager.getBlacklistSize()
    newForm = NewScopeForm()
    delForm = ScopeDeleteForm()
    editForm = ScopeToggleForm()
    importForm = ImportBlacklistForm()
    if newForm.validate_on_submit():
        # Bare IPs are stored as /32 host networks.
        if '/' not in newForm.target.data:
            newForm.target.data = newForm.target.data + '/32'
        target = ipaddress.ip_network(newForm.target.data, False)
        newTarget = ScopeItem(target=target.with_prefixlen, blacklist=True)
        db.session.add(newTarget)
        db.session.commit()
        current_app.ScopeManager.update()
        flash('%s blacklisted!' % newTarget.target, 'success')
        return redirect(url_for('admin.blacklist'))

    return render_template("admin/blacklist.html", scope=scope, blacklistSize=blacklistSize, delForm=delForm, editForm=editForm, newForm=newForm, importForm=importForm)
@bp.route('/import/<string:scopetype>', methods=['POST'])
@isAuthenticated
@isAdmin
def importScope(scopetype=''):
    """Bulk-import newline-separated CIDR targets into scope or blacklist.

    `scopetype` selects the destination list ('scope' or 'blacklist');
    anything else 404s. Flash messages report how many entries were added,
    already existed, or failed to parse.
    """
    if scopetype == 'blacklist':
        importBlacklist = True
        importForm = ImportBlacklistForm()
    elif scopetype == 'scope':
        importBlacklist = False
        importForm = ImportScopeForm()
    else:
        abort(404)
    if importForm.validate_on_submit():
        successImport = []
        alreadyExists = []
        failedImport = []
        newScopeItems = importForm.scope.data.split('\n')
        for item in newScopeItems:
            item = item.strip()
            if not item:
                # Skip blank lines (e.g. a trailing newline in the textarea)
                # instead of reporting them as failed '/32' entries.
                continue
            if '/' not in item:
                item = item + '/32'  # bare IPs become host networks
            try:
                target = ipaddress.ip_network(item, False)
            except ValueError:
                # Not a valid IP network; collect for the failure report.
                failedImport.append(item)
                continue
            exists = ScopeItem.query.filter_by(target=target.with_prefixlen).first()
            if exists:
                alreadyExists.append(target.with_prefixlen)
                continue
            newTarget = ScopeItem(target=target.with_prefixlen, blacklist=importBlacklist)
            db.session.add(newTarget)
            successImport.append(newTarget.target)
        db.session.commit()
        current_app.ScopeManager.update()
        if len(successImport) > 0:
            flash('%s targets added to %s!' % (len(successImport), scopetype), 'success')
        if len(alreadyExists) > 0:
            flash('%s targets already existed!' % len(alreadyExists), 'info')
        if len(failedImport) > 0:
            flash('%s targets failed to import!' % len(failedImport), 'danger')
            for item in failedImport:
                flash('%s' % item, 'danger')
        return redirect(url_for('admin.%s' % scopetype))
    else:
        for field, errors in importForm.errors.items():
            for error in errors:
                flash(error, 'danger')
        return redirect(url_for('admin.%s' % scopetype))
@bp.route('/export/<string:scopetype>', methods=['GET'])
@isAuthenticated
@isAdmin
def exportScope(scopetype=''):
    """Export the scope or blacklist as a newline-separated plaintext body."""
    type_map = {'scope': False, 'blacklist': True}
    if scopetype not in type_map:
        abort(404)
    items = ScopeItem.query.filter_by(blacklist=type_map[scopetype]).all()
    body = '\n'.join(str(item.target) for item in items)
    return Response(body, mimetype='text/plain')
@bp.route('/scope/<int:id>/delete', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteScopeItem(id):
    """Delete a scope item, detaching its tags first."""
    delForm = ScopeDeleteForm()
    if delForm.validate_on_submit():
        item = ScopeItem.query.filter_by(id=id).first()
        if item is None:
            # Guard against stale/forged ids instead of raising a 500.
            flash('Scope item not found!', 'danger')
            return redirect(request.referrer)
        # Iterate over a snapshot: removing from item.tags while iterating
        # the live collection can skip entries.
        for tag in list(item.tags):
            item.tags.remove(tag)
        db.session.delete(item)
        db.session.commit()
        current_app.ScopeManager.update()
        flash('%s deleted!' % item.target, 'success')
        return redirect(request.referrer)
    else:
        flash("Form couldn't validate!", 'danger')
        return redirect(request.referrer)
@bp.route('/scope/<int:id>/toggle', methods=['POST'])
@isAuthenticated
@isAdmin
def toggleScopeItem(id):
	"""Flip a target between scope and blacklist membership."""
	toggleForm = ScopeToggleForm()
	if not toggleForm.validate_on_submit():
		flash("Form couldn't validate!", 'danger')
		return redirect(request.referrer)
	item = ScopeItem.query.filter_by(id=id).first()
	if item.blacklist:
		item.blacklist = False
		flash('%s removed from blacklist!' % item.target, 'success')
	else:
		item.blacklist = True
		flash('%s blacklisted!' % item.target, 'success')
	db.session.commit()
	current_app.ScopeManager.update()
	return redirect(request.referrer)
@bp.route('/scope/<int:id>/tag', methods=['POST'])
@isAuthenticated
@isAdmin
def tagScopeItem(id):
	"""Attach an existing tag to a scope item."""
	addTagForm = TagScopeForm()
	# any known tag may be attached
	addTagForm.tagname.choices = [(t.name, t.name) for t in Tag.query.all()]
	if not addTagForm.validate_on_submit():
		flash("Form couldn't validate!", 'danger')
		return redirect(request.referrer)
	scope = ScopeItem.query.get(id)
	mytag = Tag.query.filter_by(name=addTagForm.tagname.data).first()
	scope.addTag(mytag)
	db.session.commit()
	flash('Tag "%s" added to %s' % (mytag.name, scope.target), 'success')
	return redirect(request.referrer)
@bp.route('/scope/<int:id>/untag', methods=['POST'])
@isAuthenticated
@isAdmin
def untagScopeItem(id):
	"""Detach a tag from a scope item."""
	scope = ScopeItem.query.get(id)
	delTagForm = TagScopeForm()
	# only offer tags currently attached to this item
	delTagForm.tagname.choices = [(t.name, t.name) for t in scope.tags.all()]
	if not delTagForm.validate_on_submit():
		flash("Form couldn't validate!", 'danger')
		return redirect(request.referrer)
	mytag = Tag.query.filter_by(name=delTagForm.tagname.data).first()
	scope.delTag(mytag)
	db.session.commit()
	flash('Tag "%s" removed from %s' % (mytag.name, scope.target), 'success')
	return redirect(request.referrer)
@bp.route('/services', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def services():
	"""Manage the natlas services file.

	Handles two independent forms on one page: uploading a replacement
	services file, and appending a single new service entry. Each
	accepted change is stored as a new NatlasServices row and the
	app-level cache is refreshed from the newest row.
	"""
	uploadForm = ServicesUploadForm(prefix="upload-services")
	addServiceForm = AddServiceForm(prefix="add-service")
	addServiceForm.serviceProtocol.choices = [("tcp", "TCP"), ("udp","UDP")]
	if uploadForm.uploadFile.data and uploadForm.validate_on_submit():
		# normalize trailing newlines so hashing is stable
		newServicesContent = uploadForm.serviceFile.data.read().decode("utf-8").rstrip('\r\n')
		newServicesSha = hashlib.sha256(newServicesContent.encode()).hexdigest()
		# only store the upload if it differs from the active file
		if newServicesSha != current_app.current_services["sha256"]:
			ns = NatlasServices(sha256=newServicesSha, services=newServicesContent)
			db.session.add(ns)
			db.session.commit()
			# the newest row becomes the active services definition
			current_app.current_services = NatlasServices.query.order_by(NatlasServices.id.desc()).first().as_dict()
			flash("New services file with hash %s has been uploaded." % current_app.current_services["sha256"], "success")
			return redirect(url_for('admin.services'))
		else:
			flash("That file is an exact match for our current services file!", "warning")
			return redirect(url_for('admin.services'))
	if addServiceForm.serviceName.data and addServiceForm.validate_on_submit():
		newServiceName = addServiceForm.serviceName.data
		# entries are keyed as "port/protocol", e.g. "443/tcp"
		newServicePort = str(addServiceForm.servicePort.data) + '/' + addServiceForm.serviceProtocol.data
		# the services file is tab-separated, so "\t<port/proto>" detects duplicates
		if '\t' + newServicePort in str(current_app.current_services['services']):
			flash("A service with port %s already exists!" % newServicePort, "danger")
			return redirect(url_for('admin.services'))
		else:
			# append the new entry and persist the result as a new version
			newServices = current_app.current_services["services"] + "\n" + newServiceName + "\t" + newServicePort
			newSha = hashlib.sha256(newServices.encode()).hexdigest()
			ns = NatlasServices(sha256=newSha, services=newServices)
			db.session.add(ns)
			db.session.commit()
			current_app.current_services = NatlasServices.query.order_by(NatlasServices.id.desc()).first().as_dict()
			flash("New service %s on port %s has been added." % (newServiceName, newServicePort), "success")
			return redirect(url_for('admin.services'))
	return render_template('admin/services.html', uploadForm=uploadForm, addServiceForm=addServiceForm, current_services=current_app.current_services, servlist=current_app.current_services['as_list'])
@bp.route('/services/export', methods=['GET'])
@isAuthenticated
@isAdmin
def exportServices():
	"""Serve the active services definition as plain text."""
	servicesText = str(current_app.current_services["services"])
	return Response(servicesText, mimetype='text/plain')
@bp.route('/agents', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def agentConfig():
	"""Display and update the global agent configuration record."""
	# a single AgentConfig row (id 1) holds the whole configuration
	agentConfig = AgentConfig.query.get(1)
	agentForm = AgentConfigForm(obj=agentConfig) # pass the model to the form to populate
	addScriptForm = AddScriptForm(prefix="add-script")
	delScriptForm = DeleteForm(prefix="del-script")
	if agentForm.validate_on_submit():
		agentForm.populate_obj(agentConfig) # populate the object from the form data
		db.session.commit()
		# keep the app-level cached copy in sync with the database
		current_app.agentConfig = agentConfig.as_dict()
	return render_template('admin/agents.html', agentForm=agentForm, scripts=current_app.agentScripts, \
		addScriptForm=addScriptForm, delScriptForm=delScriptForm)
@bp.route('/agents/script/add', methods=['POST'])
@isAuthenticated
@isAdmin
def addScript():
	"""Register a new agent script name."""
	addScriptForm = AddScriptForm(prefix="add-script")
	if not addScriptForm.validate_on_submit():
		flash("%s couldn't be added to scripts" % addScriptForm.scriptName.data, "danger")
		return redirect(request.referrer)
	newscript = AgentScript(name=addScriptForm.scriptName.data)
	db.session.add(newscript)
	db.session.commit()
	# refresh the cached script list and its serialized form
	current_app.agentScripts = AgentScript.query.all()
	current_app.agentScriptStr = AgentScript.getScriptsString(current_app.agentScripts)
	flash("%s successfully added to scripts" % newscript.name, "success")
	return redirect(request.referrer)
@bp.route('/agents/script/<string:name>/delete', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteScript(name):
	"""Delete an agent script by name.

	Flashes the outcome and redirects back to the referring page.
	"""
	deleteForm = DeleteForm()
	if deleteForm.validate_on_submit():
		delScript = AgentScript.query.filter_by(name=name).first()
		if delScript:
			db.session.delete(delScript)
			db.session.commit()
			# refresh the cached script list and its serialized form
			current_app.agentScripts = AgentScript.query.all()
			current_app.agentScriptStr = AgentScript.getScriptsString(current_app.agentScripts)
			flash("%s successfully deleted." % name, "success")
		else:
			flash("%s doesn't exist" % name, "danger")
	else:
		# previously a failed form/CSRF check redirected silently;
		# report it like the other admin handlers do
		flash("Form couldn't validate!", 'danger')
	return redirect(request.referrer)
@bp.route('/scans/delete/<scan_id>', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteScan(scan_id):
	"""Delete a single scan result from the Elasticsearch backend.

	On success, redirects away from the deleted scan's own page when the
	request came from there; otherwise returns to the referrer (falling
	back to the search page when no referrer is available).
	"""
	delForm = DeleteForm()
	if delForm.validate_on_submit():
		deleted = current_app.elastic.delete_scan(scan_id)
		# NOTE(review): delete_scan appears to signal success with 1 or 2
		# deleted documents -- confirm against the Elastic wrapper
		if deleted in [1, 2]:
			flash("Successfully deleted scan %s." % scan_id, "success")
			if request.referrer:
				if scan_id in request.referrer:
					# referrer pointed at the scan we just removed; go to its parent
					redirectLoc = request.referrer.rsplit(scan_id)[0]
				else:
					redirectLoc = request.referrer
			else:
				redirectLoc = url_for('main.search')
			return redirect(redirectLoc)
		else:
			flash("Could not delete scan %s." % scan_id, "danger")
			return redirect(request.referrer or url_for('main.search'))
	else:
		# use the same category and referrer fallback as the other branches
		flash("Couldn't validate form!", 'danger')
		return redirect(request.referrer or url_for('main.search'))
@bp.route('/hosts/delete/<ip>', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteHost(ip):
	"""Delete all stored documents for a host address."""
	delForm = DeleteForm()
	if delForm.validate_on_submit():
		deleted = current_app.elastic.delete_host(ip)
		if deleted > 0:
			flash("Successfully deleted host %s" % ip, "success")
			return redirect(url_for('main.search'))
		else:
			flash("Couldn't delete host: %s" % ip, "danger")
	else:
		# flash with an explicit category, consistent with the other handlers
		flash("Couldn't validate form!", 'danger')
	return redirect(request.referrer or url_for('main.search'))
@bp.route('/tags', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def tags():
	"""List all tags and handle creation of new ones."""
	tags = Tag.query.all()
	addForm = AddTagForm()
	if addForm.validate_on_submit():
		# tag names are stored lowercased
		newTag = Tag(name=addForm.tagname.data.lower())
		db.session.add(newTag)
		db.session.commit()
		flash('Successfully added tag %s' % newTag.name, 'success')
		return redirect(url_for('admin.tags'))
	return render_template("admin/tags.html", tags=tags, addForm=addForm)
| 36.141892 | 197 | 0.737334 | from flask import render_template, redirect, url_for, current_app, flash, Response, abort, request
from flask_login import current_user
from app import db
from app.admin import bp
from app.elastic import Elastic
from app.admin.forms import *
from app.models import User, ScopeItem, ConfigItem, NatlasServices, AgentConfig, Tag
from app.auth.email import send_user_invite_email
from app.auth.wrappers import isAuthenticated, isAdmin
import ipaddress, hashlib
@bp.route('/', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def admin():
	"""Render the admin index and apply submitted configuration changes.

	Every form field (except the submit button and CSRF token) maps to an
	upper-cased app config key; submitted values are written both to the
	live Flask config and to the persisted ConfigItem rows.
	"""
	configForm = ConfigForm()
	configItems = current_app.config
	if configForm.validate_on_submit():
		for fieldname, fieldvalue in configForm.data.items():
			# skip form plumbing fields that aren't real config options
			if fieldname.upper() in ["SUBMIT", "CSRF_TOKEN"]:
				continue
			# reconnect the Elastic client if its URL changed
			if fieldname.upper() == "ELASTICSEARCH_URL" and fieldvalue != current_app.config["ELASTICSEARCH_URL"]:
				current_app.elastic = Elastic(fieldvalue)
			current_app.config[fieldname.upper()] = fieldvalue
			# persist the value so it survives restarts
			confitem = ConfigItem.query.filter_by(name=fieldname.upper()).first()
			confitem.value=str(fieldvalue)
			db.session.add(confitem)
		db.session.commit()
	return render_template("admin/index.html", configForm=configForm, configItems=configItems)
@bp.route('/users', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def users():
	"""List all users and handle sending new-user invitations."""
	users = User.query.all()
	delForm = UserDeleteForm()
	editForm = UserEditForm()
	inviteForm = InviteUserForm()
	if inviteForm.validate_on_submit():
		# validate_email is expected to return the address, or a falsy
		# value when it is not deliverable
		validemail = User.validate_email(inviteForm.email.data)
		if not validemail:
			flash("%s does not appear to be a valid, deliverable email address." % inviteForm.email.data, "danger")
			return redirect(request.referrer)
		newUser = User(email=validemail)
		db.session.add(newUser)
		db.session.commit()
		send_user_invite_email(newUser)
		flash('Invitation Sent!', 'success')
		return redirect(url_for('admin.users'))
	return render_template("admin/users.html", users=users, delForm=delForm, editForm=editForm, inviteForm=inviteForm)
@bp.route('/users/<int:id>/delete', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteUser(id):
	"""Delete a user account; admins may not delete themselves."""
	delForm = UserDeleteForm()
	if not delForm.validate_on_submit():
		flash("Form couldn't validate!", 'danger')
		return redirect(url_for('admin.users'))
	if current_user.id == id:
		flash('You can\'t delete yourself!', 'danger')
		return redirect(url_for('admin.users'))
	# fetch first so the email is still available for the flash message
	user = User.query.filter_by(id=id).first()
	User.query.filter_by(id=id).delete()
	db.session.commit()
	flash('%s deleted!' % user.email, 'success')
	return redirect(url_for('admin.users'))
@bp.route('/users/<int:id>/toggle', methods=['POST'])
@isAuthenticated
@isAdmin
def toggleUser(id):
	"""Promote a user to admin or demote them, protecting the last admin."""
	editForm = UserEditForm()
	if editForm.validate_on_submit():
		user = User.query.filter_by(id=id).first()
		if user.is_admin:
			# refuse to demote the only remaining admin
			admins = User.query.filter_by(is_admin=True).all()
			if len(admins) == 1:
				flash('Can\'t delete the last admin!', 'danger')
				return redirect(url_for('admin.users'))
			user.is_admin = False
			db.session.commit()
			flash('User demoted!', 'success')
			return redirect(url_for('admin.users'))
		else:
			user.is_admin = True
			db.session.commit()
			flash('User promoted!', 'success')
			return redirect(url_for('admin.users'))
	else:
		flash("Form couldn't validate!", 'danger')
		return redirect(url_for('admin.users'))
@bp.route('/scope', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def scope():
	"""Show the current scan scope and handle adding a single target."""
	scope = ScopeItem.getScope()
	scopeSize = current_app.ScopeManager.getScopeSize()
	if scopeSize == 0: # if it's zero, let's update the app's scopemanager
		current_app.ScopeManager.update()
		scopeSize = current_app.ScopeManager.getScopeSize()
	newForm = NewScopeForm()
	delForm = ScopeDeleteForm()
	editForm = ScopeToggleForm()
	importForm = ImportScopeForm()
	addTagForm = TagScopeForm()
	addTagForm.tagname.choices = [(row.name, row.name) for row in Tag.query.all()]
	if newForm.validate_on_submit():
		# bare addresses are treated as single-host /32 networks
		if '/' not in newForm.target.data:
			newForm.target.data = newForm.target.data + '/32'
		# strict=False tolerates host bits in the supplied address
		target = ipaddress.ip_network(newForm.target.data, False)
		newTarget = ScopeItem(target=target.with_prefixlen, blacklist=False)
		db.session.add(newTarget)
		db.session.commit()
		current_app.ScopeManager.update()
		flash('%s added!' % newTarget.target, 'success')
		return redirect(url_for('admin.scope'))
	return render_template("admin/scope.html", scope=scope, scopeSize=scopeSize, delForm=delForm, editForm=editForm, newForm=newForm, importForm=importForm, \
		addTagForm=addTagForm)
@bp.route('/blacklist', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def blacklist():
	"""Show blacklisted targets and handle adding a single one."""
	scope = ScopeItem.getBlacklist()
	blacklistSize = current_app.ScopeManager.getBlacklistSize()
	newForm = NewScopeForm()
	delForm = ScopeDeleteForm()
	editForm = ScopeToggleForm()
	importForm = ImportBlacklistForm()
	if newForm.validate_on_submit():
		# bare addresses are treated as single-host /32 networks
		if '/' not in newForm.target.data:
			newForm.target.data = newForm.target.data + '/32'
		# strict=False tolerates host bits in the supplied address
		target = ipaddress.ip_network(newForm.target.data, False)
		newTarget = ScopeItem(target=target.with_prefixlen, blacklist=True)
		db.session.add(newTarget)
		db.session.commit()
		current_app.ScopeManager.update()
		flash('%s blacklisted!' % newTarget.target, 'success')
		return redirect(url_for('admin.blacklist'))
	return render_template("admin/blacklist.html", scope=scope, blacklistSize=blacklistSize, delForm=delForm, editForm=editForm, newForm=newForm, importForm=importForm)
@bp.route('/import/<string:scopetype>', methods=['POST'])
@isAuthenticated
@isAdmin
def importScope(scopetype=''):
if scopetype == 'blacklist':
importBlacklist = True
importForm = ImportBlacklistForm()
elif scopetype == 'scope':
importBlacklist = False
importForm = ImportScopeForm()
else:
abort(404)
if importForm.validate_on_submit():
successImport = []
alreadyExists = []
failedImport = []
newScopeItems = importForm.scope.data.split('\n')
for item in newScopeItems:
item = item.strip()
if '/' not in item:
item = item + '/32'
try:
target = ipaddress.ip_network(item, False)
except ValueError as e:
failedImport.append(item)
continue
exists = ScopeItem.query.filter_by(target=target.with_prefixlen).first()
if exists:
alreadyExists.append(target.with_prefixlen) # this range is already a scope item
continue
newTarget = ScopeItem(target=target.with_prefixlen, blacklist=importBlacklist)
db.session.add(newTarget)
successImport.append(newTarget.target)
db.session.commit()
current_app.ScopeManager.update()
if len(successImport) > 0:
flash('%s targets added to %s!' % (len(successImport), scopetype), 'success')
if len(alreadyExists) > 0:
flash('%s targets already existed!' % len(alreadyExists), 'info')
if len(failedImport) > 0:
flash('%s targets failed to import!' % len(failedImport), 'danger')
for item in failedImport:
flash('%s' % item, 'danger')
return redirect(url_for('admin.%s' % scopetype))
else:
for field, errors in importForm.errors.items():
for error in errors:
flash(error, 'danger')
return redirect(url_for('admin.%s' % scopetype))
@bp.route('/export/<string:scopetype>', methods=['GET'])
@isAuthenticated
@isAdmin
def exportScope(scopetype=''):
if scopetype == 'blacklist':
exportBlacklist = True
elif scopetype == 'scope':
exportBlacklist = False
else:
abort(404)
items = ScopeItem.query.filter_by(blacklist=exportBlacklist).all()
return Response('\n'.join(str(item.target) for item in items), mimetype='text/plain')
@bp.route('/scope/<int:id>/delete', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteScopeItem(id):
delForm = ScopeDeleteForm()
if delForm.validate_on_submit():
item = ScopeItem.query.filter_by(id=id).first()
for tag in item.tags:
item.tags.remove(tag)
ScopeItem.query.filter_by(id=id).delete()
db.session.commit()
current_app.ScopeManager.update()
flash('%s deleted!' % item.target, 'success')
return redirect(request.referrer)
else:
flash("Form couldn't validate!", 'danger')
return redirect(request.referrer)
@bp.route('/scope/<int:id>/toggle', methods=['POST'])
@isAuthenticated
@isAdmin
def toggleScopeItem(id):
toggleForm = ScopeToggleForm()
if toggleForm.validate_on_submit():
item = ScopeItem.query.filter_by(id=id).first()
if item.blacklist:
item.blacklist = False
flash('%s removed from blacklist!' % item.target, 'success')
else:
item.blacklist = True
flash('%s blacklisted!' % item.target, 'success')
db.session.commit()
current_app.ScopeManager.update()
return redirect(request.referrer)
else:
flash("Form couldn't validate!", 'danger')
return redirect(request.referrer)
@bp.route('/scope/<int:id>/tag', methods=['POST'])
@isAuthenticated
@isAdmin
def tagScopeItem(id):
addTagForm = TagScopeForm()
addTagForm.tagname.choices = [(row.name, row.name) for row in Tag.query.all()]
if addTagForm.validate_on_submit():
scope = ScopeItem.query.get(id)
mytag = Tag.query.filter_by(name=addTagForm.tagname.data).first()
scope.addTag(mytag)
db.session.commit()
flash("Tag \"%s\" added to %s" % (mytag.name, scope.target), "success")
return redirect(request.referrer)
else:
flash("Form couldn't validate!", 'danger')
return redirect(request.referrer)
@bp.route('/scope/<int:id>/untag', methods=['POST'])
@isAuthenticated
@isAdmin
def untagScopeItem(id):
delTagForm = TagScopeForm()
scope = ScopeItem.query.get(id)
delTagForm.tagname.choices = [(row.name, row.name) for row in scope.tags.all()]
if delTagForm.validate_on_submit():
mytag = Tag.query.filter_by(name=delTagForm.tagname.data).first()
scope.delTag(mytag)
db.session.commit()
flash("Tag \"%s\" removed from %s" % (mytag.name, scope.target), "success")
return redirect(request.referrer)
else:
flash("Form couldn't validate!", 'danger')
return redirect(request.referrer)
@bp.route('/services', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def services():
uploadForm = ServicesUploadForm(prefix="upload-services")
addServiceForm = AddServiceForm(prefix="add-service")
addServiceForm.serviceProtocol.choices = [("tcp", "TCP"), ("udp","UDP")]
if uploadForm.uploadFile.data and uploadForm.validate_on_submit():
newServicesContent = uploadForm.serviceFile.data.read().decode("utf-8").rstrip('\r\n')
newServicesSha = hashlib.sha256(newServicesContent.encode()).hexdigest()
if newServicesSha != current_app.current_services["sha256"]:
ns = NatlasServices(sha256=newServicesSha, services=newServicesContent)
db.session.add(ns)
db.session.commit()
current_app.current_services = NatlasServices.query.order_by(NatlasServices.id.desc()).first().as_dict()
flash("New services file with hash %s has been uploaded." % current_app.current_services["sha256"], "success")
return redirect(url_for('admin.services'))
else:
flash("That file is an exact match for our current services file!", "warning")
return redirect(url_for('admin.services'))
if addServiceForm.serviceName.data and addServiceForm.validate_on_submit():
newServiceName = addServiceForm.serviceName.data
newServicePort = str(addServiceForm.servicePort.data) + '/' + addServiceForm.serviceProtocol.data
if '\t' + newServicePort in str(current_app.current_services['services']):
flash("A service with port %s already exists!" % newServicePort, "danger")
return redirect(url_for('admin.services'))
else:
newServices = current_app.current_services["services"] + "\n" + newServiceName + "\t" + newServicePort
newSha = hashlib.sha256(newServices.encode()).hexdigest()
ns = NatlasServices(sha256=newSha, services=newServices)
db.session.add(ns)
db.session.commit()
current_app.current_services = NatlasServices.query.order_by(NatlasServices.id.desc()).first().as_dict()
flash("New service %s on port %s has been added." % (newServiceName, newServicePort), "success")
return redirect(url_for('admin.services'))
return render_template('admin/services.html', uploadForm=uploadForm, addServiceForm=addServiceForm, current_services=current_app.current_services, servlist=current_app.current_services['as_list'])
@bp.route('/services/export', methods=['GET'])
@isAuthenticated
@isAdmin
def exportServices():
return Response(str(current_app.current_services["services"]), mimetype='text/plain')
@bp.route('/agents', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def agentConfig():
agentConfig = AgentConfig.query.get(1)
agentForm = AgentConfigForm(obj=agentConfig) # pass the model to the form to populate
addScriptForm = AddScriptForm(prefix="add-script")
delScriptForm = DeleteForm(prefix="del-script")
if agentForm.validate_on_submit():
agentForm.populate_obj(agentConfig) # populate the object from the form data
db.session.commit()
current_app.agentConfig = agentConfig.as_dict()
return render_template('admin/agents.html', agentForm=agentForm, scripts=current_app.agentScripts, \
addScriptForm=addScriptForm, delScriptForm=delScriptForm)
@bp.route('/agents/script/add', methods=['POST'])
@isAuthenticated
@isAdmin
def addScript():
addScriptForm = AddScriptForm(prefix="add-script")
if addScriptForm.validate_on_submit():
newscript = AgentScript(name=addScriptForm.scriptName.data)
db.session.add(newscript)
db.session.commit()
current_app.agentScripts = AgentScript.query.all()
current_app.agentScriptStr = AgentScript.getScriptsString(current_app.agentScripts)
flash("%s successfully added to scripts" % newscript.name, "success")
return redirect(request.referrer)
else:
flash("%s couldn't be added to scripts" % addScriptForm.scriptName.data, "danger")
return redirect(request.referrer)
@bp.route('/agents/script/<string:name>/delete', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteScript(name):
deleteForm = DeleteForm()
if deleteForm.validate_on_submit():
delScript = AgentScript.query.filter_by(name=name).first()
if delScript:
db.session.delete(delScript)
db.session.commit()
current_app.agentScripts = AgentScript.query.all()
current_app.agentScriptStr = AgentScript.getScriptsString(current_app.agentScripts)
flash("%s successfully deleted." % name, "success")
else:
flash("%s doesn't exist" % name, "danger")
return redirect(request.referrer)
@bp.route('/scans/delete/<scan_id>', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteScan(scan_id):
delForm = DeleteForm()
if delForm.validate_on_submit():
deleted = current_app.elastic.delete_scan(scan_id)
if deleted in [1,2]:
flash("Successfully deleted scan %s." % scan_id, "success")
if request.referrer:
if scan_id in request.referrer:
redirectLoc = request.referrer.rsplit(scan_id)[0]
else:
redirectLoc = request.referrer
else:
redirectLoc = url_for('main.search')
return redirect(redirectLoc)
else:
flash("Could not delete scan %s." % scan_id, "danger")
return redirect(request.referrer or url_for('main.search'))
else:
flash("Couldn't validate form!")
return redirect(request.referrer)
@bp.route('/hosts/delete/<ip>', methods=['POST'])
@isAuthenticated
@isAdmin
def deleteHost(ip):
delForm = DeleteForm()
if delForm.validate_on_submit():
deleted = current_app.elastic.delete_host(ip)
if deleted > 0:
flash("Successfully deleted host %s" % ip, "success")
return redirect(url_for('main.search'))
else:
flash("Couldn't delete host: %s" % ip, "danger")
else:
flash("Couldn't validate form!")
return redirect(request.referrer)
@bp.route('/tags', methods=['GET', 'POST'])
@isAuthenticated
@isAdmin
def tags():
tags = Tag.query.all()
addForm = AddTagForm()
if addForm.validate_on_submit():
newTag = Tag(name=addForm.tagname.data.lower())
db.session.add(newTag)
db.session.commit()
flash('Successfully added tag %s' % newTag.name, 'success')
return redirect(url_for('admin.tags'))
return render_template("admin/tags.html", tags=tags, addForm=addForm)
| true | true |
f72cd336a36193490a287d751208899587977749 | 3,822 | py | Python | CAIL2020/cocr/torchocr/datasets/icdar15/ICDAR15CropSave.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 71 | 2020-07-16T01:49:27.000Z | 2022-03-27T16:55:00.000Z | CAIL2020/cocr/torchocr/datasets/icdar15/ICDAR15CropSave.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 11 | 2020-09-18T14:26:25.000Z | 2022-02-09T23:49:33.000Z | CAIL2020/cocr/torchocr/datasets/icdar15/ICDAR15CropSave.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 16 | 2020-07-15T07:24:30.000Z | 2022-03-19T05:41:11.000Z | '''
@Author: Jeffery Sheng (Zhenfei Sheng)
@Time: 2020/5/21 18:34
@File: ICDAR15CropSave.py
'''
import os
import cv2
from glob import glob
from tqdm import tqdm
class icdar2015CropSave:
    """Crop ICDAR2015 text boxes out of full images and save them
    together with their recognition labels, optionally split into
    train/val label files.
    """

    def __init__(self, img_dir: str, gt_dir: str, save_data_dir: str,
                 train_val_split_ratio: float = 0.1):
        """
        :param img_dir: directory holding the *.jpg training images
        :param gt_dir: directory holding the matching gt_*.txt files
        :param save_data_dir: output directory (created if missing)
        :param train_val_split_ratio: fraction of samples for the
            validation split; falsy (None/0) writes a single train.txt
        """
        self.save_id = 1
        self.img_dir = os.path.abspath(img_dir)
        self.gt_dir = os.path.abspath(gt_dir)
        # makedirs also creates missing parents, unlike the old mkdir
        os.makedirs(save_data_dir, exist_ok=True)
        self.save_data_dir = save_data_dir
        self.train_val_split_ratio = train_val_split_ratio

    def crop_save(self) -> None:
        """Crop every labeled text box and write images plus label files."""
        all_img_paths = glob(os.path.join(self.img_dir, '*.jpg'))
        all_gt_paths = glob(os.path.join(self.gt_dir, '*.txt'))
        # every image must have a ground-truth file
        assert len(all_img_paths) == len(all_gt_paths)
        # create the image output dir once, not inside the per-box loop
        save_img_dir = os.path.join(self.save_data_dir, 'images')
        os.makedirs(save_img_dir, exist_ok=True)
        # collected "filename\ttranscript" label lines
        text_lines = list()
        for img_path in tqdm(all_img_paths):
            img = cv2.imread(img_path)
            gt_path = os.path.join(self.gt_dir, 'gt_' + os.path.basename(img_path).replace('.jpg', '.txt'))
            # utf-8-sig strips the BOM that ICDAR ground-truth files carry
            with open(gt_path, 'r', encoding='utf-8-sig') as file:
                lines = file.readlines()
            for line in lines:
                line = line.strip().split(',')
                # four corner points of the text box
                x1, y1, x2, y2, x3, y3, x4, y4 = list(map(int, line[: 8]))
                # get transcript; skip empty / "don't care" annotations
                trans = line[8]
                if trans in {'', '*', '###'}:
                    continue
                save_img_path = os.path.join(save_img_dir, f'textbox_{self.save_id}.jpg')
                # exactly 4 distinct coordinate values => axis-aligned rectangle
                if len({x1, y1, x2, y2, x3, y3, x4, y4}) == 4:
                    cv2.imwrite(save_img_path, img[y1: y4, x1: x2])
                else:
                    # polygon: save its minimal circumscribed rectangle
                    x_min, x_max = min((x1, x2, x3, x4)), max((x1, x2, x3, x4))
                    y_min, y_max = min((y1, y2, y3, y4)), max((y1, y2, y3, y4))
                    cv2.imwrite(save_img_path, img[y_min: y_max, x_min: x_max])
                text_lines.append(f'textbox_{self.save_id}.jpg\t{trans}\n')
                self.save_id += 1
        if self.train_val_split_ratio:
            # split on the number of samples actually written; the old
            # code used self.save_id, which is one past the last id and
            # skewed the split point by one
            n_train = int(round((1 - self.train_val_split_ratio) * len(text_lines)))
            with open(os.path.join(self.save_data_dir, 'train.txt'), 'w') as save_file:
                save_file.writelines(text_lines[:n_train])
            with open(os.path.join(self.save_data_dir, 'val.txt'), 'w') as save_file:
                save_file.writelines(text_lines[n_train:])
            print(f'{self.save_id-1} text-box images and 2 text-line file are saved.')
        else:
            with open(os.path.join(self.save_data_dir, 'train.txt'), 'w') as save_file:
                save_file.writelines(text_lines)
            print(f'{self.save_id-1} text-box images and 1 text-line file are saved.')
if __name__ == '__main__':
    # example invocation; adjust these dataset paths to your environment
    img_dir = '/data/disk7/private/szf/Datasets/ICDAR2015/train'
    gt_dir = '/data/disk7/private/szf/Datasets/ICDAR2015/train_local_trans'
    save_data_dir = '/data/disk7/private/szf/Datasets/ICDAR2015/data'
    icdar2015CropSave(img_dir, gt_dir, save_data_dir).crop_save()
import os
import cv2
from glob import glob
from tqdm import tqdm
class icdar2015CropSave:
def __init__(self, img_dir :str, gt_dir :str, save_data_dir :str,
train_val_split_ratio: float or None=0.1):
self.save_id = 1
self.img_dir = os.path.abspath(img_dir)
self.gt_dir = os.path.abspath(gt_dir)
if not os.path.exists(save_data_dir):
os.mkdir(save_data_dir)
self.save_data_dir = save_data_dir
self.train_val_split_ratio = train_val_split_ratio
def crop_save(self) -> None:
all_img_paths = glob(os.path.join(self.img_dir, '*.jpg'))
all_gt_paths = glob(os.path.join(self.gt_dir, '*.txt'))
assert len(all_img_paths) == len(all_gt_paths)
text_lines = list()
for img_path in tqdm(all_img_paths):
img = cv2.imread(img_path)
gt_path = os.path.join(self.gt_dir, 'gt_' + os.path.basename(img_path).replace('.jpg', '.txt'))
with open(gt_path, 'r', encoding='utf-8-sig') as file:
lines = file.readlines()
for line in lines:
line = line.strip().split(',')
x1, y1, x2, y2, x3, y3, x4, y4 = list(map(int, line[: 8]))
trans = line[8]
if trans in {'', '*', '###'}:
continue
save_img_dir = os.path.join(self.save_data_dir, 'images')
if not os.path.exists(save_img_dir):
os.mkdir(save_img_dir)
save_img_path = os.path.join(save_img_dir, f'textbox_{self.save_id}.jpg')
if len({x1, y1, x2, y2, x3, y3, x4, y4}) == 4:
cv2.imwrite(save_img_path, img[y1: y4, x1: x2])
else:
x_min, x_max = min((x1, x2, x3, x4)), max((x1, x2, x3, x4))
y_min, y_max = min((y1, y2, y3, y4)), max((y1, y2, y3, y4))
cv2.imwrite(save_img_path, img[y_min: y_max, x_min: x_max])
text_lines.append(f'textbox_{self.save_id}.jpg\t{trans}\n')
self.save_id += 1
if self.train_val_split_ratio:
train = text_lines[: int(round((1-self.train_val_split_ratio)*self.save_id))]
val = text_lines[int(round((1-self.train_val_split_ratio)*self.save_id)): ]
with open(os.path.join(self.save_data_dir, 'train.txt'), 'w') as save_file:
save_file.writelines(train)
with open(os.path.join(self.save_data_dir, 'val.txt'), 'w') as save_file:
save_file.writelines(val)
print(f'{self.save_id-1} text-box images and 2 text-line file are saved.')
else:
with open(os.path.join(self.save_data_dir, 'train.txt'), 'w') as save_file:
save_file.writelines(text_lines)
print(f'{self.save_id-1} text-box images and 1 text-line file are saved.')
if __name__ == '__main__':
img_dir = '/data/disk7/private/szf/Datasets/ICDAR2015/train'
gt_dir = '/data/disk7/private/szf/Datasets/ICDAR2015/train_local_trans'
save_data_dir = '/data/disk7/private/szf/Datasets/ICDAR2015/data'
icdar2015CropSave(img_dir, gt_dir, save_data_dir).crop_save() | true | true |
f72cd357fb91883d347cb40323f2b77a59d3007f | 1,228 | py | Python | couchcrdt/counter.py | drsm79/couch-crdt | 1717a8b03a488793984d7209f6da78c395b3477f | [
"Apache-2.0"
] | null | null | null | couchcrdt/counter.py | drsm79/couch-crdt | 1717a8b03a488793984d7209f6da78c395b3477f | [
"Apache-2.0"
] | null | null | null | couchcrdt/counter.py | drsm79/couch-crdt | 1717a8b03a488793984d7209f6da78c395b3477f | [
"Apache-2.0"
] | null | null | null | from crdt import CRDT
class DistributedCounter(CRDT):
    """A CRDT counter whose effective total is ``value + state``."""

    # numeric types a replicated state may take; the original tuple
    # referenced Python 2's ``long``, which is a NameError on Python 3
    try:
        _NUMERIC_TYPES = (int, long, float, complex)  # noqa: F821 (Python 2)
    except NameError:
        _NUMERIC_TYPES = (int, float, complex)  # Python 3: int is unbounded

    def add(self, number):
        """Increase the counter by *number* and return self."""
        return self + number

    def remove(self, number):
        """Decrease the counter by *number* and return self."""
        return self - number

    def inc(self):
        """
        Increase the counters value by one
        """
        return self + 1

    def dec(self):
        """
        Reduce the counters value by one
        """
        return self - 1

    def __abs__(self):
        """
        Combine the local value and replicated state into the total,
        treating a missing side (None) as absent rather than zero.
        """
        if self.state is None:
            return self.value
        if self.value is None:
            return self.state
        return self.value + self.state

    def __repr__(self):
        return "%s" % self.__abs__()

    def __add__(self, number):
        # if no numeric state exists yet, the delta becomes the state
        if isinstance(self.state, self._NUMERIC_TYPES):
            self._update(self.state + number)
        else:
            self._update(number)
        return self

    def __sub__(self, number):
        if isinstance(self.state, self._NUMERIC_TYPES):
            self._update(self.state - number)
        else:
            self._update(-number)
        return self

    def _parse(self, data):
        # extract the counter value from a CouchDB view response
        return data.json()['rows'][0]['value']
| 23.169811 | 68 | 0.545603 | from crdt import CRDT
class DistributedCounter(CRDT):
    """Counter CRDT; relies on ``state``/``value``/``_update`` from CRDT."""
    def add(self, number):
        # Increase the counter by ``number``.
        return self + number
    def remove(self, number):
        # Decrease the counter by ``number``.
        return self - number
    def inc(self):
        """Increase the counter's value by one."""
        return self + 1
    def dec(self):
        """Reduce the counter's value by one."""
        return self - 1
    def __abs__(self):
        # Merge local value and remote state; return whichever side is
        # present when the other is None.
        if self.state is None:
            return self.value
        if self.value is None:
            return self.state
        return self.value + self.state
    def __repr__(self):
        return "%s" % self.__abs__()
    def __add__(self, number):
        # NOTE(review): ``long`` restricts this module to Python 2.
        if isinstance(self.state, (int, long, float, complex)):
            self._update(self.state + number)
        else:
            self._update(number)
        return self
    def __sub__(self, number):
        if isinstance(self.state, (int, long, float, complex)):
            self._update(self.state - number)
        else:
            self._update(-number)
        return self
    def _parse(self, data):
        # First row's value from a CouchDB-style view response object.
        return data.json()['rows'][0]['value']
| true | true |
f72cd35995de0da138e9526f9b56429d06c45c97 | 359,560 | py | Python | nova/virt/libvirt/driver_back.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | nova/virt/libvirt/driver_back.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | nova/virt/libvirt/driver_back.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, XEN and Parallels.
"""
import string
import collections
from collections import deque
import contextlib
import errno
import functools
import glob
import itertools
import mmap
import operator
import os
import shutil
import tempfile
import time
import uuid
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import remotefs
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova.volume import cinder
from nova.volume import encryptors
libvirt = None
uefi_logged = False
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
DEFAULT_UEFI_LOADER_PATH = {
"x86_64": "/usr/share/OVMF/OVMF_CODE.fd",
"aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd"
}
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = None
# Guest config console string
CONSOLE = "console=tty0 console=ttyS0"
GuestNumaConfig = collections.namedtuple(
'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
libvirt_volume_drivers = [
'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.glusterfs.LibvirtGlusterfsVolumeDriver',
'fibre_channel='
'nova.virt.libvirt.volume.fibrechannel.'
'LibvirtFibreChannelVolumeDriver',
'scality=nova.virt.libvirt.volume.scality.LibvirtScalityVolumeDriver',
'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
'hgst=nova.virt.libvirt.volume.hgst.LibvirtHGSTVolumeDriver',
'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
'disco=nova.virt.libvirt.volume.disco.LibvirtDISCOVolumeDriver',
'vzstorage='
'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver',
]
def patch_tpool_proxy():
    """Monkey-patch string conversion on eventlet.tpool.Proxy.

    tpool.Proxy does not delegate __str__()/__repr__() correctly for
    old-style classes (see bug #962840), so both instance methods are
    replaced with versions that forward to the wrapped object.
    """
    def _proxy_str(self):
        return str(self._obj)

    def _proxy_repr(self):
        return repr(self._obj)

    tpool.Proxy.__str__ = _proxy_str
    tpool.Proxy.__repr__ = _proxy_repr


patch_tpool_proxy()
# For information about when MIN_LIBVIRT_VERSION and
# NEXT_MIN_LIBVIRT_VERSION can be changed, consult
#
# https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix
#
# Currently this is effectively the min version for i686/x86_64
# + KVM/QEMU, as other architectures/hypervisors require newer
# versions. Over time, this will become a common min version
# for all architectures/hypervisors, as this value rises to
# meet them.
MIN_LIBVIRT_VERSION = (1, 2, 1)
MIN_QEMU_VERSION = (1, 5, 3)
# TODO(berrange): Re-evaluate this at start of each release cycle
# to decide if we want to plan a future min version bump.
# MIN_LIBVIRT_VERSION can be updated to match this after
# NEXT_MIN_LIBVIRT_VERSION has been at a higher value for
# one cycle
NEXT_MIN_LIBVIRT_VERSION = (1, 2, 1)
NEXT_MIN_QEMU_VERSION = (1, 5, 3)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
# Relative block commit & rebase (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION = (1, 2, 7)
# Libvirt version 1.2.17 is required for successful block live migration
# of vm booted from image with attached devices
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17)
# libvirt discard feature
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
# While earlier versions could support NUMA reporting and
# NUMA placement, not until 1.2.7 was there the ability
# to pin guest nodes to host nodes, so mandate that. Without
# this the scheduler cannot make guaranteed decisions, as the
# guest placement may not match what was requested
MIN_LIBVIRT_NUMA_VERSION = (1, 2, 7)
# PowerPC based hosts that support NUMA using libvirt
MIN_LIBVIRT_NUMA_VERSION_PPC = (1, 2, 19)
# Versions of libvirt with known NUMA topology issues
# See bug #1449028
BAD_LIBVIRT_NUMA_VERSIONS = [(1, 2, 9, 2)]
# While earlier versions could support hugepage backed
# guests, not until 1.2.8 was there the ability to request
# a particular huge page size. Without this the scheduler
# cannot make guaranteed decisions, as the huge page size
# used by the guest may not match what was requested
MIN_LIBVIRT_HUGEPAGE_VERSION = (1, 2, 8)
# Versions of libvirt with broken cpu pinning support. This excludes
# versions of libvirt with broken NUMA support since pinning needs
# NUMA
# See bug #1438226
BAD_LIBVIRT_CPU_POLICY_VERSIONS = [(1, 2, 10)]
# qemu 2.1 introduces support for pinning memory on host
# NUMA nodes, along with the ability to specify hugepage
# sizes per guest NUMA node
MIN_QEMU_NUMA_HUGEPAGE_VERSION = (2, 1, 0)
# fsFreeze/fsThaw requirement
MIN_LIBVIRT_FSFREEZE_VERSION = (1, 2, 5)
# UEFI booting support
MIN_LIBVIRT_UEFI_VERSION = (1, 2, 9)
# Hyper-V paravirtualized time source
MIN_LIBVIRT_HYPERV_TIMER_VERSION = (1, 2, 2)
MIN_QEMU_HYPERV_TIMER_VERSION = (2, 0, 0)
# parallels driver support
MIN_LIBVIRT_PARALLELS_VERSION = (1, 2, 12)
# Ability to set the user guest password with Qemu
MIN_LIBVIRT_SET_ADMIN_PASSWD = (1, 2, 16)
# s/390 & s/390x architectures with KVM
MIN_LIBVIRT_KVM_S390_VERSION = (1, 2, 13)
MIN_QEMU_S390_VERSION = (2, 3, 0)
# libvirt < 1.3 reported virt_functions capability
# only when VFs are enabled.
# libvirt 1.3 fix f391889f4e942e22b9ef8ecca492de05106ce41e
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0)
# ppc64/ppc64le architectures with KVM
# NOTE(rfolco): Same levels for Libvirt/Qemu on Big Endian and Little
# Endian giving the nuance around guest vs host architectures
MIN_LIBVIRT_KVM_PPC64_VERSION = (1, 2, 12)
MIN_QEMU_PPC64_VERSION = (2, 1, 0)
# Auto converge support
MIN_LIBVIRT_AUTO_CONVERGE_VERSION = (1, 2, 3)
MIN_QEMU_AUTO_CONVERGE = (1, 6, 0)
# Names of the types that do not get compressed during migration
NO_COMPRESSION_TYPES = ('qcow2',)
# number of serial console limit
QEMU_MAX_SERIAL_PORTS = 4
# Qemu supports 4 serial consoles, we remove 1 because of the PTY one defined
ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1
# realtime support
MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13)
# libvirt postcopy support
MIN_LIBVIRT_POSTCOPY_VERSION = (1, 3, 3)
# qemu postcopy support
MIN_QEMU_POSTCOPY_VERSION = (2, 5, 0)
MIN_LIBVIRT_OTHER_ARCH = {arch.S390: MIN_LIBVIRT_KVM_S390_VERSION,
arch.S390X: MIN_LIBVIRT_KVM_S390_VERSION,
arch.PPC: MIN_LIBVIRT_KVM_PPC64_VERSION,
arch.PPC64: MIN_LIBVIRT_KVM_PPC64_VERSION,
arch.PPC64LE: MIN_LIBVIRT_KVM_PPC64_VERSION,
}
MIN_QEMU_OTHER_ARCH = {arch.S390: MIN_QEMU_S390_VERSION,
arch.S390X: MIN_QEMU_S390_VERSION,
arch.PPC: MIN_QEMU_PPC64_VERSION,
arch.PPC64: MIN_QEMU_PPC64_VERSION,
arch.PPC64LE: MIN_QEMU_PPC64_VERSION,
}
# perf events support
MIN_LIBVIRT_PERF_VERSION = (2, 0, 0)
LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt',
'mbml': 'mbm_local',
'mbmt': 'mbm_total',
}
class LibvirtDriver(driver.ComputeDriver):
    # Feature flags advertised to the compute manager; the keys follow
    # the nova.virt.driver.ComputeDriver capability contract.
    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        "supports_migrate_to_same_host": False,
        "supports_attach_interface": True,
        "supports_device_tagging": True,
    }
    def __init__(self, virtapi, read_only=False):
        """Build the driver: libvirt module, host connection and helpers.

        :param virtapi: API exposed by the compute manager to the driver
        :param read_only: open the libvirt connection read-only
        """
        super(LibvirtDriver, self).__init__(virtapi)
        # Lazily import libvirt so the module can be loaded (e.g. for
        # tests) on systems without the bindings installed.
        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
            libvirt_migrate.libvirt = libvirt
        self._host = host.Host(self._uri(), read_only,
                               lifecycle_event_handler=self.emit_event,
                               conn_event_handler=self._handle_conn_event)
        self._initiator = None
        self._fc_wwnns = None
        self._fc_wwpns = None
        self._caps = None
        self._supported_perf_events = []
        self.firewall_driver = firewall.load_driver(
            DEFAULT_FIREWALL_DRIVER,
            host=self._host)
        self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
        self.volume_drivers = driver.driver_dict_from_config(
            self._get_volume_drivers(), self)
        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()
        self.image_backend = imagebackend.Backend(CONF.use_cow_images)
        self.disk_cachemodes = {}
        self.valid_cachemodes = ["default",
                                 "none",
                                 "writethrough",
                                 "writeback",
                                 "directsync",
                                 "unsafe",
                                ]
        self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
                                                                      'qemu')
        # Parse "disk_type=cache_mode" overrides, skipping (and logging)
        # entries with an unknown cache mode.
        for mode_str in CONF.libvirt.disk_cachemodes:
            disk_type, sep, cache_mode = mode_str.partition('=')
            if cache_mode not in self.valid_cachemodes:
                LOG.warning(_LW('Invalid cachemode %(cache_mode)s specified '
                                'for disk type %(disk_type)s.'),
                            {'cache_mode': cache_mode, 'disk_type': disk_type})
                continue
            self.disk_cachemodes[disk_type] = cache_mode
        self._volume_api = cinder.API()
        self._image_api = image.API()
        # Strategy table for the host sysinfo serial, selected by the
        # CONF.libvirt.sysinfo_serial option.
        sysinfo_serial_funcs = {
            'none': lambda: None,
            'hardware': self._get_host_sysinfo_serial_hardware,
            'os': self._get_host_sysinfo_serial_os,
            'auto': self._get_host_sysinfo_serial_auto,
        }
        self._sysinfo_serial_func = sysinfo_serial_funcs.get(
            CONF.libvirt.sysinfo_serial)
        self.job_tracker = instancejobtracker.InstanceJobTracker()
        self._remotefs = remotefs.RemoteFilesystem()
        self._live_migration_flags = self._block_migration_flags = 0
        self.active_migrations = {}
        # Compute reserved hugepages from conf file at the very
        # beginning to ensure any syntax error will be reported and
        # avoid any re-calculation when computing resources.
        self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()
    def _get_volume_drivers(self):
        """Return the volume driver registration strings for this driver."""
        return libvirt_volume_drivers
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (arch.I686, arch.X86_64)):
LOG.warning(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: http://docs.openstack.org/'
'developer/nova/support-matrix.html'),
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
    def _handle_conn_event(self, enabled, reason):
        """Log a libvirt connection state change and update host status."""
        LOG.info(_LI("Connection event '%(enabled)d' reason '%(reason)s'"),
                 {'enabled': enabled, 'reason': reason})
        self._set_host_enabled(enabled, reason)
def _version_to_string(self, version):
return '.'.join([str(x) for x in version])
    def init_host(self, host):
        """Initialize the driver on this compute host.

        Connects to libvirt, emits quality/deprecation warnings and
        validates that libvirt/QEMU meet the minimum versions for the
        configured virt_type and host architecture, raising
        NovaException when they do not.
        """
        self._host.initialize()
        self._do_quality_warnings()
        self._parse_migration_flags()
        self._supported_perf_events = self._get_supported_perf_events()
        if (CONF.libvirt.virt_type == 'lxc' and
                not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
            LOG.warning(_LW("Running libvirt-lxc without user namespaces is "
                            "dangerous. Containers spawned by Nova will be run "
                            "as the host's root user. It is highly suggested "
                            "that user namespaces be used in a public or "
                            "multi-tenant environment."))
        # Stop libguestfs using KVM unless we're also configured
        # to use this. This solves problem where people need to
        # stop Nova use of KVM because nested-virt is broken
        if CONF.libvirt.virt_type != "kvm":
            guestfs.force_tcg()
        if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
            raise exception.NovaException(
                _('Nova requires libvirt version %s or greater.') %
                self._version_to_string(MIN_LIBVIRT_VERSION))
        if (CONF.libvirt.virt_type in ("qemu", "kvm") and
            not self._host.has_min_version(hv_ver=MIN_QEMU_VERSION)):
            raise exception.NovaException(
                _('Nova requires QEMU version %s or greater.') %
                self._version_to_string(MIN_QEMU_VERSION))
        if (CONF.libvirt.virt_type == 'parallels' and
                not self._host.has_min_version(MIN_LIBVIRT_PARALLELS_VERSION)):
            raise exception.NovaException(
                _('Running Nova with parallels virt_type requires '
                  'libvirt version %s') %
                self._version_to_string(MIN_LIBVIRT_PARALLELS_VERSION))
        # Give the cloud admin a heads up if we are intending to
        # change the MIN_LIBVIRT_VERSION in the next release.
        if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
            LOG.warning(_LW('Running Nova with a libvirt version less than '
                            '%(version)s is deprecated. The required minimum '
                            'version of libvirt will be raised to %(version)s '
                            'in the next release.'),
                        {'version': self._version_to_string(
                            NEXT_MIN_LIBVIRT_VERSION)})
        if (CONF.libvirt.virt_type in ("qemu", "kvm") and
            not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
            LOG.warning(_LW('Running Nova with a QEMU version less than '
                            '%(version)s is deprecated. The required minimum '
                            'version of QEMU will be raised to %(version)s '
                            'in the next release.'),
                        {'version': self._version_to_string(
                            NEXT_MIN_QEMU_VERSION)})
        # Some architectures (s390x, ppc64...) need newer libvirt/QEMU
        # than the x86 baseline; enforce the per-arch minimums too.
        kvm_arch = arch.from_host()
        if (CONF.libvirt.virt_type in ('kvm', 'qemu') and
            kvm_arch in MIN_LIBVIRT_OTHER_ARCH and
                not self._host.has_min_version(
                                        MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch),
                                        MIN_QEMU_OTHER_ARCH.get(kvm_arch))):
            raise exception.NovaException(
                _('Running Nova with qemu/kvm virt_type on %(arch)s '
                  'requires libvirt version %(libvirt_ver)s and '
                  'qemu version %(qemu_ver)s, or greater') %
                {'arch': kvm_arch,
                 'libvirt_ver': self._version_to_string(
                     MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch)),
                 'qemu_ver': self._version_to_string(
                     MIN_QEMU_OTHER_ARCH.get(kvm_arch))})
def _prepare_migration_flags(self):
migration_flags = 0
migration_flags |= libvirt.VIR_MIGRATE_LIVE
# Adding p2p flag only if xen is not in use, because xen does not
# support p2p migrations
if CONF.libvirt.virt_type != 'xen':
migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER
# Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, migrated
# instance will remain defined on the source host
migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
live_migration_flags = block_migration_flags = migration_flags
# Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations
# will be live-migrations instead
block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC
return (live_migration_flags, block_migration_flags)
def _handle_live_migration_tunnelled(self, migration_flags):
if (CONF.libvirt.live_migration_tunnelled is None or
CONF.libvirt.live_migration_tunnelled):
migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED
return migration_flags
def _is_post_copy_available(self):
if self._host.has_min_version(lv_ver=MIN_LIBVIRT_POSTCOPY_VERSION,
hv_ver=MIN_QEMU_POSTCOPY_VERSION):
return True
return False
def _handle_live_migration_post_copy(self, migration_flags):
if CONF.libvirt.live_migration_permit_post_copy:
if self._is_post_copy_available():
migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
else:
LOG.info(_LI('The live_migration_permit_post_copy is set '
'to True, but it is not supported.'))
return migration_flags
def _handle_live_migration_auto_converge(self, migration_flags):
if self._host.has_min_version(lv_ver=MIN_LIBVIRT_AUTO_CONVERGE_VERSION,
hv_ver=MIN_QEMU_AUTO_CONVERGE):
if (self._is_post_copy_available() and
(migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0):
LOG.info(_LI('The live_migration_permit_post_copy is set to '
'True and post copy live migration is available '
'so auto-converge will not be in use.'))
elif CONF.libvirt.live_migration_permit_auto_converge:
migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
elif CONF.libvirt.live_migration_permit_auto_converge:
LOG.info(_LI('The live_migration_permit_auto_converge is set '
'to True, but it is not supported.'))
return migration_flags
def _parse_migration_flags(self):
(live_migration_flags,
block_migration_flags) = self._prepare_migration_flags()
live_migration_flags = self._handle_live_migration_tunnelled(
live_migration_flags)
block_migration_flags = self._handle_live_migration_tunnelled(
block_migration_flags)
live_migration_flags = self._handle_live_migration_post_copy(
live_migration_flags)
block_migration_flags = self._handle_live_migration_post_copy(
block_migration_flags)
live_migration_flags = self._handle_live_migration_auto_converge(
live_migration_flags)
block_migration_flags = self._handle_live_migration_auto_converge(
block_migration_flags)
self._live_migration_flags = live_migration_flags
self._block_migration_flags = block_migration_flags
# TODO(sahid): This method is targeted for removal when the tests
# have been updated to avoid its use
#
# All libvirt API calls on the libvirt.Connect object should be
# encapsulated by methods on the nova.virt.libvirt.host.Host
# object, rather than directly invoking the libvirt APIs. The goal
# is to avoid a direct dependency on the libvirt API from the
# driver.py file.
    def _get_connection(self):
        # Raw libvirt.Connect accessor; new code should go through the
        # nova.virt.libvirt.host.Host wrapper instead (see TODO above).
        return self._host.get_connection()
    _conn = property(_get_connection)
@staticmethod
def _uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _live_migration_uri(dest):
# Only Xen and QEMU support live migration, see
# https://libvirt.org/migration.html#scenarios for reference
uris = {
'kvm': 'qemu+tcp://%s/system',
'qemu': 'qemu+tcp://%s/system',
'xen': 'xenmigr://%s/system',
}
virt_type = CONF.libvirt.virt_type
uri = CONF.libvirt.live_migration_uri or uris.get(virt_type)
if uri is None:
raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
return uri % dest
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._host.get_guest(instance)
return True
except exception.NovaException:
return False
def list_instances(self):
names = []
for guest in self._host.list_guests(only_running=False):
names.append(guest.name)
return names
def list_instance_uuids(self):
uuids = []
for guest in self._host.list_guests(only_running=False):
uuids.append(guest.uuid)
return uuids
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks.

        :param instance: the instance owning the VIFs
        :param network_info: iterable of VIF models to plug
        """
        for vif in network_info:
            self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks, propagating any failure."""
        self._unplug_vifs(instance, network_info, False)
    def _teardown_container(self, instance):
        """Tear down the rootfs container of an LXC instance."""
        inst_path = libvirt_utils.get_instance_path(instance)
        container_dir = os.path.join(inst_path, 'rootfs')
        # Device name recorded when the container was set up; may be
        # None if it was never stored in the system metadata.
        rootfs_dev = instance.system_metadata.get('rootfs_device_name')
        LOG.debug('Attempting to teardown container at path %(dir)s with '
                  'root device: %(rootfs_dev)s',
                  {'dir': container_dir, 'rootfs_dev': rootfs_dev},
                  instance=instance)
        disk_api.teardown_container(container_dir, rootfs_dev)
    def _destroy(self, instance, attempt=1):
        """Forcefully power off *instance* and wait until it is gone.

        Retries on transient libvirt EBUSY errors (up to 3 attempts)
        and re-destroys the domain if it was restarted while waiting.

        :param attempt: current retry count, used by the recursive
            EBUSY retry path.
        """
        try:
            guest = self._host.get_guest(instance)
            if CONF.serial_console.enabled:
                # This method is called for several events: destroy,
                # rebuild, hard-reboot, power-off - For all of these
                # events we want to release the serial ports acquired
                # for the guest before destroying it.
                serials = self._get_serial_ports_from_guest(guest)
                for hostname, port in serials:
                    serial_console.release_port(host=hostname, port=port)
        except exception.InstanceNotFound:
            guest = None
        # If the instance is already terminated, we're still happy
        # Otherwise, destroy it
        old_domid = -1
        if guest is not None:
            try:
                old_domid = guest.id
                guest.poweroff()
            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_NO_DOMAIN:
                    # Domain already gone. This can safely be ignored.
                    is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, we get this:
                    # Code=55 Error=Requested operation is not valid:
                    # domain is not running
                    state = guest.get_power_state(self._host)
                    if state == power_state.SHUTDOWN:
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
                    errmsg = e.get_error_message()
                    if (CONF.libvirt.virt_type == 'lxc' and
                        errmsg == 'internal error: '
                                  'Some processes refused to die'):
                        # Some processes in the container didn't die
                        # fast enough for libvirt. The container will
                        # eventually die. For now, move on and let
                        # the wait_for_destroy logic take over.
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
                    LOG.warning(_LW("Cannot destroy instance, operation time "
                                    "out"),
                                instance=instance)
                    reason = _("operation time out")
                    raise exception.InstancePowerOffFailure(reason=reason)
                elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
                    if e.get_int1() == errno.EBUSY:
                        # NOTE(danpb): When libvirt kills a process it sends it
                        # SIGTERM first and waits 10 seconds. If it hasn't gone
                        # it sends SIGKILL and waits another 5 seconds. If it
                        # still hasn't gone then you get this EBUSY error.
                        # Usually when a QEMU process fails to go away upon
                        # SIGKILL it is because it is stuck in an
                        # uninterruptible kernel sleep waiting on I/O from
                        # some non-responsive server.
                        # Given the CPU load of the gate tests though, it is
                        # conceivable that the 15 second timeout is too short,
                        # particularly if the VM running tempest has a high
                        # steal time from the cloud host. ie 15 wallclock
                        # seconds may have passed, but the VM might have only
                        # have a few seconds of scheduled run time.
                        LOG.warning(_LW('Error from libvirt during destroy. '
                                        'Code=%(errcode)s Error=%(e)s; '
                                        'attempt %(attempt)d of 3'),
                                    {'errcode': errcode, 'e': e,
                                     'attempt': attempt},
                                    instance=instance)
                        with excutils.save_and_reraise_exception() as ctxt:
                            # Try up to 3 times before giving up.
                            if attempt < 3:
                                ctxt.reraise = False
                                self._destroy(instance, attempt + 1)
                                return
                if not is_okay:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Error from libvirt during destroy. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)
        def _wait_for_destroy(expected_domid):
            """Called at an interval until the VM is gone."""
            # NOTE(vish): If the instance disappears during the destroy
            #             we ignore it so the cleanup can still be
            #             attempted because we would prefer destroy to
            #             never fail.
            try:
                dom_info = self.get_info(instance)
                state = dom_info.state
                new_domid = dom_info.id
            except exception.InstanceNotFound:
                LOG.info(_LI("During wait destroy, instance disappeared."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()
            if state == power_state.SHUTDOWN:
                LOG.info(_LI("Instance destroyed successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()
            # NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the id of
            #                domain here, if it changed and the instance is
            #                still running, we should destroy it again.
            # see https://bugs.launchpad.net/nova/+bug/1111213 for more details
            if new_domid != expected_domid:
                LOG.info(_LI("Instance may be started again."),
                         instance=instance)
                # `kwargs` is the closure-shared result dict defined
                # just below, before the looping call starts.
                kwargs['is_running'] = True
                raise loopingcall.LoopingCallDone()
        kwargs = {'is_running': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
                                                     old_domid)
        timer.start(interval=0.5).wait()
        if kwargs['is_running']:
            LOG.info(_LI("Going to destroy instance again."),
                     instance=instance)
            self._destroy(instance)
        else:
            # NOTE(GuanQiang): teardown container to avoid resource leak
            if CONF.libvirt.virt_type == 'lxc':
                self._teardown_container(instance)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Power off the instance and clean up all its resources."""
        self._destroy(instance)
        self.cleanup(context, instance, network_info, block_device_info,
                     destroy_disks, migrate_data)
    def _undefine_domain(self, instance):
        """Remove the persistent libvirt definition for *instance*.

        A missing instance is silently ignored; libvirt errors are
        logged and re-raised.
        """
        try:
            guest = self._host.get_guest(instance)
            try:
                guest.delete_configuration()
            except libvirt.libvirtError as e:
                with excutils.save_and_reraise_exception():
                    errcode = e.get_error_code()
                    LOG.error(_LE('Error from libvirt during undefine. '
                                  'Code=%(errcode)s Error=%(e)s'),
                              {'errcode': errcode, 'e': e}, instance=instance)
        except exception.InstanceNotFound:
            pass
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Clean up everything an instance left behind on this host.

        Unplugs VIFs, removes firewall filters (re-destroying the guest
        if it is found still running), detaches and disconnects volumes
        (including their encryptors), optionally deletes local disks and
        instance files, and finally undefines the libvirt domain.
        """
        if destroy_vifs:
            self._unplug_vifs(instance, network_info, True)
        retry = True
        while retry:
            try:
                self.unfilter_instance(instance, network_info)
            except libvirt.libvirtError as e:
                try:
                    state = self.get_info(instance).state
                except exception.InstanceNotFound:
                    state = power_state.SHUTDOWN
                if state != power_state.SHUTDOWN:
                    LOG.warning(_LW("Instance may be still running, destroy "
                                    "it again."), instance=instance)
                    self._destroy(instance)
                else:
                    retry = False
                    errcode = e.get_error_code()
                    LOG.exception(_LE('Error from libvirt during unfilter. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)
                    reason = "Error unfiltering instance."
                    raise exception.InstanceTerminationFailure(reason=reason)
            except Exception:
                retry = False
                raise
            else:
                retry = False
        # FIXME(wangpan): if the instance is booted again here, such as the
        #                 soft reboot operation boot it here, it will
        #                 become "running deleted", should we check and destroy
        #                 it at the end of this method?
        # NOTE(vish): we disconnect from volumes regardless
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device']
            if disk_dev is not None:
                # Keep only the device basename (e.g. 'vdb').
                disk_dev = disk_dev.rpartition("/")[2]
            if ('data' in connection_info and
                    'volume_id' in connection_info['data']):
                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)
                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)
            try:
                self._disconnect_volume(connection_info, disk_dev)
            except Exception as exc:
                with excutils.save_and_reraise_exception() as ctxt:
                    if destroy_disks:
                        # Don't block on Volume errors if we're trying to
                        # delete the instance as we may be partially created
                        # or deleted
                        ctxt.reraise = False
                        LOG.warning(
                            _LW("Ignoring Volume Error on vol %(vol_id)s "
                                "during delete %(exc)s"),
                            {'vol_id': vol.get('volume_id'), 'exc': exc},
                            instance=instance)
        if destroy_disks:
            # NOTE(haomai): destroy volumes if needed
            if CONF.libvirt.images_type == 'lvm':
                self._cleanup_lvm(instance, block_device_info)
            if CONF.libvirt.images_type == 'rbd':
                self._cleanup_rbd(instance)
        is_shared_block_storage = False
        if migrate_data and 'is_shared_block_storage' in migrate_data:
            is_shared_block_storage = migrate_data.is_shared_block_storage
        if destroy_disks or is_shared_block_storage:
            attempts = int(instance.system_metadata.get('clean_attempts',
                                                        '0'))
            success = self.delete_instance_files(instance)
            # NOTE(mriedem): This is used in the _run_pending_deletes periodic
            # task in the compute manager. The tight coupling is not great...
            instance.system_metadata['clean_attempts'] = str(attempts + 1)
            if success:
                instance.cleaned = True
            instance.save()
        self._undefine_domain(instance)
def _detach_encrypted_volumes(self, instance, block_device_info):
"""Detaches encrypted volumes attached to instance."""
disks = jsonutils.loads(self.get_instance_disk_info(instance,
block_device_info))
encrypted_volumes = filter(dmcrypt.is_encrypted,
[disk['path'] for disk in disks])
for path in encrypted_volumes:
dmcrypt.delete_volume(path)
def _get_serial_ports_from_guest(self, guest, mode=None):
"""Returns an iterator over serial port(s) configured on guest.
:param mode: Should be a value in (None, bind, connect)
"""
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
# The 'serial' device is the base for x86 platforms. Other platforms
# (e.g. kvm on system z = arch.S390X) can only use 'console' devices.
xpath_mode = "[@mode='%s']" % mode if mode else ""
serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode
tcp_devices = tree.findall(serial_tcp)
if len(tcp_devices) == 0:
tcp_devices = tree.findall(console_tcp)
for source in tcp_devices:
yield (source.get("host"), int(source.get("service")))
@staticmethod
def _get_rbd_driver():
return rbd_utils.RBDDriver(
pool=CONF.libvirt.images_rbd_pool,
ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
# NOTE(nic): On revert_resize, the cleanup steps for the root
# volume are handled with an "rbd snap rollback" command,
# and none of this is needed (and is, in fact, harmful) so
# filter out non-ephemerals from the list
if instance.task_state == task_states.RESIZE_REVERTING:
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
disk.endswith('disk.local'))
else:
filter_fn = lambda disk: disk.startswith(instance.uuid)
LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)
def _cleanup_lvm(self, instance, block_device_info):
"""Delete all LVM disks for given instance object."""
if instance.get('ephemeral_key_uuid') is not None:
self._detach_encrypted_volumes(instance, block_device_info)
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance.uuid
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
root_helper = utils.get_root_helper()
return connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.volume_use_multipath,
enforce_multipath=True,
host=CONF.host)
    def _cleanup_resize(self, instance, network_info):
        """Remove the '_resize' backup copy left behind by a resize.

        When the instance no longer lives on this host, also undefine
        the domain and remove its vifs and firewall filters.
        """
        target = libvirt_utils.get_instance_path(instance) + '_resize'
        if os.path.exists(target):
            # Deletion can fail over NFS, so retry the deletion as required.
            # Set maximum attempt as 5, most test can remove the directory
            # for the second time.
            utils.execute('rm', '-rf', target, delay_on_retry=True,
                          attempts=5)
        root_disk = self.image_backend.image(instance, 'disk')
        # TODO(nic): Set ignore_errors=False in a future release.
        # It is set to True here to avoid any upgrade issues surrounding
        # instances being in pending resize state when the software is updated;
        # in that case there will be no snapshot to remove.  Once it can be
        # reasonably assumed that no such instances exist in the wild
        # anymore, it should be set back to False (the default) so it will
        # throw errors, like it should.
        if root_disk.exists():
            root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
                                  ignore_errors=True)
        if instance.host != CONF.host:
            self._undefine_domain(instance)
            self.unplug_vifs(instance, network_info)
            self.unfilter_instance(instance, network_info)
def _get_volume_driver(self, connection_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def _connect_volume(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_config(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
return vol_driver.get_config(connection_info, disk_info)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def _check_discard_for_attach_volume(self, conf, instance):
"""Perform some checks for volumes configured for discard support.
If discard is configured for the volume, and the guest is using a
configuration known to not work, we will log a message explaining
the reason why.
"""
if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio':
LOG.debug('Attempting to attach volume %(id)s with discard '
'support enabled to an instance using an '
'unsupported configuration. target_bus = '
'%(bus)s. Trim commands will not be issued to '
'the storage device.',
{'bus': conf.target_bus,
'id': conf.serial},
instance=instance)
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach a Cinder volume to the instance at *mountpoint*.

        Connects the volume on the host, builds the guest disk config,
        optionally attaches an encryptor, then hot-plugs the disk into
        the domain (live when the guest is running or paused).  On
        failure the volume is disconnected again.

        :raises: InvalidHypervisorType when the volume requests a custom
            block size on a hypervisor other than kvm/qemu.
        :raises: DeviceIsBusy when libvirt reports the attach failed
            because the device is in use.
        """
        guest = self._host.get_guest(instance)
        disk_dev = mountpoint.rpartition("/")[2]
        bdm = {
            'device_name': disk_dev,
            'disk_bus': disk_bus,
            'device_type': device_type}
        # Note(cfb): If the volume has a custom block size, check that
        #            we are using QEMU/KVM and libvirt >= 0.10.2. The
        #            presence of a block size is considered mandatory by
        #            cinder so we fail if we can't honor the request.
        data = {}
        if ('data' in connection_info):
            data = connection_info['data']
        if ('logical_block_size' in data or 'physical_block_size' in data):
            if ((CONF.libvirt.virt_type != "kvm" and
                 CONF.libvirt.virt_type != "qemu")):
                msg = _("Volume sets block size, but the current "
                        "libvirt hypervisor '%s' does not support custom "
                        "block size") % CONF.libvirt.virt_type
                raise exception.InvalidHypervisorType(msg)
        disk_info = blockinfo.get_info_from_bdm(
            instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
        self._connect_volume(connection_info, disk_info)
        conf = self._get_volume_config(connection_info, disk_info)
        self._set_cache_mode(conf)
        self._check_discard_for_attach_volume(conf, instance)
        try:
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            if encryption:
                # Attach the encryptor before exposing the device to the
                # guest, so the guest only ever sees decrypted data.
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.attach_volume(context, **encryption)
            guest.attach_device(conf, persistent=True, live=live)
        except Exception as ex:
            LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
                          mountpoint, instance=instance)
            if isinstance(ex, libvirt.libvirtError):
                errcode = ex.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                    self._disconnect_volume(connection_info, disk_dev)
                    raise exception.DeviceIsBusy(device=disk_dev)
            # Any other failure: disconnect on the host, then re-raise
            # the original exception.
            with excutils.save_and_reraise_exception():
                self._disconnect_volume(connection_info, disk_dev)
    def _swap_volume(self, guest, disk_path, new_path, resize_to):
        """Swap existing disk with a new block device.

        Mirrors the active disk onto *new_path* with a blockRebase copy
        job, pivots the domain onto the copy, then optionally resizes
        the new device.

        :param resize_to: new size in GiB, or falsy to keep the size.
        """
        dev = guest.get_block_device(disk_path)
        # Save a copy of the domain's persistent XML file so it can be
        # re-defined after the transient-only rebase below.
        xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            dev.abort_job()
        except Exception:
            pass
        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            #             domains, so we need to temporarily undefine it.
            #             If any part of this block fails, the domain is
            #             re-defined regardless.
            if guest.has_persistent_configuration():
                guest.delete_configuration()
            # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
            # allow writing to existing external volume file
            dev.rebase(new_path, copy=True, reuse_ext=True)
            while dev.wait_for_job():
                time.sleep(0.5)
            dev.abort_job(pivot=True)
            if resize_to:
                # NOTE(alex_xu): domain.blockJobAbort isn't sync call. This
                # is bug in libvirt. So we need waiting for the pivot is
                # finished. libvirt bug #1119173
                while dev.wait_for_job(wait_for_job_clean=True):
                    time.sleep(0.5)
                dev.resize(resize_to * units.Gi / units.Ki)
        finally:
            self._host.write_instance_config(xml)
    def swap_volume(self, old_connection_info,
                    new_connection_info, instance, mountpoint, resize_to):
        """Replace the volume attached at *mountpoint* with a new one.

        Connects the new volume, block-copies the guest disk onto it,
        persists the new connection_info in the BDM, then disconnects
        the old volume.

        :raises: DiskNotFound if no guest disk matches the mountpoint.
        :raises: NotImplementedError if the new volume is not exposed
            as a host block device.
        """
        guest = self._host.get_guest(instance)
        disk_dev = mountpoint.rpartition("/")[2]
        if not guest.get_disk(disk_dev):
            raise exception.DiskNotFound(location=disk_dev)
        disk_info = {
            'dev': disk_dev,
            'bus': blockinfo.get_disk_bus_for_disk_dev(
                CONF.libvirt.virt_type, disk_dev),
            'type': 'disk',
            }
        self._connect_volume(new_connection_info, disk_info)
        conf = self._get_volume_config(new_connection_info, disk_info)
        if not conf.source_path:
            self._disconnect_volume(new_connection_info, disk_dev)
            raise NotImplementedError(_("Swap only supports host devices"))
        # Save updates made in connection_info when connect_volume was called
        volume_id = new_connection_info.get('serial')
        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
            nova_context.get_admin_context(), volume_id, instance.uuid)
        driver_bdm = driver_block_device.convert_volume(bdm)
        driver_bdm['connection_info'] = new_connection_info
        driver_bdm.save()
        self._swap_volume(guest, disk_dev, conf.source_path, resize_to)
        # Only disconnect the old volume once the copy has pivoted onto
        # the new device.
        self._disconnect_volume(old_connection_info, disk_dev)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info)
return xml
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the volume mounted at *mountpoint* from the instance.

        The device is detached from the domain (with retry, since live
        detach is asynchronous), the encryptor is removed if one was
        attached, and the volume is disconnected from the host even
        when the domain has already disappeared.

        :raises: DiskNotFound if the guest has no such device.
        """
        disk_dev = mountpoint.rpartition("/")[2]
        try:
            guest = self._host.get_guest(instance)
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            wait_for_detach = guest.detach_device_with_retry(guest.get_disk,
                                                             disk_dev,
                                                             persistent=True,
                                                             live=live)
            if encryption:
                # The volume must be detached from the VM before
                # disconnecting it from its encryptor. Otherwise, the
                # encryptor may report that the volume is still in use.
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.detach_volume(**encryption)
            wait_for_detach()
        except exception.InstanceNotFound:
            # NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
            #                will throw InstanceNotFound exception. Need to
            #                disconnect volume under this circumstance.
            LOG.warning(_LW("During detach_volume, instance disappeared."),
                        instance=instance)
        except exception.DeviceNotFound:
            raise exception.DiskNotFound(location=disk_dev)
        except libvirt.libvirtError as ex:
            # NOTE(vish): This is called to cleanup volumes after live
            #             migration, so we should still disconnect even if
            #             the instance doesn't exist here anymore.
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                # NOTE(vish): the domain is already gone; log and fall
                #             through to the host-side disconnect below.
                LOG.warning(_LW("During detach_volume, instance disappeared."),
                            instance=instance)
            else:
                raise
        self._disconnect_volume(connection_info, disk_dev)
    def attach_interface(self, instance, image_meta, vif):
        """Hot-plug a network interface (vif) into the instance.

        Plugs the vif on the host, sets up basic firewall filtering and
        attaches the device to the domain (live when running/paused).
        The vif is unplugged again if libvirt rejects the device.

        :raises: InterfaceAttachFailed on libvirt errors.
        """
        guest = self._host.get_guest(instance)
        self.vif_driver.plug(instance, vif)
        self.firewall_driver.setup_basic_filtering(instance, [vif])
        cfg = self.vif_driver.get_config(instance, vif, image_meta,
                                         instance.flavor,
                                         CONF.libvirt.virt_type,
                                         self._host)
        try:
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            guest.attach_device(cfg, persistent=True, live=live)
        except libvirt.libvirtError:
            LOG.error(_LE('attaching network adapter failed.'),
                      instance=instance, exc_info=True)
            # Roll back the host-side plug so nothing is left dangling.
            self.vif_driver.unplug(instance, vif)
            raise exception.InterfaceAttachFailed(
                instance_uuid=instance.uuid)
    def detach_interface(self, instance, vif):
        """Hot-unplug a network interface (vif) from the instance.

        :raises: InterfaceDetachFailed if the device still exists on
            the guest but libvirt failed to detach it.
        """
        guest = self._host.get_guest(instance)
        cfg = self.vif_driver.get_config(instance, vif,
                                         instance.image_meta,
                                         instance.flavor,
                                         CONF.libvirt.virt_type, self._host)
        try:
            self.vif_driver.unplug(instance, vif)
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            guest.detach_device(cfg, persistent=True, live=live)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warning(_LW("During detach_interface, "
                                "instance disappeared."),
                            instance=instance)
            else:
                # NOTE(mriedem): When deleting an instance and using Neutron,
                # we can be racing against Neutron deleting the port and
                # sending the vif-deleted event which then triggers a call to
                # detach the interface, so we might have failed because the
                # network device no longer exists. Libvirt will fail with
                # "operation failed: no matching network device was found"
                # which unfortunately does not have a unique error code so we
                # need to look up the interface by MAC and if it's not found
                # then we can just log it as a warning rather than tracing an
                # error.
                mac = vif.get('address')
                interface = guest.get_interface_by_mac(mac)
                if interface:
                    LOG.error(_LE('detaching network adapter failed.'),
                              instance=instance, exc_info=True)
                    raise exception.InterfaceDetachFailed(
                        instance_uuid=instance.uuid)
                # The interface is gone so just log it as a warning.
                LOG.warning(_LW('Detaching interface %(mac)s failed because '
                                'the device is no longer found on the guest.'),
                            {'mac': mac}, instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance.kernel_id,
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
'ramdisk_id': instance.ramdisk_id,
}
}
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type
# NOTE(vish): glance forces ami disk format to be ami
if image_meta.disk_format == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
if image_meta.obj_attr_is_set("container_format"):
metadata['container_format'] = image_meta.container_format
else:
metadata['container_format'] = "bare"
return metadata
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+

        A backend "direct" snapshot (e.g. an RBD clone registered in
        Glance by URL) is attempted first; if that is unsupported or
        fails, the disk is extracted into a temporary file (live when
        possible, otherwise cold) and uploaded to the image service.

        :param update_task_state: callback used to report the
            IMAGE_PENDING_UPLOAD / IMAGE_UPLOADING task-state
            transitions.
        :raises: InstanceNotRunning if the domain does not exist.
        """
        try:
            guest = self._host.get_guest(instance)
            # TODO(sahid): We are converting all calls from a
            # virDomain object to use nova.virt.libvirt.Guest.
            # We should be able to remove virt_dom at the end.
            virt_dom = guest._domain
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)
        snapshot = self._image_api.get(context, image_id)
        # source_format is an on-disk format
        # source_type is a backend type
        disk_path, source_format = libvirt_utils.find_disk(virt_dom)
        source_type = libvirt_utils.get_disk_type_from_path(disk_path)
        # We won't have source_type for raw or qcow2 disks, because we can't
        # determine that from the path. We should have it from the libvirt
        # xml, though.
        if source_type is None:
            source_type = source_format
        # For lxc instances we won't have it either from libvirt xml
        # (because we just gave libvirt the mounted filesystem), or the path,
        # so source_type is still going to be None. In this case,
        # snapshot_backend is going to default to CONF.libvirt.images_type
        # below, which is still safe.
        image_format = CONF.libvirt.snapshot_image_format or source_type
        # NOTE(bfilippov): save lvm and rbd as raw
        if image_format == 'lvm' or image_format == 'rbd':
            image_format = 'raw'
        metadata = self._create_snapshot_metadata(instance.image_meta,
                                                  instance,
                                                  image_format,
                                                  snapshot['name'])
        snapshot_name = uuid.uuid4().hex
        state = guest.get_power_state(self._host)
        # NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
        #               cold snapshots. Currently, checking for encryption is
        #               redundant because LVM supports only cold snapshots.
        #               It is necessary in case this situation changes in the
        #               future.
        if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)
                and source_type not in ('lvm')
                and not CONF.ephemeral_storage_encryption.enabled
                and not CONF.workarounds.disable_libvirt_livesnapshot):
            live_snapshot = True
            # Abort is an idempotent operation, so make sure any block
            # jobs which may have failed are ended. This operation also
            # confirms the running instance, as opposed to the system as a
            # whole, has a new enough version of the hypervisor (bug 1193146).
            try:
                guest.get_block_device(disk_path).abort_job()
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
                    live_snapshot = False
                else:
                    pass
        else:
            live_snapshot = False
        # NOTE(rmk): We cannot perform live snapshots when a managedSave
        #            file is present, so we will use the cold/legacy method
        #            for instances which are shutdown.
        if state == power_state.SHUTDOWN:
            live_snapshot = False
        self._prepare_domain_for_snapshot(context, live_snapshot, state,
                                          instance)
        snapshot_backend = self.image_backend.snapshot(instance,
                                                       disk_path,
                                                       image_type=source_type)
        if live_snapshot:
            LOG.info(_LI("Beginning live snapshot process"),
                     instance=instance)
        else:
            LOG.info(_LI("Beginning cold snapshot process"),
                     instance=instance)
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        try:
            # Try the no-copy direct snapshot path first.
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            metadata['location'] = snapshot_backend.direct_snapshot(
                context, snapshot_name, image_format, image_id,
                instance.image_ref)
            self._snapshot_domain(context, live_snapshot, virt_dom, state,
                                  instance)
            self._image_api.update(context, image_id, metadata,
                                   purge_props=False)
        except (NotImplementedError, exception.ImageUnacceptable,
                exception.Forbidden) as e:
            if type(e) != NotImplementedError:
                LOG.warning(_LW('Performing standard snapshot because direct '
                                'snapshot failed: %(error)s'), {'error': e})
            failed_snap = metadata.pop('location', None)
            if failed_snap:
                failed_snap = {'url': str(failed_snap)}
            snapshot_backend.cleanup_direct_snapshot(failed_snap,
                                                     also_destroy_volume=True,
                                                     ignore_errors=True)
            update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
                              expected_state=task_states.IMAGE_UPLOADING)
            # TODO(nic): possibly abstract this out to the snapshot_backend
            if source_type == 'rbd' and live_snapshot:
                # Standard snapshot uses qemu-img convert from RBD which is
                # not safe to run with live_snapshot.
                live_snapshot = False
                # Suspend the guest, so this is no longer a live snapshot
                self._prepare_domain_for_snapshot(context, live_snapshot,
                                                  state, instance)
            snapshot_directory = CONF.libvirt.snapshots_directory
            fileutils.ensure_tree(snapshot_directory)
            with utils.tempdir(dir=snapshot_directory) as tmpdir:
                try:
                    out_path = os.path.join(tmpdir, snapshot_name)
                    if live_snapshot:
                        # NOTE(xqueralt): libvirt needs o+x in the tempdir
                        os.chmod(tmpdir, 0o701)
                        self._live_snapshot(context, instance, guest,
                                            disk_path, out_path, source_format,
                                            image_format, instance.image_meta)
                    else:
                        snapshot_backend.snapshot_extract(out_path,
                                                          image_format)
                finally:
                    self._snapshot_domain(context, live_snapshot, virt_dom,
                                          state, instance)
                    LOG.info(_LI("Snapshot extracted, beginning image upload"),
                             instance=instance)
                # Upload that image to the image service
                update_task_state(task_state=task_states.IMAGE_UPLOADING,
                                  expected_state=task_states.IMAGE_PENDING_UPLOAD)
                with libvirt_utils.file_open(out_path) as image_file:
                    self._image_api.update(context,
                                           image_id,
                                           metadata,
                                           image_file)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to snapshot image"))
                failed_snap = metadata.pop('location', None)
                if failed_snap:
                    failed_snap = {'url': str(failed_snap)}
                snapshot_backend.cleanup_direct_snapshot(
                    failed_snap, also_destroy_volume=True,
                    ignore_errors=True)
        LOG.info(_LI("Snapshot image upload complete"), instance=instance)
def _prepare_domain_for_snapshot(self, context, live_snapshot, state,
instance):
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self.suspend(context, instance)
def _snapshot_domain(self, context, live_snapshot, virt_dom, state,
instance):
guest = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
guest = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
guest = self._create_domain(domain=virt_dom, pause=True)
if guest is not None:
self._attach_pci_devices(
guest, pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, guest)
def _can_set_admin_password(self, image_meta):
if (CONF.libvirt.virt_type not in ('kvm', 'qemu') or
not self._host.has_min_version(MIN_LIBVIRT_SET_ADMIN_PASSWD)):
raise exception.SetAdminPasswdNotSupported()
hw_qga = image_meta.properties.get('hw_qemu_guest_agent', '')
if not strutils.bool_from_string(hw_qga):
raise exception.QemuGuestAgentNotEnabled()
def set_admin_password(self, instance, new_pass):
self._can_set_admin_password(instance.image_meta)
guest = self._host.get_guest(instance)
user = instance.image_meta.properties.get("os_admin_user")
if not user:
if instance.os_type == "windows":
user = "Administrator"
else:
user = "root"
try:
guest.set_user_password(user, new_pass)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while set password for username '
'"%(user)s": [Error Code %(error_code)s] %(ex)s')
% {'user': user, 'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
def _can_quiesce(self, instance, image_meta):
if (CONF.libvirt.virt_type not in ('kvm', 'qemu') or
not self._host.has_min_version(MIN_LIBVIRT_FSFREEZE_VERSION)):
raise exception.InstanceQuiesceNotSupported(
instance_id=instance.uuid)
if not image_meta.properties.get('hw_qemu_guest_agent', False):
raise exception.QemuGuestAgentNotEnabled()
def _set_quiesced(self, context, instance, image_meta, quiesced):
self._can_quiesce(instance, image_meta)
try:
guest = self._host.get_guest(instance)
if quiesced:
guest.freeze_filesystems()
else:
guest.thaw_filesystems()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while quiescing %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
The qemu-guest-agent must be setup to execute fsfreeze.
"""
self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
"""Thaw the guest filesystems after snapshot."""
self._set_quiesced(context, instance, image_meta, False)
    def _live_snapshot(self, context, instance, guest, disk_path, out_path,
                       source_format, image_format, image_meta):
        """Snapshot an instance without downtime.

        Mirrors the root disk into a shallow qcow2 delta while the guest
        keeps running (quiescing filesystems when possible), then
        flattens the delta into *out_path*.
        """
        dev = guest.get_block_device(disk_path)
        # Save a copy of the domain's persistent XML file so the domain
        # can be re-defined after the transient-only rebase below.
        xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            dev.abort_job()
        except Exception:
            pass
        # NOTE (rmk): We are using shallow rebases as a workaround to a bug
        #             in QEMU 1.3. In order to do this, we need to create
        #             a destination image with the original backing file
        #             and matching size of the instance root disk.
        src_disk_size = libvirt_utils.get_disk_size(disk_path,
                                                    format=source_format)
        src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
                                                        format=source_format,
                                                        basename=False)
        disk_delta = out_path + '.delta'
        libvirt_utils.create_cow_image(src_back_path, disk_delta,
                                       src_disk_size)
        quiesced = False
        try:
            self._set_quiesced(context, instance, image_meta, True)
            quiesced = True
        except exception.NovaException as err:
            # Quiescing is best-effort unless the image demands it.
            if image_meta.properties.get('os_require_quiesce', False):
                raise
            LOG.info(_LI('Skipping quiescing instance: %(reason)s.'),
                     {'reason': err}, instance=instance)
        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            #             domains, so we need to temporarily undefine it.
            #             If any part of this block fails, the domain is
            #             re-defined regardless.
            if guest.has_persistent_configuration():
                guest.delete_configuration()
            # NOTE (rmk): Establish a temporary mirror of our root disk and
            #             issue an abort once we have a complete copy.
            dev.rebase(disk_delta, copy=True, reuse_ext=True, shallow=True)
            while dev.wait_for_job():
                time.sleep(0.5)
            dev.abort_job()
            libvirt_utils.chown(disk_delta, os.getuid())
        finally:
            self._host.write_instance_config(xml)
            if quiesced:
                self._set_quiesced(context, instance, image_meta, False)
        # Convert the delta (CoW) image with a backing file to a flat
        # image with no backing file.
        libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
                                       out_path, image_format)
def cdrom_list(self, context, instance):
cdroms = []
guest = self._host.get_guest(instance)
domain = guest._domain
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
disks = xml_doc.findall('devices/disk')
for disk in disks:
if disk.get('device') == 'cdrom':
source = disk.find('source')
target = disk.find('target')
if target is not None:
cdrom ={}
device = target.get('dev')
cdrom['device_name'] = device
if source is not None:
disk_path = source.get('file')
image_id = os.path.basename(disk_path)
else:
image_id = ''
cdrom['image_id'] = image_id
cdroms.append(cdrom)
return cdroms
def attach_cdrom(self, context, instance, device, image_id):
cdrom_device={}
guest = self._host.get_guest(instance)
domain = guest._domain
cdrom_config = self._get_guest_cdrom_config(context, instance, image_id, device)
is_updated = False
is_active = domain.isActive()
if is_active == 1:
domain.updateDeviceFlags(cdrom_config.to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
is_updated = True
elif is_active == 0:
domain.updateDeviceFlags(cdrom_config.to_xml(), libvirt.VIR_DOMAIN_AFFECT_CONFIG)
is_updated = True
if is_updated:
# Sync the xml between default libvirt xml path and nova instance xml path
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
xml = domain.XMLDesc(0)
libvirt_utils.write_to_file(xml_path, xml)
cdrom_device['device_name'] = device
cdrom_device['image_id'] = image_id
return cdrom_device
def has_cdrom(self,instance, disk_info):
#LOG.info("disk_info:%s",disk_info)
disk_mapping = disk_info['mapping']
cdxml = None
inst_type = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
if 'disk' in disk_mapping:
disk = disk_mapping['disk']
if disk['type'] == 'cdrom':
image = self.image_backend.image(instance,
'disk',
None)
cdxml = image.libvirt_info(disk['bus'],
disk['dev'],
disk['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._get_hypervisor_version())
if 'disk.local' in disk_mapping:
disklocal = disk_mapping['disk.local']
if disklocal['type'] == 'cdrom':
image = self.image_backend.image(instance,
'disk.local',
None)
cdxml = image.libvirt_info(disklocal['bus'],
disklocal['dev'],
disklocal['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._get_hypervisor_version())
return cdxml
def _get_guest_cdrom_config(self, context, instance, image_id,
device, enable_cache_image=True):
cdrom_config = vconfig.LibvirtConfigGuestDisk()
cdrom_config.source_type = 'file'
cdrom_config.source_device = 'cdrom'
cdrom_config.target_bus = 'ide'
if device:
cdrom_config.target_dev = device
cdrom_config.readonly = True
cdrom_config.driver_name = 'qemu'
cdrom_config.driver_format = 'raw'
if image_id != '0':
# image_info = {}
# image_info['image_id'] = image_id
fake_image_id = imagecache.get_cache_fname(image_id)
if enable_cache_image:
imagecache.cache_image(libvirt_utils.fetch_image,
fake_image_id,
context=context,
image_id=image_id)
#user_id=instance.user_id,
#project_id=instance.project_id)
base_url = self.image_cache_manager._get_base()
image_url = os.path.join(base_url, fake_image_id)
else:
image_url = ''
cdrom_config.source_path = image_url
return cdrom_config
    def dev_snapshot_create(self, context, instance, name):
        """Create an internal qcow2 snapshot of the instance root disk.

        Only a 'vda' (virtio) or 'hda' (ide) root device is
        snapshotted.  The snapshot name, when given, is decoded from
        its repr()-encoded form back into text.

        :raises: NovaException when no eligible disk is found.
        """
        guest = self._host.get_guest(instance)
        domain = guest._domain
        xml = domain.XMLDesc(0)
        xml_doc = etree.fromstring(xml)
        device_info = vconfig.LibvirtConfigGuest()
        device_info.parse_dom(xml_doc)
        disks_to_snap = []
        network_disks_to_snap = []
        disks_to_skip = []
        for guest_disk in device_info.devices:
            if (guest_disk.root_name != 'disk'):
                continue
            if (guest_disk.target_dev is None):
                continue
            disk_info = {
                'dev': guest_disk.target_dev,
                'serial': guest_disk.serial,
                'current_file': guest_disk.source_path,
                'source_protocol': guest_disk.source_protocol,
                'source_name': guest_disk.source_name,
                'source_hosts': guest_disk.source_hosts,
                'source_ports': guest_disk.source_ports
            }
            if guest_disk.target_dev == 'vda':
                xwl_target_dev='vda'
                disks_to_snap.append(guest_disk.source_path)
            if guest_disk.target_dev == 'hda':
                xwl_target_dev='hda'
                disks_to_snap.append(guest_disk.source_path)
        if not disks_to_snap :
            msg = _('Found no disk to snapshot.')
            raise exception.NovaException(msg)
        # NOTE(review): xwl_target_dev is only bound when a vda/hda disk
        # was seen; the disks_to_snap check above guarantees that here.
        snapshot = vconfig.LibvirtConfigGuestSnapshot()
        if name:
            # Names are stored repr()-encoded; decode back to unicode.
            uname = repr(name)
            snapshot_name =unicode(uname, 'unicode-escape')
        else:
            snapshot_name = None
        if xwl_target_dev == 'hda':
            for current_name in disks_to_snap:
                snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
                if snapshot_name:
                    snapshot.name = snapshot_name
                snap_disk.name = 'hda'
                snap_disk.snapshot = 'internal'
                snap_disk.driver_name = 'qcow2'
                snapshot.add_disk(snap_disk)
            for dev in disks_to_skip:
                snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
                snap_disk.name = dev
                snap_disk.snapshot = 'no'
                snapshot.add_disk(snap_disk)
        else:
            for current_name in disks_to_snap:
                snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
                if snapshot_name:
                    snapshot.name = snapshot_name
                snap_disk.name = 'vda'
                snap_disk.snapshot = 'internal'
                snap_disk.driver_name = 'qcow2'
                snapshot.add_disk(snap_disk)
            for dev in disks_to_skip:
                snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
                snap_disk.name = dev
                snap_disk.snapshot = 'no'
                snapshot.add_disk(snap_disk)
        snapshot_xml = snapshot.to_xml()
        snap_flags = 0
        #QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
        try:
            guest._domain.snapshotCreateXML(snapshot_xml, snap_flags)
            return
        except libvirt.libvirtError:
            LOG.exception(_LE('Unable to create quiesced dev_snapshot, '
                              'attempting again with quiescing disabled.'))
        try:
            guest._domain.snapshotCreateXML(snapshot_xml, snap_flags )
        except libvirt.libvirtError:
            LOG.exception(_LE('Unable to create dev_snapshot, '
                              'failing dev_snapshot operation.'))
            raise
def dev_snapshot_list(self, context, instance):
snaps = []
try:
guest = self._host.get_guest(instance)
snapshotlist=guest._domain.listAllSnapshots(0)
except exception.InstanceNotFound:
return snaps
for snapshot in snapshotlist:
Desc = snapshot.getName()
try:
Desctime = time.strftime("%y-%m-%d %H:%M:%S", time.localtime(string.atof(Desc)))
name = {}
name['dev_snapshot_name'] = Desctime
except:
name = {}
Desctime = Desc[2:-1]
name['dev_snapshot_name'] = Desctime
snaps.append(name)
return snaps
def dev_snapshot_delete(self, context, instance, name):
try:
guest = self._host.get_guest(instance)
timeName = time.mktime(time.strptime(name, "%y-%m-%d %H:%M:%S"))
tem='%.0f' % timeName
snapshot = guest._domain.snapshotLookupByName(tem,0)
snapshot.delete(0)
except:
stringName = repr(name)
unicodeName = unicode(stringName,'unicode-escape')
tem =unicodeName.encode('utf8')
snapshot = guest._domain.snapshotLookupByName(tem,0)
snapshot.delete(0)
def dev_snapshot_revert(self, context, instance, name):
    """Revert the instance to the device snapshot named ``name``.

    Mirrors dev_snapshot_delete: the display name is converted back
    to the epoch-seconds snapshot name, with a legacy repr-based
    fallback for names that do not parse or resolve.

    :param context: security context (unused)
    :param instance: nova instance object
    :param name: snapshot display name to revert to
    """
    # Resolve the guest before the fallback try-block: the original
    # bare 'except' referenced 'guest' even when get_guest() itself
    # had raised, turning the real error into a NameError.
    guest = self._host.get_guest(instance)
    try:
        epoch = time.mktime(time.strptime(name, "%y-%m-%d %H:%M:%S"))
        snapshot = guest._domain.snapshotLookupByName('%.0f' % epoch, 0)
    except (ValueError, libvirt.libvirtError):
        # Legacy fallback for non-timestamp names. NOTE(review): this
        # repr/unicode-escape round-trip is Python 2 specific — confirm
        # before running under Python 3.
        string_name = repr(name)
        unicode_name = unicode(string_name, 'unicode-escape')
        snapshot = guest._domain.snapshotLookupByName(
            unicode_name.encode('utf8'), 0)
    # Deliberately outside the fallback: a revert failure should be
    # raised, not masked by a second (doomed) lookup attempt.
    guest._domain.revertToSnapshot(snapshot, 0)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
    """Send a snapshot status update to Cinder.

    Any exception is caught and logged here because callers cannot do
    anything useful with it; if delivery fails, the operation waiting
    on the Cinder side simply times out.

    :param context: security context
    :param snapshot_id: id of snapshot being updated
    :param status: new status value
    """
    try:
        self._volume_api.update_snapshot_status(
            context, snapshot_id, status)
    except Exception:
        LOG.exception(_LE('Failed to send updated snapshot status '
                          'to volume service.'))
def _volume_snapshot_create(self, context, instance, guest,
                            volume_id, new_file):
    """Perform volume snapshot.

    Parses the domain XML, classifies each disk as the target Cinder
    volume (file-backed or gluster/netfs network disk) or a disk to
    skip, builds an external-snapshot XML and asks libvirt to take it,
    first quiesced and then, on failure, without quiescing.

    :param context: security context (unused here)
    :param instance: nova instance the volume is attached to
    :param guest: VM that volume is attached to
    :param volume_id: volume UUID to snapshot
    :param new_file: relative path to new qcow2 file present on share
    :raises: exception.NovaException if no disk matches volume_id
    """
    xml = guest.get_xml_desc()
    xml_doc = etree.fromstring(xml)
    device_info = vconfig.LibvirtConfigGuest()
    device_info.parse_dom(xml_doc)
    disks_to_snap = []  # local file disks to be snapshotted by libvirt
    network_disks_to_snap = []  # network disks (netfs, gluster, etc.)
    disks_to_skip = []  # local disks not snapshotted
    for guest_disk in device_info.devices:
        # Only <disk> devices with a target dev can be snapshotted.
        if (guest_disk.root_name != 'disk'):
            continue
        if (guest_disk.target_dev is None):
            continue
        if (guest_disk.serial is None or guest_disk.serial != volume_id):
            disks_to_skip.append(guest_disk.target_dev)
            continue
        # disk is a Cinder volume with the correct volume_id
        disk_info = {
            'dev': guest_disk.target_dev,
            'serial': guest_disk.serial,
            'current_file': guest_disk.source_path,
            'source_protocol': guest_disk.source_protocol,
            'source_name': guest_disk.source_name,
            'source_hosts': guest_disk.source_hosts,
            'source_ports': guest_disk.source_ports
        }
        # Determine path for new_file based on current path
        if disk_info['current_file'] is not None:
            # File-backed disk: the new overlay lives in the same
            # directory as the current image.
            current_file = disk_info['current_file']
            new_file_path = os.path.join(os.path.dirname(current_file),
                                         new_file)
            disks_to_snap.append((current_file, new_file_path))
        elif disk_info['source_protocol'] in ('gluster', 'netfs'):
            network_disks_to_snap.append((disk_info, new_file))
    if not disks_to_snap and not network_disks_to_snap:
        msg = _('Found no disk to snapshot.')
        raise exception.NovaException(msg)
    snapshot = vconfig.LibvirtConfigGuestSnapshot()
    # External qcow2 overlays for local file-backed disks.
    for current_name, new_filename in disks_to_snap:
        snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
        snap_disk.name = current_name
        snap_disk.source_path = new_filename
        snap_disk.source_type = 'file'
        snap_disk.snapshot = 'external'
        snap_disk.driver_name = 'qcow2'
        snapshot.add_disk(snap_disk)
    # External overlays for network disks: the new file is created in
    # the same remote directory (volume) as the current image.
    for disk_info, new_filename in network_disks_to_snap:
        snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
        snap_disk.name = disk_info['dev']
        snap_disk.source_type = 'network'
        snap_disk.source_protocol = disk_info['source_protocol']
        snap_disk.snapshot = 'external'
        snap_disk.source_path = new_filename
        old_dir = disk_info['source_name'].split('/')[0]
        snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
        snap_disk.source_hosts = disk_info['source_hosts']
        snap_disk.source_ports = disk_info['source_ports']
        snapshot.add_disk(snap_disk)
    # Explicitly exclude every other disk from the snapshot.
    for dev in disks_to_skip:
        snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
        snap_disk.name = dev
        snap_disk.snapshot = 'no'
        snapshot.add_disk(snap_disk)
    snapshot_xml = snapshot.to_xml()
    LOG.debug("snap xml: %s", snapshot_xml, instance=instance)
    try:
        # Prefer a quiesced snapshot (filesystems flushed via the
        # guest agent); fall back below if the agent is unavailable.
        guest.snapshot(snapshot, no_metadata=True, disk_only=True,
                       reuse_ext=True, quiesce=True)
        return
    except libvirt.libvirtError:
        LOG.exception(_LE('Unable to create quiesced VM snapshot, '
                          'attempting again with quiescing disabled.'),
                      instance=instance)
    try:
        guest.snapshot(snapshot, no_metadata=True, disk_only=True,
                       reuse_ext=True, quiesce=False)
    except libvirt.libvirtError:
        LOG.exception(_LE('Unable to create VM snapshot, '
                          'failing volume_snapshot operation.'),
                      instance=instance)
        raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
    """Refresh the connection info stored in the volume's BDM."""
    bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
        context, volume_id, instance.uuid)
    driver_bdm = driver_block_device.convert_volume(bdm)
    if not driver_bdm:
        # Not a volume-backed BDM; nothing to refresh.
        return
    driver_bdm.refresh_connection_info(context, instance,
                                       self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
                           create_info):
    """Create snapshots of a Cinder volume via libvirt.

    Validates the request, performs the snapshot, reports status back
    to Cinder ('error' on failure, 'creating' on success) and then
    polls Cinder until the snapshot leaves the 'creating' state.

    :param instance: VM instance object reference
    :param volume_id: id of volume being snapshotted
    :param create_info: dict of information used to create snapshots
                 - snapshot_id : ID of snapshot
                 - type : qcow2 / <other>
                 - new_file : qcow2 file created by Cinder which
                              becomes the VM's active image after
                              the snapshot is complete
    """
    LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
              {'c_info': create_info}, instance=instance)
    try:
        guest = self._host.get_guest(instance)
    except exception.InstanceNotFound:
        raise exception.InstanceNotRunning(instance_id=instance.uuid)
    if create_info['type'] != 'qcow2':
        raise exception.NovaException(_('Unknown type: %s') %
                                      create_info['type'])
    snapshot_id = create_info.get('snapshot_id', None)
    if snapshot_id is None:
        raise exception.NovaException(_('snapshot_id required '
                                        'in create_info'))
    try:
        self._volume_snapshot_create(context, instance, guest,
                                     volume_id, create_info['new_file'])
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error occurred during '
                              'volume_snapshot_create, '
                              'sending error status to Cinder.'),
                          instance=instance)
            self._volume_snapshot_update_status(
                context, snapshot_id, 'error')
    self._volume_snapshot_update_status(
        context, snapshot_id, 'creating')

    def _poll_snapshot_status():
        # Stop polling once Cinder reports the snapshot finished.
        snap = self._volume_api.get_snapshot(context, snapshot_id)
        if snap.get('status') != 'creating':
            self._volume_refresh_connection_info(context, instance,
                                                 volume_id)
            raise loopingcall.LoopingCallDone()

    poller = loopingcall.FixedIntervalLoopingCall(_poll_snapshot_status)
    poller.start(interval=0.5).wait()
@staticmethod
def _rebase_with_qemu_img(guest, device, active_disk_object,
                          rebase_base):
    """Rebase a device tied to a guest using qemu-img.

    :param guest: the Guest which owns the device being rebased
    :type guest: nova.virt.libvirt.guest.Guest
    :param device: the guest block device to rebase
    :type device: nova.virt.libvirt.guest.BlockDevice
    :param active_disk_object: the guest block device to rebase
    :type active_disk_object: nova.virt.libvirt.config.\
                              LibvirtConfigGuestDisk
    :param rebase_base: the new parent in the backing chain, or None
                        to flatten the image onto no backing file
    :type rebase_base: None or string
    """
    # qemu-img's behaviour with network disks is not proven for every
    # protocol, so refuse to touch them.
    active_protocol = active_disk_object.source_protocol
    if active_protocol is not None:
        msg = _("Something went wrong when deleting a volume snapshot: "
                "rebasing a %(protocol)s network disk using qemu-img "
                "has not been fully tested") % {'protocol':
                active_protocol}
        LOG.error(msg)
        raise exception.NovaException(msg)
    if rebase_base is None:
        # An empty backing-file string tells qemu-img to rebase the
        # image so it exists independently of any backing file.
        backing_file = ""
        extra_args = []
    else:
        # Pin the backing file format explicitly so qemu-img does not
        # auto-detect it (format auto-detection is a security concern).
        backing_file = rebase_base
        backing_fmt = images.qemu_img_info(backing_file).file_format
        extra_args = ['-F', backing_fmt]
    extra_args.append(active_disk_object.source_path)
    utils.execute("qemu-img", "rebase", "-b", backing_file, *extra_args)
def _volume_snapshot_delete(self, context, instance, volume_id,
                            snapshot_id, delete_info=None):
    """Delete a qcow2 volume snapshot by merging adjacent chain files.

    Note:
    if file being merged into == active image:
        do a blockRebase (pull) operation
    else:
        do a blockCommit operation
    Files must be adjacent in snap chain.

    :param instance: instance object reference
    :param volume_id: volume UUID
    :param snapshot_id: snapshot UUID (unused currently)
    :param delete_info: {
        'type': 'qcow2',
        'file_to_merge': 'a.img',
        'merge_target_file': 'b.img' or None (if merging file_to_merge into
                                              active image)
      }
    :raises: NovaException for unknown delete_info type or when the
             volume's disk is not attached to the instance
    :raises: InstanceNotRunning when the guest cannot be found
    """
    LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info,
              instance=instance)
    if delete_info['type'] != 'qcow2':
        msg = _('Unknown delete_info type %s') % delete_info['type']
        raise exception.NovaException(msg)
    try:
        guest = self._host.get_guest(instance)
    except exception.InstanceNotFound:
        raise exception.InstanceNotRunning(instance_id=instance.uuid)
    # Find dev name: locate the disk whose serial matches volume_id.
    my_dev = None
    active_disk = None
    xml = guest.get_xml_desc()
    xml_doc = etree.fromstring(xml)
    device_info = vconfig.LibvirtConfigGuest()
    device_info.parse_dom(xml_doc)
    active_disk_object = None
    for guest_disk in device_info.devices:
        if (guest_disk.root_name != 'disk'):
            continue
        if (guest_disk.target_dev is None or guest_disk.serial is None):
            continue
        if guest_disk.serial == volume_id:
            my_dev = guest_disk.target_dev
            active_disk = guest_disk.source_path
            active_protocol = guest_disk.source_protocol
            active_disk_object = guest_disk
            break
    # NOTE: if my_dev is None the 'or' short-circuits, so
    # active_protocol is only read when the loop above assigned it.
    if my_dev is None or (active_disk is None and active_protocol is None):
        msg = _('Disk with id: %s '
                'not found attached to instance.') % volume_id
        LOG.debug('Domain XML: %s', xml, instance=instance)
        raise exception.NovaException(msg)
    LOG.debug("found device at %s", my_dev, instance=instance)

    def _get_snap_dev(filename, backing_store):
        # Translate a snapshot filename into libvirt's 'dev[index]'
        # notation by walking the disk's backing chain; used for
        # network (libgfapi) disks where paths are not local files.
        if filename is None:
            msg = _('filename cannot be None')
            raise exception.NovaException(msg)
        # libgfapi delete
        LOG.debug("XML: %s", xml)
        LOG.debug("active disk object: %s", active_disk_object)
        # determine reference within backing store for desired image
        filename_to_merge = filename
        matched_name = None
        b = backing_store
        index = None
        current_filename = active_disk_object.source_name.split('/')[1]
        if current_filename == filename_to_merge:
            # The active image itself is always index 0.
            return my_dev + '[0]'
        while b is not None:
            source_filename = b.source_name.split('/')[1]
            if source_filename == filename_to_merge:
                LOG.debug('found match: %s', b.source_name)
                matched_name = b.source_name
                index = b.index
                break
            b = b.backing_store
        if matched_name is None:
            msg = _('no match found for %s') % (filename_to_merge)
            raise exception.NovaException(msg)
        LOG.debug('index of match (%s) is %s', b.source_name, index)
        my_snap_dev = '%s[%s]' % (my_dev, index)
        return my_snap_dev

    if delete_info['merge_target_file'] is None:
        # pull via blockRebase()
        # Merge the most recent snapshot into the active image
        rebase_disk = my_dev
        rebase_base = delete_info['file_to_merge']  # often None
        if (active_protocol is not None) and (rebase_base is not None):
            rebase_base = _get_snap_dev(rebase_base,
                                        active_disk_object.backing_store)
        # NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
        # and when available this flag _must_ be used to ensure backing
        # paths are maintained relative by qemu.
        #
        # If _RELATIVE flag not found, continue with old behaviour
        # (relative backing path seems to work for this case)
        try:
            libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE
            relative = rebase_base is not None
        except AttributeError:
            LOG.warning(_LW(
                "Relative blockrebase support was not detected. "
                "Continuing with old behaviour."))
            relative = False
        LOG.debug(
            'disk: %(disk)s, base: %(base)s, '
            'bw: %(bw)s, relative: %(relative)s',
            {'disk': rebase_disk,
             'base': rebase_base,
             'bw': libvirt_guest.BlockDevice.REBASE_DEFAULT_BANDWIDTH,
             'relative': str(relative)}, instance=instance)
        dev = guest.get_block_device(rebase_disk)
        if guest.is_active():
            result = dev.rebase(rebase_base, relative=relative)
            if result == 0:
                LOG.debug('blockRebase started successfully',
                          instance=instance)
            # Poll until the asynchronous block job finishes.
            while dev.wait_for_job(abort_on_error=True):
                LOG.debug('waiting for blockRebase job completion',
                          instance=instance)
                time.sleep(0.5)
        # If the guest is not running libvirt won't do a blockRebase.
        # In that case, let's ask qemu-img to rebase the disk.
        else:
            LOG.debug('Guest is not running so doing a block rebase '
                      'using "qemu-img rebase"', instance=instance)
            self._rebase_with_qemu_img(guest, dev, active_disk_object,
                                       rebase_base)
    else:
        # commit with blockCommit()
        my_snap_base = None
        my_snap_top = None
        commit_disk = my_dev
        # NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
        # and when available this flag _must_ be used to ensure backing
        # paths are maintained relative by qemu.
        #
        # If _RELATIVE flag not found, raise exception as relative backing
        # path may not be maintained and Cinder flow is broken if allowed
        # to continue.
        try:
            libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        except AttributeError:
            ver = '.'.join(
                [str(x) for x in
                 MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION])
            msg = _("Relative blockcommit support was not detected. "
                    "Libvirt '%s' or later is required for online "
                    "deletion of file/network storage-backed volume "
                    "snapshots.") % ver
            raise exception.Invalid(msg)
        if active_protocol is not None:
            # Network disks address chain members as dev[index].
            my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
                                         active_disk_object.backing_store)
            my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
                                        active_disk_object.backing_store)
        commit_base = my_snap_base or delete_info['merge_target_file']
        commit_top = my_snap_top or delete_info['file_to_merge']
        LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
                  'commit_base=%(commit_base)s '
                  'commit_top=%(commit_top)s ',
                  {'commit_disk': commit_disk,
                   'commit_base': commit_base,
                   'commit_top': commit_top}, instance=instance)
        dev = guest.get_block_device(commit_disk)
        result = dev.commit(commit_base, commit_top, relative=True)
        if result == 0:
            LOG.debug('blockCommit started successfully',
                      instance=instance)
        # Poll until the asynchronous block job finishes.
        while dev.wait_for_job(abort_on_error=True):
            LOG.debug('waiting for blockCommit job completion',
                      instance=instance)
            time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
                           delete_info):
    """Delete a Cinder volume snapshot, reporting status to Cinder.

    On failure the snapshot is flagged 'error_deleting' and the
    exception re-raised; on success it is flagged 'deleting' and the
    volume's connection info refreshed.
    """
    try:
        self._volume_snapshot_delete(context, instance, volume_id,
                                     snapshot_id, delete_info=delete_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Error occurred during '
                              'volume_snapshot_delete, '
                              'sending error status to Cinder.'),
                          instance=instance)
            self._volume_snapshot_update_status(
                context, snapshot_id, 'error_deleting')
    self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
    self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot a virtual machine, given an instance reference."""
    if reboot_type == 'SOFT':
        # Attempt a graceful shutdown/restart first; fall through to a
        # hard reboot if the guest does not cooperate.
        try:
            rebooted = self._soft_reboot(instance)
        except libvirt.libvirtError as e:
            LOG.debug("Instance soft reboot failed: %s", e,
                      instance=instance)
            rebooted = False
        if rebooted:
            LOG.info(_LI("Instance soft rebooted successfully."),
                     instance=instance)
            return
        LOG.warning(_LW("Failed to soft reboot instance. "
                        "Trying hard reboot."),
                    instance=instance)
    return self._hard_reboot(context, instance, network_info,
                             block_device_info)
def _soft_reboot(self, instance):
    """Attempt to shutdown and restart the instance gracefully.

    We use shutdown and create here so we can return if the guest
    responded and actually rebooted. Note that this method only
    succeeds if the guest responds to acpi. Therefore we return
    success or failure so we can fall back to a hard reboot if
    necessary.

    :returns: True if the reboot succeeded
    """
    guest = self._host.get_guest(instance)
    state = guest.get_power_state(self._host)
    initial_domid = guest.id
    # Allow rebooting an instance that is already shut down by only
    # issuing the shutdown when it is actually running.
    if state == power_state.RUNNING:
        guest.shutdown()
    # This could take slightly longer than the FLAG defines depending
    # on how long the get_info call takes to return.
    self._prepare_pci_devices_for_use(
        pci_manager.get_instance_pci_devs(instance, 'all'))
    for _sec in range(CONF.libvirt.wait_soft_reboot_seconds):
        guest = self._host.get_guest(instance)
        state = guest.get_power_state(self._host)
        # Compare domain IDs so we do not recreate a domain that is
        # already running again.
        if guest.id != initial_domid:
            if state not in (power_state.SHUTDOWN,
                             power_state.CRASHED):
                LOG.info(_LI("Instance may have been rebooted during soft "
                             "reboot, so return now."), instance=instance)
                return True
            LOG.info(_LI("Instance shutdown successfully."),
                     instance=instance)
            self._create_domain(domain=guest._domain)
            waiter = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_running, instance)
            waiter.start(interval=0.5).wait()
            return True
        greenthread.sleep(1)
    return False
def _hard_reboot(self, context, instance, network_info,
                 block_device_info=None):
    """Reboot a virtual machine, given an instance reference.

    Performs a Libvirt reset (if supported) on the domain.
    If Libvirt reset is unavailable this method actually destroys and
    re-creates the domain to ensure the reboot happens, as the guest
    OS cannot ignore this action.

    :param context: security context
    :param instance: nova instance object
    :param network_info: instance network information
    :param block_device_info: instance volume block device info
    """
    self._destroy(instance)
    # Domain XML will be redefined so we can safely undefine it
    # from libvirt. This ensure that such process as create serial
    # console for guest will run smoothly.
    self._undefine_domain(instance)
    # Convert the system metadata to image metadata
    instance_dir = libvirt_utils.get_instance_path(instance)
    fileutils.ensure_tree(instance_dir)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        instance.image_meta,
                                        block_device_info)
    # NOTE(vish): This could generate the wrong device_format if we are
    #             using the raw backend and the images don't exist yet.
    #             The create_images_and_backing below doesn't properly
    #             regenerate raw backend images, however, so when it
    #             does we need to (re)generate the xml after the images
    #             are in place.
    xml = self._get_guest_xml(context, instance, network_info, disk_info,
                              instance.image_meta,
                              block_device_info=block_device_info,
                              write_to_disk=True)
    if context.auth_token is not None:
        # NOTE (rmk): Re-populate any missing backing files.
        backing_disk_info = self._get_instance_disk_info(instance.name,
                                                         xml,
                                                         block_device_info)
        self._create_images_and_backing(context, instance, instance_dir,
                                        backing_disk_info)
    # Initialize all the necessary networking, block devices and
    # start the instance.
    self._create_domain_and_network(context, xml, instance, network_info,
                                    disk_info,
                                    block_device_info=block_device_info,
                                    reboot=True,
                                    vifs_already_plugged=True)
    self._prepare_pci_devices_for_use(
        pci_manager.get_instance_pci_devs(instance, 'all'))

    def _wait_for_reboot():
        """Called at an interval until the VM is running again."""
        state = self.get_info(instance).state
        if state == power_state.RUNNING:
            LOG.info(_LI("Instance rebooted successfully."),
                     instance=instance)
            raise loopingcall.LoopingCallDone()

    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
    timer.start(interval=0.5).wait()
def pause(self, instance):
    """Pause VM instance."""
    guest = self._host.get_guest(instance)
    guest.pause()
def unpause(self, instance):
    """Unpause paused VM instance."""
    guest = self._host.get_guest(instance)
    guest.resume()
def _clean_shutdown(self, instance, timeout, retry_interval):
    """Attempt to shutdown the instance gracefully.

    :param instance: The instance to be shutdown
    :param timeout: How long to wait in seconds for the instance to
                    shutdown
    :param retry_interval: How often in seconds to signal the instance
                           to shutdown while waiting
    :returns: True if the shutdown succeeded
    """
    # List of states that represent a shutdown instance
    SHUTDOWN_STATES = [power_state.SHUTDOWN,
                       power_state.CRASHED]
    try:
        guest = self._host.get_guest(instance)
    except exception.InstanceNotFound:
        # If the instance has gone then we don't need to
        # wait for it to shutdown
        return True
    state = guest.get_power_state(self._host)
    if state in SHUTDOWN_STATES:
        LOG.info(_LI("Instance already shutdown."),
                 instance=instance)
        return True
    LOG.debug("Shutting down instance from state %s", state,
              instance=instance)
    guest.shutdown()
    retry_countdown = retry_interval
    # Poll once per second up to the timeout, re-signalling shutdown
    # every retry_interval seconds.
    for sec in six.moves.range(timeout):
        guest = self._host.get_guest(instance)
        state = guest.get_power_state(self._host)
        if state in SHUTDOWN_STATES:
            LOG.info(_LI("Instance shutdown successfully after %d "
                         "seconds."), sec, instance=instance)
            return True
        # Note(PhilD): We can't assume that the Guest was able to process
        #              any previous shutdown signal (for example it may
        #              have still been startingup, so within the overall
        #              timeout we re-trigger the shutdown every
        #              retry_interval
        if retry_countdown == 0:
            retry_countdown = retry_interval
            # Instance could shutdown at any time, in which case we
            # will get an exception when we call shutdown
            try:
                LOG.debug("Instance in state %s after %d seconds - "
                          "resending shutdown", state, sec,
                          instance=instance)
                guest.shutdown()
            except libvirt.libvirtError:
                # Assume this is because its now shutdown, so loop
                # one more time to clean up.
                LOG.debug("Ignoring libvirt exception from shutdown "
                          "request.", instance=instance)
                # NOTE: 'continue' also skips the 1-second sleep below,
                # so the confirmation re-check happens immediately.
                continue
        else:
            retry_countdown -= 1
        time.sleep(1)
    LOG.info(_LI("Instance failed to shutdown in %d seconds."),
             timeout, instance=instance)
    return False
def power_off(self, instance, timeout=0, retry_interval=0):
    """Power off the specified instance."""
    if timeout:
        # Try a graceful shutdown first; the instance is destroyed
        # below whether or not it succeeded.
        self._clean_shutdown(instance, timeout, retry_interval)
    self._destroy(instance)
def power_on(self, context, instance, network_info,
             block_device_info=None):
    """Power on the specified instance."""
    # A hard reboot re-establishes all backing files, network and
    # block device connections before starting the guest.
    self._hard_reboot(context, instance, network_info, block_device_info)
def trigger_crash_dump(self, instance):
    """Trigger crash dump by injecting an NMI to the specified instance."""
    try:
        self._host.get_guest(instance).inject_nmi()
    except libvirt.libvirtError as ex:
        code = ex.get_error_code()
        # Translate the two expected libvirt failures into the
        # corresponding nova exceptions.
        if code == libvirt.VIR_ERR_NO_SUPPORT:
            raise exception.TriggerCrashDumpNotSupported()
        if code == libvirt.VIR_ERR_OPERATION_INVALID:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)
        LOG.exception(_LE('Error from libvirt while injecting an NMI to '
                          '%(instance_uuid)s: '
                          '[Error Code %(error_code)s] %(ex)s'),
                      {'instance_uuid': instance.uuid,
                       'error_code': code, 'ex': ex})
        raise
def suspend(self, context, instance):
    """Suspend the specified instance."""
    guest = self._host.get_guest(instance)
    # Passthrough devices are detached before the managed save and
    # reattached on resume().
    self._detach_pci_devices(
        guest, pci_manager.get_instance_pci_devs(instance))
    self._detach_sriov_ports(context, instance, guest)
    guest.save_memory_state()
def resume(self, context, instance, network_info, block_device_info=None):
    """resume the specified instance."""
    disk_info = blockinfo.get_disk_info(
        CONF.libvirt.virt_type, instance, instance.image_meta,
        block_device_info=block_device_info)
    xml = self._get_existing_domain_xml(instance, network_info,
                                        block_device_info)
    guest = self._create_domain_and_network(
        context, xml, instance, network_info, disk_info,
        block_device_info=block_device_info,
        vifs_already_plugged=True)
    # Reattach the passthrough devices that suspend() detached.
    self._attach_pci_devices(
        guest, pci_manager.get_instance_pci_devs(instance))
    self._attach_sriov_ports(context, instance, guest, network_info)
def resume_state_on_host_boot(self, context, instance, network_info,
                              block_device_info=None):
    """resume guest state when a host is booted."""
    # Nothing to do if the instance is already up (or deliberately
    # suspended/paused).
    try:
        guest = self._host.get_guest(instance)
        state = guest.get_power_state(self._host)
        if state in (power_state.RUNNING,
                     power_state.SUSPENDED,
                     power_state.NOSTATE,
                     power_state.PAUSED):
            return
    except exception.NovaException:
        pass
    # The instance is down or in an unknown state; a hard reboot is
    # the most reliable way back to a known running state.
    self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
           rescue_password):
    """Loads a VM using rescue images.

    A rescue is normally performed when something goes wrong with the
    primary images and data needs to be corrected/recovered. Rescuing
    should not edit or over-ride the original image, only allow for
    data recovery.
    """
    inst_dir = libvirt_utils.get_instance_path(instance)
    # Preserve the current domain XML so unrescue() can restore it.
    libvirt_utils.write_to_file(
        os.path.join(inst_dir, 'unrescue.xml'),
        self._get_existing_domain_xml(instance, network_info))
    if image_meta.obj_attr_is_set("id"):
        rescue_image_id = image_meta.id
    else:
        rescue_image_id = None
    rescue_images = {
        'image_id': (rescue_image_id or
                     CONF.libvirt.rescue_image_id or instance.image_ref),
        'kernel_id': (CONF.libvirt.rescue_kernel_id or
                      instance.kernel_id),
        'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
                       instance.ramdisk_id),
    }
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta,
                                        rescue=True)
    make_configdrive = functools.partial(self._create_configdrive,
                                         context, instance,
                                         admin_pass=rescue_password,
                                         network_info=network_info,
                                         suffix='.rescue')
    self._create_image(context, instance, disk_info['mapping'],
                       suffix='.rescue', disk_images=rescue_images,
                       network_info=network_info,
                       admin_pass=rescue_password)
    xml = self._get_guest_xml(context, instance, network_info, disk_info,
                              image_meta, rescue=rescue_images,
                              write_to_disk=True)
    self._destroy(instance)
    self._create_domain(xml, post_xml_callback=make_configdrive)
def unrescue(self, instance, network_info):
    """Reboot the VM which is being rescued back into primary images.
    """
    inst_dir = libvirt_utils.get_instance_path(instance)
    saved_xml_path = os.path.join(inst_dir, 'unrescue.xml')
    # Restore the pre-rescue domain XML saved by rescue().
    domain_xml = libvirt_utils.load_file(saved_xml_path)
    libvirt_utils.write_to_file(os.path.join(inst_dir, 'libvirt.xml'),
                                domain_xml)
    guest = self._host.get_guest(instance)
    # TODO(sahid): We are converting all calls from a
    # virDomain object to use nova.virt.libvirt.Guest.
    # We should be able to remove virt_dom at the end.
    virt_dom = guest._domain
    self._destroy(instance)
    self._create_domain(domain_xml, virt_dom)
    libvirt_utils.file_delete(saved_xml_path)
    # Drop every '*.rescue' artefact left in the instance directory.
    for rescue_file in glob.iglob(os.path.join(inst_dir, "*.rescue")):
        if os.path.isdir(rescue_file):
            shutil.rmtree(rescue_file)
        else:
            libvirt_utils.file_delete(rescue_file)
    # cleanup rescue volume
    lvm.remove_volumes([disk for disk in self._lvm_disks(instance)
                        if disk.endswith('.rescue')])
    if CONF.libvirt.images_type == 'rbd':
        def _is_rescue_disk(disk):
            return (disk.startswith(instance.uuid) and
                    disk.endswith('.rescue'))
        LibvirtDriver._get_rbd_driver().cleanup_volumes(_is_rescue_disk)
    # def poll_rebooting_instances(self, timeout, instances):
    #     pass
    # NOTE(ilyaalekseyev): spawn() below follows the multi-NIC approach
    # used by the xenapi driver (tr3buchet).
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Create the images for a new instance, boot it and wait until
    the guest reports RUNNING.
    """
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta,
                                        block_device_info)
    make_configdrive = functools.partial(self._create_configdrive,
                                         context, instance,
                                         admin_pass=admin_password,
                                         files=injected_files,
                                         network_info=network_info)
    self._create_image(context, instance,
                       disk_info['mapping'],
                       network_info=network_info,
                       block_device_info=block_device_info,
                       files=injected_files,
                       admin_pass=admin_password)
    # Required by Quobyte CI
    self._ensure_console_log_for_instance(instance)
    xml = self._get_guest_xml(context, instance, network_info,
                              disk_info, image_meta,
                              block_device_info=block_device_info,
                              write_to_disk=True)
    self._create_domain_and_network(
        context, xml, instance, network_info, disk_info,
        block_device_info=block_device_info,
        post_xml_callback=make_configdrive)
    LOG.debug("Instance is running", instance=instance)

    def _check_boot_complete():
        """Called at an interval until the VM is running."""
        if self.get_info(instance).state == power_state.RUNNING:
            LOG.info(_LI("Instance spawned successfully."),
                     instance=instance)
            raise loopingcall.LoopingCallDone()

    poller = loopingcall.FixedIntervalLoopingCall(_check_boot_complete)
    poller.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
    """Drain pending data from the given pty device without blocking."""
    stdout, _stderr = utils.execute('dd',
                                    'if=%s' % pty,
                                    'iflag=nonblock',
                                    run_as_root=True,
                                    check_exit_code=False)
    return stdout
def _append_to_file(self, data, fpath):
    """Append ``data`` to the file at ``fpath`` and return the path."""
    LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
             {'data': data, 'fpath': fpath})
    with open(fpath, 'a+') as fh:
        fh.write(data)
    return fpath
def get_console_output(self, context, instance):
    """Return the tail of the instance's console log.

    File-backed consoles are preferred; otherwise the pty console is
    drained into the log file and the tail of that file is returned.
    At most MAX_CONSOLE_BYTES are returned.
    """
    guest = self._host.get_guest(instance)
    xml = guest.get_xml_desc()
    tree = etree.fromstring(xml)
    # Group the domain's console nodes by their 'type' attribute.
    console_types = {}
    # NOTE(comstud): We want to try 'file' types first, then try 'pty'
    #                types. We can't use Python 2.7 syntax of:
    #                tree.find("./devices/console[@type='file']/source")
    #                because we need to support 2.6.
    console_nodes = tree.findall('./devices/console')
    for console_node in console_nodes:
        console_type = console_node.get('type')
        console_types.setdefault(console_type, [])
        console_types[console_type].append(console_node)
    # If the guest has a console logging to a file prefer to use that
    if console_types.get('file'):
        for file_console in console_types.get('file'):
            source_node = file_console.find('./source')
            if source_node is None:
                continue
            path = source_node.get("path")
            if not path:
                continue
            if not os.path.exists(path):
                LOG.info(_LI('Instance is configured with a file console, '
                             'but the backing file is not (yet?) present'),
                         instance=instance)
                return ""
            # libvirt chowns the log to root while the domain runs;
            # take ownership back so it can be read.
            libvirt_utils.chown(path, os.getuid())
            with libvirt_utils.file_open(path, 'rb') as fp:
                log_data, remaining = utils.last_bytes(fp,
                                                       MAX_CONSOLE_BYTES)
                if remaining > 0:
                    LOG.info(_LI('Truncated console log returned, '
                                 '%d bytes ignored'), remaining,
                             instance=instance)
                return log_data
    # Try 'pty' types
    if console_types.get('pty'):
        for pty_console in console_types.get('pty'):
            source_node = pty_console.find('./source')
            if source_node is None:
                continue
            pty = source_node.get("path")
            if not pty:
                continue
            break
        else:
            # for/else: no usable pty source was found.
            raise exception.ConsoleNotAvailable()
    # NOTE(review): if the domain has no 'pty' console at all, 'pty'
    # is unbound when used below — confirm intended behaviour.
    console_log = self._get_console_log_path(instance)
    # By default libvirt chowns the console log when it starts a domain.
    # We need to chown it back before attempting to read from or write
    # to it.
    if os.path.exists(console_log):
        libvirt_utils.chown(console_log, os.getuid())
    data = self._flush_libvirt_console(pty)
    fpath = self._append_to_file(data, console_log)
    with libvirt_utils.file_open(fpath, 'rb') as fp:
        log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
        if remaining > 0:
            LOG.info(_LI('Truncated console log returned, '
                         '%d bytes ignored'),
                     remaining, instance=instance)
        return log_data
def get_host_ip_addr(self):
    """Return the configured host IP, warning if it is not assigned
    to any local interface.
    """
    host_ips = compute_utils.get_machine_ips()
    if CONF.my_ip not in host_ips:
        LOG.warning(_LW('my_ip address (%(my_ip)s) was not found on '
                        'any of the interfaces: %(ifaces)s'),
                    {'my_ip': CONF.my_ip,
                     'ifaces': ", ".join(host_ips)})
    return CONF.my_ip
def get_vnc_console(self, context, instance):
    """Return VNC console connection details for the instance."""
    guest = self._host.get_guest(instance)
    xml_dom = etree.fromstring(guest.get_xml_desc())
    graphic = xml_dom.find("./devices/graphics[@type='vnc']")
    if graphic is None:
        # VNC consoles are enabled but this domain exposes no
        # VNC listener.
        raise exception.ConsoleTypeUnavailable(console_type='vnc')
    return ctype.ConsoleVNC(
        host=CONF.vnc.vncserver_proxyclient_address,
        port=graphic.get('port'))
def get_spice_console(self, context, instance):
    """Return SPICE console connection details for the instance."""
    guest = self._host.get_guest(instance)
    xml_dom = etree.fromstring(guest.get_xml_desc())
    graphic = xml_dom.find("./devices/graphics[@type='spice']")
    if graphic is None:
        # SPICE consoles are enabled but this domain exposes no
        # SPICE listener.
        raise exception.ConsoleTypeUnavailable(console_type='spice')
    return ctype.ConsoleSpice(
        host=CONF.spice.server_proxyclient_address,
        port=graphic.get('port'),
        tlsPort=graphic.get('tlsPort'))
def get_serial_console(self, context, instance):
guest = self._host.get_guest(instance)
for hostname, port in self._get_serial_ports_from_guest(
guest, mode='bind'):
return ctype.ConsoleSerial(host=hostname, port=port)
raise exception.ConsoleTypeUnavailable(console_type='serial')
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
fd = None
try:
fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
# Check is the write allowed with 512 byte alignment
align_size = 512
m = mmap.mmap(-1, align_size)
m.write(r"x" * align_size)
os.write(fd, m)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'"),
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'"), {'path': dirpath, 'ex': e})
finally:
# ensure unlink(filepath) will actually remove the file by deleting
# the remaining link to it in close(fd)
if fd is not None:
os.close(fd)
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
    @staticmethod
    def _create_ephemeral(target, ephemeral_size,
                          fs_label, os_type, is_block_dev=False,
                          context=None, specified_fs=None):
        """Create and format an ephemeral disk.

        :param target: path (or block device) the filesystem is made on
        :param ephemeral_size: backing image size in GB; ignored when
            target is a block device
        :param fs_label: filesystem label, e.g. 'ephemeral0'
        :param os_type: instance OS type, used to pick the mkfs command
        :param is_block_dev: True when target is a block device; then no
            image file is created and mkfs runs as root
        :param context: unused, kept for cache() callback compatibility
        :param specified_fs: explicit filesystem type requested via the
            block device mapping, if any
        """
        if not is_block_dev:
            libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
        # Run as root only for block devices.
        disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
                      specified_fs=specified_fs)
    @staticmethod
    def _create_swap(target, swap_mb, context=None):
        """Create a swap file of specified size.

        :param target: path of the raw image to create
        :param swap_mb: swap size in MB
        :param context: unused, kept for cache() callback compatibility
        """
        libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
        utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
    def _ensure_console_log_for_instance(self, instance):
        """Pre-create the instance's console.log with Nova's ownership.

        :param instance: nova instance whose console log is ensured
        """
        # NOTE(mdbooth): Although libvirt will create this file for us
        # automatically when it starts, it will initially create it with
        # root ownership and then chown it depending on the configuration of
        # the domain it is launching. Quobyte CI explicitly disables the
        # chown by setting dynamic_ownership=0 in libvirt's config.
        # Consequently when the domain starts it is unable to write to its
        # console.log. See bug https://bugs.launchpad.net/nova/+bug/1597644
        #
        # To work around this, we create the file manually before starting
        # the domain so it has the same ownership as Nova. This works
        # for Quobyte CI because it is also configured to run qemu as the same
        # user as the Nova service. Installations which don't set
        # dynamic_ownership=0 are not affected because libvirt will always
        # correctly configure permissions regardless of initial ownership.
        #
        # Setting dynamic_ownership=0 is dubious and potentially broken in
        # more ways than console.log (see comment #22 on the above bug), so
        # Future Maintainer who finds this code problematic should check to see
        # if we still support it.
        console_file = self._get_console_log_path(instance)
        LOG.debug('Ensure instance console log exists: %s', console_file,
                  instance=instance)
        # Append mode creates the file if missing without truncating an
        # existing log.
        libvirt_utils.file_open(console_file, 'a').close()
@staticmethod
def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config' + suffix)
@staticmethod
def _get_disk_config_image_type():
# TODO(mikal): there is a bug here if images_type has
# changed since creation of the instance, but I am pretty
# sure that this bug already exists.
return 'rbd' if CONF.libvirt.images_type == 'rbd' else 'raw'
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
"""Determines whether the VM is booting from volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
"""
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
@staticmethod
def _has_local_disk(instance, disk_mapping):
"""Determines whether the VM has a local disk
Determines whether the disk mapping indicates that the VM
has a local disk (e.g. ephemeral, swap disk and config-drive).
"""
if disk_mapping:
if ('disk.local' in disk_mapping or
'disk.swap' in disk_mapping or
'disk.config' in disk_mapping):
return True
return False
    def _inject_data(self, injection_image, instance, network_info,
                     admin_pass, files):
        """Injects data in a disk image

        Helper used for injecting data in a disk image file system.

        :param injection_image: an Image object we're injecting into
        :param instance: instance object providing kernel_id, key_data,
            metadata and image_ref
        :param instance: the instance being injected into
        :param network_info: network specification used to render the
            injected network template
        :param admin_pass: admin password to inject, if password
            injection is enabled
        :param files: list of files to be injected
        """
        # Handles the partition need to be used.
        target_partition = None
        if not instance.kernel_id:
            target_partition = CONF.libvirt.inject_partition
            if target_partition == 0:
                target_partition = None
        if CONF.libvirt.virt_type == 'lxc':
            target_partition = None
        # Handles the key injection.
        if CONF.libvirt.inject_key and instance.get('key_data'):
            key = str(instance.key_data)
        else:
            key = None
        # Handles the admin password injection.
        if not CONF.libvirt.inject_password:
            admin_pass = None
        # Handles the network injection.
        net = netutils.get_injected_network_template(
                network_info, libvirt_virt_type=CONF.libvirt.virt_type)
        # Handles the metadata injection
        metadata = instance.get('metadata')
        # Only touch the image when there is actually something to inject.
        if any((key, net, metadata, admin_pass, files)):
            img_id = instance.image_ref
            try:
                disk_api.inject_data(injection_image.get_model(self._conn),
                                     key, net, metadata, admin_pass, files,
                                     partition=target_partition,
                                     mandatory=('files',))
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Error injecting data into image '
                                  '%(img_id)s (%(e)s)'),
                              {'img_id': img_id, 'e': e},
                              instance=instance)
    # NOTE(sileht): many callers of this method assume that this
    # method doesn't fail if an image already exists but instead
    # think that it will be reused (ie: (live)-migration/resize)
    def _create_image(self, context, instance,
                      disk_mapping, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None, files=None,
                      admin_pass=None, inject_files=True,
                      fallback_from_host=None):
        """Create (or reuse from cache) all local disks for an instance.

        Ensures the instance directory exists, then caches/creates the
        kernel, ramdisk, root disk (with optional file injection),
        flavor and BDM ephemeral disks, and swap disk as dictated by
        disk_mapping and block_device_info.

        :raises: exception.InvalidBDMFormat when an ephemeral requests
            an unsupported guest filesystem format
        """
        booted_from_volume = self._is_booted_from_volume(
            instance, disk_mapping)
        def image(fname, image_type=CONF.libvirt.images_type):
            # Backend image handle for this instance, honouring suffix.
            return self.image_backend.image(instance,
                                            fname + suffix, image_type)
        def raw(fname):
            return image(fname, image_type='raw')
        # ensure directories exist and are writable
        fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
        LOG.info(_LI('Creating image'), instance=instance)
        if not disk_images:
            disk_images = {'image_id': instance.image_ref,
                           'kernel_id': instance.kernel_id,
                           'ramdisk_id': instance.ramdisk_id}
        if disk_images['kernel_id']:
            fname = imagecache.get_cache_fname(disk_images['kernel_id'])
            raw('kernel').cache(fetch_func=libvirt_utils.fetch_raw_image,
                                context=context,
                                filename=fname,
                                image_id=disk_images['kernel_id'])
            if disk_images['ramdisk_id']:
                fname = imagecache.get_cache_fname(disk_images['ramdisk_id'])
                raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_raw_image,
                                     context=context,
                                     filename=fname,
                                     image_id=disk_images['ramdisk_id'])
        inst_type = instance.get_flavor()
        if CONF.libvirt.virt_type == 'uml':
            libvirt_utils.chown(image('disk').path, 'root')
        self._create_and_inject_local_root(context, instance,
                                           booted_from_volume, suffix,
                                           disk_images, network_info,
                                           admin_pass, files, inject_files,
                                           fallback_from_host)
        # Lookup the filesystem type if required
        os_type_with_default = disk_api.get_fs_type_for_os_type(
            instance.os_type)
        # Generate a file extension based on the file system
        # type and the mkfs commands configured if any
        file_extension = disk_api.get_file_extension_for_os_type(
                                                          os_type_with_default)
        ephemeral_gb = instance.flavor.ephemeral_gb
        if 'disk.local' in disk_mapping:
            disk_image = image('disk.local')
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance.os_type,
                                   is_block_dev=disk_image.is_block_dev)
            fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension)
            size = ephemeral_gb * units.Gi
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=ephemeral_gb)
        for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
                block_device_info)):
            disk_image = image(blockinfo.get_eph_disk(idx))
            specified_fs = eph.get('guest_format')
            if specified_fs and not self.is_supported_fs_format(specified_fs):
                msg = _("%s format is not supported") % specified_fs
                raise exception.InvalidBDMFormat(details=msg)
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % idx,
                                   os_type=instance.os_type,
                                   is_block_dev=disk_image.is_block_dev)
            size = eph['size'] * units.Gi
            fname = "ephemeral_%s_%s" % (eph['size'], file_extension)
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=eph['size'],
                             specified_fs=specified_fs)
        if 'disk.swap' in disk_mapping:
            mapping = disk_mapping['disk.swap']
            swap_mb = 0
            swap = driver.block_device_info_get_swap(block_device_info)
            # BDM-provided swap wins; otherwise fall back to flavor swap
            # unless a volume already occupies that device.
            if driver.swap_is_usable(swap):
                swap_mb = swap['swap_size']
            elif (inst_type['swap'] > 0 and
                  not block_device.volume_in_mapping(
                      mapping['dev'], block_device_info)):
                swap_mb = inst_type['swap']
            if swap_mb > 0:
                size = swap_mb * units.Mi
                image('disk.swap').cache(fetch_func=self._create_swap,
                                         context=context,
                                         filename="swap_%s" % swap_mb,
                                         size=size,
                                         swap_mb=swap_mb)
    def _create_and_inject_local_root(self, context, instance,
                                      booted_from_volume, suffix, disk_images,
                                      network_info, admin_pass, files,
                                      inject_files, fallback_from_host):
        """Fetch/clone the root disk from the image and optionally inject
        data into it; no-op for volume-booted instances.
        """
        # File injection only if needed
        need_inject = (not configdrive.required_by(instance) and
                       inject_files and CONF.libvirt.inject_partition != -2)
        # NOTE(ndipanov): Even if disk_mapping was passed in, which
        # currently happens only on rescue - we still don't want to
        # create a base image.
        if not booted_from_volume:
            root_fname = imagecache.get_cache_fname(disk_images['image_id'])
            size = instance.flavor.root_gb * units.Gi
            if size == 0 or suffix == '.rescue':
                size = None
            backend = self.image_backend.image(instance, 'disk' + suffix,
                                               CONF.libvirt.images_type)
            if instance.task_state == task_states.RESIZE_FINISH:
                backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
            if backend.SUPPORTS_CLONE:
                def clone_fallback_to_fetch(*args, **kwargs):
                    # Clone from the image service when possible; fall
                    # back to a plain download when the image cannot be
                    # cloned (e.g. different ceph cluster / format).
                    try:
                        backend.clone(context, disk_images['image_id'])
                    except exception.ImageUnacceptable:
                        libvirt_utils.fetch_image(*args, **kwargs)
                fetch_func = clone_fallback_to_fetch
            else:
                fetch_func = libvirt_utils.fetch_image
            self._try_fetch_image_cache(backend, fetch_func, context,
                                        root_fname, disk_images['image_id'],
                                        instance, size, fallback_from_host)
            if need_inject:
                self._inject_data(backend, instance, network_info, admin_pass,
                                  files)
        elif need_inject:
            LOG.warning(_LW('File injection into a boot from volume '
                            'instance is not supported'), instance=instance)
    def _create_configdrive(self, context, instance, admin_pass=None,
                            files=None, network_info=None, suffix=''):
        """Build and import the instance's config drive, if required.

        Also populates instance.device_metadata, which is consumed by
        both the config drive and the metadata service.
        """
        # As this method being called right after the definition of a
        # domain, but before its actual launch, device metadata will be built
        # and saved in the instance for it to be used by the config drive and
        # the metadata service.
        instance.device_metadata = self._build_device_metadata(context,
                                                               instance)
        config_drive_image = None
        if configdrive.required_by(instance):
            LOG.info(_LI('Using config drive'), instance=instance)
            config_drive_image = self.image_backend.image(
                instance, 'disk.config' + suffix,
                self._get_disk_config_image_type())
            # Don't overwrite an existing config drive
            if not config_drive_image.exists():
                extra_md = {}
                if admin_pass:
                    extra_md['admin_pass'] = admin_pass
                inst_md = instance_metadata.InstanceMetadata(
                    instance, content=files, extra_md=extra_md,
                    network_info=network_info, request_context=context)
                cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
                with cdb:
                    # Build the drive locally first, then import it into
                    # the (possibly remote) image backend.
                    config_drive_local_path = self._get_disk_config_path(
                        instance, suffix)
                    LOG.info(_LI('Creating config drive at %(path)s'),
                             {'path': config_drive_local_path},
                             instance=instance)
                    try:
                        cdb.make_drive(config_drive_local_path)
                    except processutils.ProcessExecutionError as e:
                        with excutils.save_and_reraise_exception():
                            LOG.error(_LE('Creating config drive failed '
                                          'with error: %s'),
                                      e, instance=instance)
                try:
                    config_drive_image.import_file(
                        instance, config_drive_local_path,
                        'disk.config' + suffix)
                finally:
                    # NOTE(mikal): if the config drive was imported into RBD,
                    # then we no longer need the local copy
                    if CONF.libvirt.images_type == 'rbd':
                        os.unlink(config_drive_local_path)
    def _prepare_pci_devices_for_use(self, pci_devices):
        """Detach and reset PCI devices before handing them to a guest.

        Only acts for xen; kvm/qemu rely on libvirt's managed mode.

        :raises: exception.PciDevicePrepareFailed on any libvirt error
        """
        # kvm , qemu support managed mode
        # In managed mode, the configured device will be automatically
        # detached from the host OS drivers when the guest is started,
        # and then re-attached when the guest shuts down.
        if CONF.libvirt.virt_type != 'xen':
            # we do manual detach only for xen
            return
        try:
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._host.device_lookup_by_name(libvirt_dev_addr)
                # Note(yjiang5) Spelling for 'dettach' is correct, see
                # http://libvirt.org/html/libvirt-libvirt.html.
                libvirt_dev.dettach()
            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separated loops
            # to detach and then reset it.
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._host.device_lookup_by_name(libvirt_dev_addr)
                libvirt_dev.reset()
        except libvirt.libvirtError as exc:
            raise exception.PciDevicePrepareFailed(id=dev['id'],
                                                   instance_uuid=
                                                   dev['instance_uuid'],
                                                   reason=six.text_type(exc))
    def _detach_pci_devices(self, guest, pci_devs):
        """Detach the given PCI devices from a running guest.

        :raises: exception.PciDeviceDetachFailed when a device is still
            present in the domain XML after detaching
        """
        try:
            for dev in pci_devs:
                guest.detach_device(self._get_guest_pci_device(dev), live=True)
            # after detachDeviceFlags returned, we should check the dom to
            # ensure the detaching is finished
            # NOTE(review): this verification runs after the loop, so only
            # the last device in pci_devs ('dev' from the final iteration)
            # is checked — confirm whether each device should be verified.
            xml = guest.get_xml_desc()
            xml_doc = etree.fromstring(xml)
            guest_config = vconfig.LibvirtConfigGuest()
            guest_config.parse_dom(xml_doc)
            for hdev in [d for d in guest_config.devices
                if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
                hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
                dbsf = pci_utils.parse_address(dev.address)
                if [int(x, 16) for x in hdbsf] ==\
                        [int(x, 16) for x in dbsf]:
                    raise exception.PciDeviceDetachFailed(reason=
                                                          "timeout",
                                                          dev=dev)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warning(_LW("Instance disappeared while detaching "
                                "a PCI device from it."))
            else:
                raise
def _attach_pci_devices(self, guest, pci_devs):
try:
for dev in pci_devs:
guest.attach_device(self._get_guest_pci_device(dev))
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': guest.id})
raise
@staticmethod
def _has_sriov_port(network_info):
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
return True
return False
    def _attach_sriov_ports(self, context, instance, guest, network_info=None):
        """Hot-plug the instance's SR-IOV ports into the guest.

        :param network_info: optional override; falls back to the
            instance's cached network info when None
        """
        if network_info is None:
            network_info = instance.info_cache.network_info
        if network_info is None:
            return
        if self._has_sriov_port(network_info):
            for vif in network_info:
                if vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV:
                    cfg = self.vif_driver.get_config(instance,
                                                     vif,
                                                     instance.image_meta,
                                                     instance.flavor,
                                                     CONF.libvirt.virt_type,
                                                     self._host)
                    LOG.debug('Attaching SR-IOV port %(port)s to %(dom)s',
                              {'port': vif, 'dom': guest.id},
                              instance=instance)
                    guest.attach_device(cfg)
    def _detach_sriov_ports(self, context, instance, guest):
        """Hot-unplug the instance's SR-IOV ports from the guest.

        The PCI devices to detach are resolved from the pci_slot
        recorded in each SR-IOV VIF's binding profile.
        """
        network_info = instance.info_cache.network_info
        if network_info is None:
            return
        if self._has_sriov_port(network_info):
            # In case of SR-IOV vif types we create pci request per SR-IOV port
            # Therefore we can trust that pci_slot value in the vif is correct.
            sriov_pci_addresses = [
                vif['profile']['pci_slot']
                for vif in network_info
                if vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV and
                   vif['profile'].get('pci_slot') is not None
            ]
            # use detach_pci_devices to avoid failure in case of
            # multiple guest SRIOV ports with the same MAC
            # (protection use-case, ports are on different physical
            # interfaces)
            pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
            sriov_devs = [pci_dev for pci_dev in pci_devs
                          if pci_dev.address in sriov_pci_addresses]
            self._detach_pci_devices(guest, sriov_devs)
    def _set_host_enabled(self, enabled,
                          disable_reason=DISABLE_REASON_UNDEFINED):
        """Enables / Disables the compute service on this host.

        This doesn't override non-automatic disablement with an automatic
        setting; thereby permitting operators to keep otherwise
        healthy hosts out of rotation.

        :param enabled: True to enable the service, False to disable it
        :param disable_reason: reason stored when disabling; automatic
            disables are recognised by the DISABLE_PREFIX marker
        """
        # Keyed by disable_service (i.e. "not enabled"), so True maps
        # to 'disabled'.
        status_name = {True: 'disabled',
                       False: 'enabled'}
        disable_service = not enabled
        ctx = nova_context.get_admin_context()
        try:
            service = objects.Service.get_by_compute_host(ctx, CONF.host)
            if service.disabled != disable_service:
                # Note(jang): this is a quick fix to stop operator-
                # disabled compute hosts from re-enabling themselves
                # automatically. We prefix any automatic reason code
                # with a fixed string. We only re-enable a host
                # automatically if we find that string in place.
                # This should probably be replaced with a separate flag.
                if not service.disabled or (
                        service.disabled_reason and
                        service.disabled_reason.startswith(DISABLE_PREFIX)):
                    service.disabled = disable_service
                    service.disabled_reason = (
                        DISABLE_PREFIX + disable_reason
                        if disable_service else DISABLE_REASON_UNDEFINED)
                    service.save()
                    LOG.debug('Updating compute service status to %s',
                              status_name[disable_service])
                else:
                    LOG.debug('Not overriding manual compute service '
                              'status with: %s',
                              status_name[disable_service])
        except exception.ComputeHostNotFound:
            LOG.warning(_LW('Cannot update service status on host "%s" '
                            'since it is not registered.'), CONF.host)
        except Exception:
            LOG.warning(_LW('Cannot update service status on host "%s" '
                            'due to an unexpected exception.'), CONF.host,
                        exc_info=True)
    def _get_guest_cpu_model_config(self):
        """Build the guest CPU model config from CONF.libvirt settings.

        :returns: a LibvirtConfigGuestCPU (empty for mode "none" on
            kvm/qemu) or None when no CPU config applies
        :raises: exception.Invalid for invalid mode/model combinations
        """
        mode = CONF.libvirt.cpu_mode
        model = CONF.libvirt.cpu_model
        if (CONF.libvirt.virt_type == "kvm" or
            CONF.libvirt.virt_type == "qemu"):
            if mode is None:
                # kvm/qemu default to mirroring the host CPU model.
                mode = "host-model"
            if mode == "none":
                return vconfig.LibvirtConfigGuestCPU()
        else:
            if mode is None or mode == "none":
                return None
        if ((CONF.libvirt.virt_type != "kvm" and
             CONF.libvirt.virt_type != "qemu")):
            msg = _("Config requested an explicit CPU model, but "
                    "the current libvirt hypervisor '%s' does not "
                    "support selecting CPU models") % CONF.libvirt.virt_type
            raise exception.Invalid(msg)
        if mode == "custom" and model is None:
            msg = _("Config requested a custom CPU model, but no "
                    "model name was provided")
            raise exception.Invalid(msg)
        elif mode != "custom" and model is not None:
            msg = _("A CPU model name should not be set when a "
                    "host CPU model is requested")
            raise exception.Invalid(msg)
        LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
                  {'mode': mode, 'model': (model or "")})
        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.mode = mode
        cpu.model = model
        return cpu
def _get_guest_cpu_config(self, flavor, image_meta,
guest_cpu_numa_config, instance_numa_topology):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.get_best_cpu_topology(
flavor, image_meta, numa_topology=instance_numa_topology)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
return cpu
    def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
                               image_type=None):
        """Build the libvirt disk config for one image-backed disk.

        :param name: disk name key, e.g. 'disk', 'disk.local',
            'disk.config'
        :param disk_mapping: mapping of disk names to bus/dev/type info
        :param inst_type: flavor, consulted for extra_specs
        :param image_type: image backend type; None means the configured
            default
        :raises: exception.Invalid when discard is requested but qemu is
            too old
        """
        if CONF.libvirt.hw_disk_discard:
            if not self._host.has_min_version(hv_ver=MIN_QEMU_DISCARD_VERSION,
                                              hv_type=host.HV_DRIVER_QEMU):
                msg = (_('Volume sets discard option, qemu %(qemu)s'
                         ' or later is required.') %
                       {'qemu': MIN_QEMU_DISCARD_VERSION})
                raise exception.Invalid(msg)
        image = self.image_backend.image(instance,
                                         name,
                                         image_type)
        if (name == 'disk.config' and image_type == 'rbd' and
                not image.exists()):
            # This is likely an older config drive that has not been migrated
            # to rbd yet. Try to fall back on 'flat' image type.
            # TODO(melwitt): Add online migration of some sort so we can
            # remove this fall back once we know all config drives are in rbd.
            # NOTE(vladikr): make sure that the flat image exist, otherwise
            # the image will be created after the domain definition.
            flat_image = self.image_backend.image(instance, name, 'flat')
            if flat_image.exists():
                image = flat_image
                LOG.debug('Config drive not found in RBD, falling back to the '
                          'instance directory', instance=instance)
        disk_info = disk_mapping[name]
        return image.libvirt_info(disk_info['bus'],
                                  disk_info['dev'],
                                  disk_info['type'],
                                  self.disk_cachemode,
                                  inst_type['extra_specs'],
                                  self._host.get_version())
def _get_guest_fs_config(self, instance, name, image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
return image.libvirt_fs_info("/", "ploop")
    def _get_guest_storage_config(self, instance, image_meta,
                                  disk_info,
                                  rescue, block_device_info,
                                  inst_type, os_type):
        """Assemble every storage device config for the guest.

        Covers the lxc rootfs mount, parallels container filesystems,
        rescue/root/ephemeral/swap/config disks, attached volumes, and
        an optional SCSI controller. Also records the default ephemeral
        and swap device names on the instance and persists refreshed
        volume connection info.

        :returns: list of libvirt device config objects
        """
        devices = []
        disk_mapping = disk_info['mapping']
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        mount_rootfs = CONF.libvirt.virt_type == "lxc"
        if mount_rootfs:
            # lxc guests mount their root filesystem rather than using a
            # block disk.
            fs = vconfig.LibvirtConfigGuestFilesys()
            fs.source_type = "mount"
            fs.source_dir = os.path.join(
                libvirt_utils.get_instance_path(instance), 'rootfs')
            devices.append(fs)
        elif os_type == vm_mode.EXE and CONF.libvirt.virt_type == "parallels":
            if rescue:
                fsrescue = self._get_guest_fs_config(instance, "disk.rescue")
                devices.append(fsrescue)
                fsos = self._get_guest_fs_config(instance, "disk")
                fsos.target_dir = "/mnt/rescue"
                devices.append(fsos)
            else:
                if 'disk' in disk_mapping:
                    fs = self._get_guest_fs_config(instance, "disk")
                    devices.append(fs)
        else:
            if rescue:
                diskrescue = self._get_guest_disk_config(instance,
                                                         'disk.rescue',
                                                         disk_mapping,
                                                         inst_type)
                devices.append(diskrescue)
                diskos = self._get_guest_disk_config(instance,
                                                     'disk',
                                                     disk_mapping,
                                                     inst_type)
                devices.append(diskos)
            else:
                if 'disk' in disk_mapping:
                    diskos = self._get_guest_disk_config(instance,
                                                         'disk',
                                                         disk_mapping,
                                                         inst_type)
                    devices.append(diskos)
                if 'disk.local' in disk_mapping:
                    disklocal = self._get_guest_disk_config(instance,
                                                            'disk.local',
                                                            disk_mapping,
                                                            inst_type)
                    devices.append(disklocal)
                    instance.default_ephemeral_device = (
                        block_device.prepend_dev(disklocal.target_dev))
                for idx, eph in enumerate(
                    driver.block_device_info_get_ephemerals(
                        block_device_info)):
                    diskeph = self._get_guest_disk_config(
                        instance,
                        blockinfo.get_eph_disk(idx),
                        disk_mapping, inst_type)
                    devices.append(diskeph)
                if 'disk.swap' in disk_mapping:
                    diskswap = self._get_guest_disk_config(instance,
                                                           'disk.swap',
                                                           disk_mapping,
                                                           inst_type)
                    devices.append(diskswap)
                    instance.default_swap_device = (
                        block_device.prepend_dev(diskswap.target_dev))
            if 'disk.config' in disk_mapping:
                diskconfig = self._get_guest_disk_config(
                    instance, 'disk.config', disk_mapping, inst_type,
                    self._get_disk_config_image_type())
                devices.append(diskconfig)
        for vol in block_device.get_bdms_to_connect(block_device_mapping,
                                                    mount_rootfs):
            connection_info = vol['connection_info']
            vol_dev = block_device.prepend_dev(vol['mount_device'])
            info = disk_mapping[vol_dev]
            self._connect_volume(connection_info, info)
            cfg = self._get_volume_config(connection_info, info)
            devices.append(cfg)
            # _connect_volume may have refreshed the connection info;
            # persist it back to the BDM.
            vol['connection_info'] = connection_info
            vol.save()
        for d in devices:
            self._set_cache_mode(d)
        if image_meta.properties.get('hw_scsi_model'):
            hw_scsi_model = image_meta.properties.hw_scsi_model
            scsi_controller = vconfig.LibvirtConfigGuestController()
            scsi_controller.type = 'scsi'
            scsi_controller.model = hw_scsi_model
            devices.append(scsi_controller)
        return devices
def _get_host_sysinfo_serial_hardware(self):
"""Get a UUID from the host hardware
Get a UUID for the host hardware reported by libvirt.
This is typically from the SMBIOS data, unless it has
been overridden in /etc/libvirt/libvirtd.conf
"""
caps = self._host.get_capabilities()
return caps.host.uuid
def _get_host_sysinfo_serial_os(self):
"""Get a UUID from the host operating system
Get a UUID for the host operating system. Modern Linux
distros based on systemd provide a /etc/machine-id
file containing a UUID. This is also provided inside
systemd based containers and can be provided by other
init systems too, since it is just a plain text file.
"""
if not os.path.exists("/etc/machine-id"):
msg = _("Unable to get host UUID: /etc/machine-id does not exist")
raise exception.NovaException(msg)
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
lines = f.read().split()
if not lines:
msg = _("Unable to get host UUID: /etc/machine-id is empty")
raise exception.NovaException(msg)
return str(uuid.UUID(lines[0]))
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance.uuid
sysinfo.system_family = "Virtual Machine"
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device.address)
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
# only kvm support managed mode
if CONF.libvirt.virt_type in ('xen', 'parallels',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
    def _get_guest_config_meta(self, context, instance):
        """Get metadata config for guest.

        Builds the Nova <metadata> content for the domain XML: package
        version, display name, creation time, image root, owner (when a
        context is available) and flavor details.
        """
        meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
        meta.package = version.version_string_with_package()
        meta.name = instance.display_name
        meta.creationTime = time.time()
        if instance.image_ref not in ("", None):
            meta.roottype = "image"
            meta.rootid = instance.image_ref
        if context is not None:
            ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
            ometa.userid = context.user_id
            ometa.username = context.user_name
            ometa.projectid = context.project_id
            ometa.projectname = context.project_name
            meta.owner = ometa
        fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
        flavor = instance.flavor
        fmeta.name = flavor.name
        fmeta.memory = flavor.memory_mb
        fmeta.vcpus = flavor.vcpus
        fmeta.ephemeral = flavor.ephemeral_gb
        fmeta.disk = flavor.root_gb
        fmeta.swap = flavor.swap
        meta.flavor = fmeta
        return meta
def _machine_type_mappings(self):
mappings = {}
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
def _get_machine_type(self, image_meta, caps):
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
mach_type = None
if image_meta.properties.get('hw_machine_type') is not None:
mach_type = image_meta.properties.hw_machine_type
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == arch.ARMV7:
mach_type = "vexpress-a15"
if caps.host.cpu.arch == arch.AARCH64:
mach_type = "virt"
if caps.host.cpu.arch in (arch.S390, arch.S390X):
mach_type = 's390-ccw-virtio'
# If set in the config, use that as the default.
if CONF.libvirt.hw_machine_type:
mappings = self._machine_type_mappings()
mach_type = mappings.get(caps.host.cpu.arch)
return mach_type
@staticmethod
def _create_idmaps(klass, map_strings):
idmaps = []
if len(map_strings) > 5:
map_strings = map_strings[0:5]
LOG.warning(_LW("Too many id maps, only included first five."))
for map_string in map_strings:
try:
idmap = klass()
values = [int(i) for i in map_string.split(":")]
idmap.start = values[0]
idmap.target = values[1]
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
LOG.warning(_LW("Invalid value for id mapping %s"), map_string)
return idmaps
def _get_guest_idmaps(self):
id_maps = []
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
CONF.libvirt.uid_maps)
id_maps.extend(uid_maps)
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
CONF.libvirt.gid_maps)
id_maps.extend(gid_maps)
return id_maps
    def _update_guest_cputune(self, guest, flavor, virt_type):
        """Apply quota:cpu_* flavor extra specs to the guest cputune.

        :raises: exception.UnsupportedHostCPUControlPolicy when cputune
            extra specs are requested but the host cannot apply a CPU
            control policy
        """
        is_able = self._host.is_cpu_control_policy_capable()
        cputuning = ['shares', 'period', 'quota']
        wants_cputune = any([k for k in cputuning
            if "quota:cpu_" + k in flavor.extra_specs.keys()])
        if wants_cputune and not is_able:
            raise exception.UnsupportedHostCPUControlPolicy()
        if not is_able or virt_type not in ('lxc', 'kvm', 'qemu'):
            return
        if guest.cputune is None:
            guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
        # Setting the default cpu.shares value to be a value
        # dependent on the number of vcpus
        guest.cputune.shares = 1024 * guest.vcpus
        for name in cputuning:
            key = "quota:cpu_" + name
            if key in flavor.extra_specs:
                setattr(guest.cputune, name,
                        int(flavor.extra_specs[key]))
    def _get_cpu_numa_config_from_instance(self, instance_numa_topology,
                                           wants_hugepages):
        """Translate an instance NUMA topology into the libvirt CPU NUMA
        config.

        :param instance_numa_topology: instance NUMA topology, or a
            falsy value when the instance has none
        :param wants_hugepages: True when guest RAM is backed by huge
            pages; then cell memory access is marked shared for
            vhost-user
        :returns: LibvirtConfigGuestCPUNUMA, or None (implicitly) when
            there is no instance topology
        """
        if instance_numa_topology:
            guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
            for instance_cell in instance_numa_topology.cells:
                guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
                guest_cell.id = instance_cell.id
                guest_cell.cpus = instance_cell.cpuset
                guest_cell.memory = instance_cell.memory * units.Ki
                # The vhost-user network backend requires file backed
                # guest memory (ie huge pages) to be marked as shared
                # access, not private, so an external process can read
                # and write the pages.
                #
                # You can't change the shared vs private flag for an
                # already running guest, and since we can't predict what
                # types of NIC may be hotplugged, we have no choice but
                # to unconditionally turn on the shared flag. This has
                # no real negative functional effect on the guest, so
                # is a reasonable approach to take
                if wants_hugepages:
                    guest_cell.memAccess = "shared"
                guest_cpu_numa.cells.append(guest_cell)
            return guest_cpu_numa
    def _has_cpu_policy_support(self):
        """Check that the running libvirt supports CPU pinning.

        :returns: True when no known-bad libvirt version is in use
        :raises: exception.CPUPinningNotSupported when the host runs one
            of BAD_LIBVIRT_CPU_POLICY_VERSIONS
        """
        for ver in BAD_LIBVIRT_CPU_POLICY_VERSIONS:
            if self._host.has_version(ver):
                ver_ = self._version_to_string(ver)
                raise exception.CPUPinningNotSupported(reason=_(
                    'Invalid libvirt version %(version)s') % {'version': ver_})
        return True
def _wants_hugepages(self, host_topology, instance_topology):
"""Determine if the guest / host topology implies the
use of huge pages for guest RAM backing
"""
if host_topology is None or instance_topology is None:
return False
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
# Remove smallest page size as that's not classed as a largepage
avail_pagesize = avail_pagesize[1:]
# See if we have page size set
for cell in instance_topology.cells:
if (cell.pagesize is not None and
cell.pagesize in avail_pagesize):
return True
return False
    def _get_guest_numa_config(self, instance_numa_topology, flavor,
                               allowed_cpus=None, image_meta=None):
        """Returns the config objects for the guest NUMA specs.

        Determines the CPUs that the guest can be pinned to if the guest
        specifies a cell topology and the host supports it. Constructs the
        libvirt XML config object representing the NUMA topology selected
        for the guest. Returns a tuple of:

            (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)

        With the following caveats:

            a) If there is no specified guest NUMA topology, then
               all tuple elements except cpu_set shall be None. cpu_set
               will be populated with the chosen CPUs that the guest
               allowed CPUs fit within, which could be the supplied
               allowed_cpus value if the host doesn't support NUMA
               topologies.

            b) If there is a specified guest NUMA topology, then
               cpu_set will be None and guest_cpu_numa will be the
               LibvirtConfigGuestCPUNUMA object representing the guest's
               NUMA topology. If the host supports NUMA, then guest_cpu_tune
               will contain a LibvirtConfigGuestCPUTune object representing
               the optimized chosen cells that match the host capabilities
               with the instance's requested topology. If the host does
               not support NUMA, then guest_cpu_tune and guest_numa_tune
               will be None.
        """
        if (not self._has_numa_support() and
                instance_numa_topology is not None):
            # We should not get here, since we should have avoided
            # reporting NUMA topology from _get_host_numa_topology
            # in the first place. Just in case of a scheduler
            # mess up though, raise an exception
            raise exception.NUMATopologyUnsupported()
        topology = self._get_host_numa_topology()
        # We have instance NUMA so translate it to the config class
        guest_cpu_numa_config = self._get_cpu_numa_config_from_instance(
            instance_numa_topology,
            self._wants_hugepages(topology, instance_numa_topology))
        if not guest_cpu_numa_config:
            # No NUMA topology defined for instance - let the host kernel deal
            # with the NUMA effects.
            # TODO(ndipanov): Attempt to spread the instance
            # across NUMA nodes and expose the topology to the
            # instance as an optimisation
            return GuestNumaConfig(allowed_cpus, None, None, None)
        else:
            if topology:
                # Now get the CpuTune configuration from the numa_topology
                guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
                guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
                # Union of all host pCPUs the guest vCPUs are pinned to;
                # used for the emulator thread pinning below.
                allpcpus = []
                numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
                # One memnode entry per guest NUMA cell, filled in when
                # the matching host cell is found in the loop below.
                numa_memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()
                                 for _ in guest_cpu_numa_config.cells]
                for host_cell in topology.cells:
                    for guest_node_id, guest_config_cell in enumerate(
                            guest_cpu_numa_config.cells):
                        if guest_config_cell.id == host_cell.id:
                            node = numa_memnodes[guest_node_id]
                            node.cellid = guest_node_id
                            node.nodeset = [host_cell.id]
                            node.mode = "strict"
                            numa_mem.nodeset.append(host_cell.id)
                            object_numa_cell = (
                                instance_numa_topology.cells[guest_node_id]
                            )
                            for cpu in guest_config_cell.cpus:
                                pin_cpuset = (
                                    vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
                                pin_cpuset.id = cpu
                                # If there is pinning information in the cell
                                # we pin to individual CPUs, otherwise we float
                                # over the whole host NUMA node
                                if (object_numa_cell.cpu_pinning and
                                        self._has_cpu_policy_support()):
                                    pcpu = object_numa_cell.cpu_pinning[cpu]
                                    pin_cpuset.cpuset = set([pcpu])
                                else:
                                    pin_cpuset.cpuset = host_cell.cpuset
                                allpcpus.extend(pin_cpuset.cpuset)
                                guest_cpu_tune.vcpupin.append(pin_cpuset)
                # TODO(berrange) When the guest has >1 NUMA node, it will
                # span multiple host NUMA nodes. By pinning emulator threads
                # to the union of all nodes, we guarantee there will be
                # cross-node memory access by the emulator threads when
                # responding to guest I/O operations. The only way to avoid
                # this would be to pin emulator threads to a single node and
                # tell the guest OS to only do I/O from one of its virtual
                # NUMA nodes. This is not even remotely practical.
                #
                # The long term solution is to make use of a new QEMU feature
                # called "I/O Threads" which will let us configure an explicit
                # I/O thread for each guest vCPU or guest NUMA node. It is
                # still TBD how to make use of this feature though, especially
                # how to associate IO threads with guest devices to eliminiate
                # cross NUMA node traffic. This is an area of investigation
                # for QEMU community devs.
                emulatorpin = vconfig.LibvirtConfigGuestCPUTuneEmulatorPin()
                emulatorpin.cpuset = set(allpcpus)
                guest_cpu_tune.emulatorpin = emulatorpin
                # Sort the vcpupin list per vCPU id for human-friendlier XML
                guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
                if hardware.is_realtime_enabled(flavor):
                    if not self._host.has_min_version(
                            MIN_LIBVIRT_REALTIME_VERSION):
                        raise exception.RealtimePolicyNotSupported()
                    vcpus_rt, vcpus_em = hardware.vcpus_realtime_topology(
                        set(cpu.id for cpu in guest_cpu_tune.vcpupin),
                        flavor, image_meta)
                    vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
                    vcpusched.vcpus = vcpus_rt
                    vcpusched.scheduler = "fifo"
                    vcpusched.priority = (
                        CONF.libvirt.realtime_scheduler_priority)
                    guest_cpu_tune.vcpusched.append(vcpusched)
                    # Realtime vCPUs get FIFO scheduling; the emulator
                    # thread is restricted to the non-realtime pCPUs.
                    guest_cpu_tune.emulatorpin.cpuset = vcpus_em
                guest_numa_tune.memory = numa_mem
                guest_numa_tune.memnodes = numa_memnodes
                # normalize cell.id
                for i, (cell, memnode) in enumerate(
                        zip(guest_cpu_numa_config.cells,
                            guest_numa_tune.memnodes)):
                    cell.id = i
                    memnode.cellid = i
                return GuestNumaConfig(None, guest_cpu_tune,
                                       guest_cpu_numa_config,
                                       guest_numa_tune)
            else:
                return GuestNumaConfig(allowed_cpus, None,
                                       guest_cpu_numa_config, None)
def _get_guest_os_type(self, virt_type):
"""Returns the guest OS type based on virt type."""
if virt_type == "lxc":
ret = vm_mode.EXE
elif virt_type == "uml":
ret = vm_mode.UML
elif virt_type == "xen":
ret = vm_mode.XEN
else:
ret = vm_mode.HVM
return ret
def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type,
root_device_name):
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type,
root_device_name, image_meta):
guest.os_kernel = os.path.join(inst_path, "kernel")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance.ramdisk_id:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta.properties.get("os_command_line"):
guest.os_cmdline = image_meta.properties.os_command_line
def _set_clock(self, guest, os_type, image_meta, virt_type):
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if os_type == 'windows':
LOG.info(_LI('Configuring timezone for windows instance to '
'localtime'))
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if virt_type == "kvm":
self._set_kvm_timers(clk, os_type, image_meta)
def _set_kvm_timers(self, clk, os_type, image_meta):
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (arch.I686, arch.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
# With new enough QEMU we can provide Windows guests
# with the paravirtualized hyperv timer source. This
# is the windows equiv of kvm-clock, allowing Windows
# guests to accurately keep time.
if (os_type == 'windows' and
self._host.has_min_version(MIN_LIBVIRT_HYPERV_TIMER_VERSION,
MIN_QEMU_HYPERV_TIMER_VERSION)):
tmhyperv = vconfig.LibvirtConfigGuestTimer()
tmhyperv.name = "hypervclock"
tmhyperv.present = True
clk.add_timer(tmhyperv)
def _set_features(self, guest, os_type, caps, virt_type):
if virt_type == "xen":
# PAE only makes sense in X86
if caps.host.cpu.arch in (arch.I686, arch.X86_64):
guest.features.append(vconfig.LibvirtConfigGuestFeaturePAE())
if (virt_type not in ("lxc", "uml", "parallels", "xen") or
(virt_type == "xen" and guest.os_type == vm_mode.HVM)):
guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
if (virt_type in ("qemu", "kvm") and
os_type == 'windows'):
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
hv.relaxed = True
hv.spinlocks = True
# Increase spinlock retries - value recommended by
# KVM maintainers who certify Windows guests
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
guest.features.append(hv)
def _check_number_of_serial_console(self, num_ports):
virt_type = CONF.libvirt.virt_type
if (virt_type in ("kvm", "qemu") and
num_ports > ALLOWED_QEMU_SERIAL_PORTS):
raise exception.SerialPortNumberLimitExceeded(
allowed=ALLOWED_QEMU_SERIAL_PORTS, virt_type=virt_type)
def _create_serial_console_devices(self, guest, instance, flavor,
image_meta):
guest_arch = libvirt_utils.get_arch(image_meta)
if CONF.serial_console.enabled:
num_ports = hardware.get_number_of_serial_ports(
flavor, image_meta)
if guest_arch in (arch.S390, arch.S390X):
console_cls = vconfig.LibvirtConfigGuestConsole
else:
console_cls = vconfig.LibvirtConfigGuestSerial
self._check_number_of_serial_console(num_ports)
for port in six.moves.range(num_ports):
console = console_cls()
console.port = port
console.type = "tcp"
console.listen_host = (
CONF.serial_console.proxyclient_address)
console.listen_port = (
serial_console.acquire_port(
console.listen_host))
guest.add_device(console)
else:
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
if guest_arch in (arch.S390, arch.S390X):
consolelog = vconfig.LibvirtConfigGuestConsole()
consolelog.target_type = "sclplm"
else:
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
def _add_video_driver(self, guest, image_meta, flavor):
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta.properties, which
# is carried out in the next if statement below this one.
guestarch = libvirt_utils.get_arch(image_meta)
if guest.os_type == vm_mode.XEN:
video.type = 'xen'
elif CONF.libvirt.virt_type == 'parallels':
video.type = 'vga'
elif guestarch in (arch.PPC, arch.PPC64, arch.PPC64LE):
# NOTE(ldbragst): PowerKVM doesn't support 'cirrus' be default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if image_meta.properties.get('hw_video_model'):
video.type = image_meta.properties.hw_video_model
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = image_meta.properties.get('hw_video_ram', 0)
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
video.vram = video_ram * units.Mi / units.Ki
guest.add_device(video)
def _add_qga_device(self, guest, instance):
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance.name))
guest.add_device(qga)
def _add_rng_device(self, guest, flavor):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
rng_path = CONF.libvirt.rng_dev_path
if (rng_path and not os.path.exists(rng_path)):
raise exception.RngDeviceNotExist(path=rng_path)
rng_device.backend = rng_path
guest.add_device(rng_device)
def _set_qemu_guest_agent(self, guest, flavor, instance, image_meta):
# Enable qga only if the 'hw_qemu_guest_agent' is equal to yes
if image_meta.properties.get('hw_qemu_guest_agent', False):
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
self._add_qga_device(guest, instance)
rng_is_virtio = image_meta.properties.get('hw_rng_model') == 'virtio'
rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '')
rng_allowed = strutils.bool_from_string(rng_allowed_str)
if rng_is_virtio and rng_allowed:
self._add_rng_device(guest, flavor)
def _get_guest_memory_backing_config(
self, inst_topology, numatune, flavor):
wantsmempages = False
if inst_topology:
for cell in inst_topology.cells:
if cell.pagesize:
wantsmempages = True
break
wantsrealtime = hardware.is_realtime_enabled(flavor)
membacking = None
if wantsmempages:
pages = self._get_memory_backing_hugepages_support(
inst_topology, numatune)
if pages:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.hugepages = pages
if wantsrealtime:
if not membacking:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
membacking.sharedpages = False
return membacking
    def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
        """Build the memory-backing page configs for an instance whose
        NUMA cells request explicit page sizes.

        :param inst_topology: instance NUMA topology whose cells may
            carry a ``pagesize`` request.
        :param numatune: guest NUMATune config whose memnodes map guest
            cell ids to host cells.
        :returns: list of LibvirtConfigGuestMemoryBackingPage objects
        :raises: exception.MemoryPagesUnsupported when the host lacks
            hugepage support or reports no NUMA topology.
        """
        if not self._has_hugepage_support():
            # We should not get here, since we should have avoided
            # reporting NUMA topology from _get_host_numa_topology
            # in the first place. Just in case of a scheduler
            # mess up though, raise an exception
            raise exception.MemoryPagesUnsupported()
        host_topology = self._get_host_numa_topology()
        if host_topology is None:
            # As above, we should not get here but just in case...
            raise exception.MemoryPagesUnsupported()
        # Currently libvirt does not support the smallest
        # pagesize set as a backend memory.
        # https://bugzilla.redhat.com/show_bug.cgi?id=1173507
        avail_pagesize = [page.size_kb
                          for page in host_topology.cells[0].mempages]
        avail_pagesize.sort()
        smallest = avail_pagesize[0]
        pages = []
        for guest_cellid, inst_cell in enumerate(inst_topology.cells):
            # Only cells requesting a genuinely large page size get an
            # explicit backing page config.
            if inst_cell.pagesize and inst_cell.pagesize > smallest:
                # Find the memnode bound to this guest cell so the page
                # config targets the correct node.
                for memnode in numatune.memnodes:
                    if guest_cellid == memnode.cellid:
                        page = (
                            vconfig.LibvirtConfigGuestMemoryBackingPage())
                        page.nodeset = [guest_cellid]
                        page.size_kb = inst_cell.pagesize
                        pages.append(page)
                        break  # Quit early...
        return pages
def _get_flavor(self, ctxt, instance, flavor):
if flavor is not None:
return flavor
return instance.flavor
def _has_uefi_support(self):
# This means that the host can support uefi booting for guests
supported_archs = [arch.X86_64, arch.AARCH64]
caps = self._host.get_capabilities()
return ((caps.host.cpu.arch in supported_archs) and
self._host.has_min_version(MIN_LIBVIRT_UEFI_VERSION) and
os.path.exists(DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch]))
def _get_supported_perf_events(self):
if (len(CONF.libvirt.enabled_perf_events) == 0 or
not self._host.has_min_version(MIN_LIBVIRT_PERF_VERSION)):
return []
supported_events = []
host_cpu_info = self._get_cpu_info()
for event in CONF.libvirt.enabled_perf_events:
if self._supported_perf_event(event, host_cpu_info['features']):
supported_events.append(event)
return supported_events
def _supported_perf_event(self, event, cpu_features):
libvirt_perf_event_name = LIBVIRT_PERF_EVENT_PREFIX + event.upper()
if not hasattr(libvirt, libvirt_perf_event_name):
LOG.warning(_LW("Libvirt doesn't support event type %s."),
event)
return False
if (event in PERF_EVENTS_CPU_FLAG_MAPPING
and PERF_EVENTS_CPU_FLAG_MAPPING[event] not in cpu_features):
LOG.warning(_LW("Host does not support event type %s."), event)
return False
return True
    def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
                                      image_meta, flavor, root_device_name):
        """Apply per-hypervisor guest settings: loader, SMBIOS/sysinfo,
        UEFI firmware, machine type, boot menu, init path and kernel.
        """
        if virt_type == "xen":
            if guest.os_type == vm_mode.HVM:
                guest.os_loader = CONF.libvirt.xen_hvmloader_path
        elif virt_type in ("kvm", "qemu"):
            # SMBIOS/sysinfo data is only meaningful on x86 guests.
            if caps.host.cpu.arch in (arch.I686, arch.X86_64):
                guest.sysinfo = self._get_guest_config_sysinfo(instance)
                guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
            hw_firmware_type = image_meta.properties.get('hw_firmware_type')
            if hw_firmware_type == fields.FirmwareType.UEFI:
                if self._has_uefi_support():
                    global uefi_logged
                    # Warn only once per process about experimental UEFI.
                    if not uefi_logged:
                        LOG.warning(_LW("uefi support is without some kind of "
                                        "functional testing and therefore "
                                        "considered experimental."))
                        uefi_logged = True
                    guest.os_loader = DEFAULT_UEFI_LOADER_PATH[
                        caps.host.cpu.arch]
                    guest.os_loader_type = "pflash"
                else:
                    raise exception.UEFINotSupported()
            guest.os_mach_type = self._get_machine_type(image_meta, caps)
            # The flavor extra spec supplies the boot-menu default; the
            # image property, when present, overrides it.
            if image_meta.properties.get('hw_boot_menu') is None:
                guest.os_bootmenu = strutils.bool_from_string(
                    flavor.extra_specs.get('hw:boot_menu', 'no'))
            else:
                guest.os_bootmenu = image_meta.properties.hw_boot_menu
        elif virt_type == "lxc":
            guest.os_init_path = "/sbin/init"
            guest.os_cmdline = CONSOLE
        elif virt_type == "uml":
            guest.os_kernel = "/usr/bin/linux"
            guest.os_root = root_device_name
        elif virt_type == "parallels":
            if guest.os_type == vm_mode.EXE:
                guest.os_init_path = "/sbin/init"
def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info):
if rescue:
self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
root_device_name)
elif instance.kernel_id:
self._set_guest_for_inst_kernel(instance, guest, inst_path,
virt_type, root_device_name,
image_meta)
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
def _create_consoles(self, virt_type, guest, instance, flavor, image_meta,
caps):
if virt_type in ("qemu", "kvm"):
# Create the serial console char devices
self._create_serial_console_devices(guest, instance, flavor,
image_meta)
if caps.host.cpu.arch in (arch.S390, arch.S390X):
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.target_type = "sclp"
else:
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
return consolepty
def _cpu_config_to_vcpu_model(self, cpu_config, vcpu_model):
"""Update VirtCPUModel object according to libvirt CPU config.
:param:cpu_config: vconfig.LibvirtConfigGuestCPU presenting the
instance's virtual cpu configuration.
:param:vcpu_model: VirtCPUModel object. A new object will be created
if None.
:return: Updated VirtCPUModel object, or None if cpu_config is None
"""
if not cpu_config:
return
if not vcpu_model:
vcpu_model = objects.VirtCPUModel()
vcpu_model.arch = cpu_config.arch
vcpu_model.vendor = cpu_config.vendor
vcpu_model.model = cpu_config.model
vcpu_model.mode = cpu_config.mode
vcpu_model.match = cpu_config.match
if cpu_config.sockets:
vcpu_model.topology = objects.VirtCPUTopology(
sockets=cpu_config.sockets,
cores=cpu_config.cores,
threads=cpu_config.threads)
else:
vcpu_model.topology = None
features = [objects.VirtCPUFeature(
name=f.name,
policy=f.policy) for f in cpu_config.features]
vcpu_model.features = features
return vcpu_model
def _vcpu_model_to_cpu_config(self, vcpu_model):
"""Create libvirt CPU config according to VirtCPUModel object.
:param:vcpu_model: VirtCPUModel object.
:return: vconfig.LibvirtConfigGuestCPU.
"""
cpu_config = vconfig.LibvirtConfigGuestCPU()
cpu_config.arch = vcpu_model.arch
cpu_config.model = vcpu_model.model
cpu_config.mode = vcpu_model.mode
cpu_config.match = vcpu_model.match
cpu_config.vendor = vcpu_model.vendor
if vcpu_model.topology:
cpu_config.sockets = vcpu_model.topology.sockets
cpu_config.cores = vcpu_model.topology.cores
cpu_config.threads = vcpu_model.topology.threads
if vcpu_model.features:
for f in vcpu_model.features:
xf = vconfig.LibvirtConfigGuestCPUFeature()
xf.name = f.name
xf.policy = f.policy
cpu_config.features.add(xf)
return cpu_config
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
LOG.warn("_get_guest_config.............instance:%s" % instance)
flavor = instance.flavor
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
virt_type = CONF.libvirt.virt_type
guest = vconfig.LibvirtConfigGuest()
LOG.warn("guest----------------------%s" % guest)
guest.virt_type = virt_type
guest.name = instance.name
guest.uuid = instance.uuid
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
allowed_cpus = hardware.get_vcpu_pin_set()
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
guest_numa_config = self._get_guest_numa_config(
instance.numa_topology, flavor, allowed_cpus, image_meta)
guest.cpuset = guest_numa_config.cpuset
guest.cputune = guest_numa_config.cputune
guest.numatune = guest_numa_config.numatune
guest.membacking = self._get_guest_memory_backing_config(
instance.numa_topology,
guest_numa_config.numatune,
flavor)
guest.metadata.append(self._get_guest_config_meta(context,
instance))
guest.idmaps = self._get_guest_idmaps()
for event in self._supported_perf_events:
guest.add_perf_event(event)
self._update_guest_cputune(guest, flavor, virt_type)
guest.cpu = self._get_guest_cpu_config(
flavor, image_meta, guest_numa_config.numaconfig,
instance.numa_topology)
# Notes(yjiang5): we always sync the instance's vcpu model with
# the corresponding config file.
instance.vcpu_model = self._cpu_config_to_vcpu_model(
guest.cpu, instance.vcpu_model)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
guest.os_type = (vm_mode.get_from_instance(instance) or
self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
image_meta, flavor,
root_device_name)
if virt_type not in ('lxc', 'uml'):
self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info)
self._set_features(guest, instance.os_type, caps, virt_type)
self._set_clock(guest, instance.os_type, image_meta, virt_type)
storage_configs = self._get_guest_storage_config(
instance, image_meta, disk_info, rescue, block_device_info,
flavor, guest.os_type)
for config in storage_configs:
guest.add_device(config)
#import pdb
#pdb.set_trace()
if self.has_cdrom(instance,disk_info) is None:
cdrom = vconfig.LibvirtConfigGuestDisk()
cdrom.source_type = 'file'
cdrom.source_device = 'cdrom'
cdrom.target_bus = 'ide'
cdrom.target_dev = 'hdc'
cdrom.driver_name = 'qemu'
cdrom.driver_format = 'raw'
def is_iso_image_active(context, fake_image_id):
active_iso_images, flug = libvirt_utils.get_active_images(context, 'iso')
if flug:
fake_active_iso_images = []
for image in active_iso_images:
fake_active_iso_images.append(
hashlib.sha1(image).hexdigest())
if fake_image_id in fake_active_iso_images:
return True
else:
return False
else:
return True
try:
#exist_cdroms = self._list_cdrom(instance)
exist_cdroms = self.cdrom_list(instance)
found_instance = True
except:
found_instance = False
if found_instance:
if exist_cdroms:
image_id = exist_cdroms[0].get('image_id', '')
if image_id:
if not imagecache.iso_base_file_exists(image_id):
image_id = ''
if (image_id and not is_iso_image_active(context, image_id)):
imagecache.remove_base_image(image_id)
image_id = ''
else:
image_id = ''
else:
disk_format = getattr(image_meta, 'disk_format', '')
if disk_format == 'iso':
image_id = image_meta.get('id', '')
if not image_id:
image_id = image_meta['properties'].get('base_image_ref', '')
if image_id:
image_info = {}
image_info['image_id'] = image_id
image_id = imagecache.get_cache_fname(image_info, 'image_id')
else:
image_id = ''
if image_id != '':
base_url = self.image_cache_manager._get_base()
image_url = os.path.join(base_url, image_id)
else:
image_url = ''
cdrom.source_path = image_url
guest.add_device(cdrom)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, virt_type, self._host)
guest.add_device(config)
consolepty = self._create_consoles(virt_type, guest, instance, flavor,
image_meta, caps)
if virt_type != 'parallels':
consolepty.type = "pty"
guest.add_device(consolepty)
pointer = self._get_guest_pointer_model(guest.os_type, image_meta)
if pointer:
guest.add_device(pointer)
if (CONF.spice.enabled and CONF.spice.agent_enabled and
virt_type not in ('lxc', 'uml', 'xen')):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc.enabled and
virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.passwd = "%s" % instance.get("cipher", "00000")
graphics.keymap = CONF.vnc.keymap
graphics.listen = CONF.vnc.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if (CONF.spice.enabled and
virt_type not in ('lxc', 'uml', 'xen')):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.passwd = "%s" % instance.get("cipher", "00000")
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
self._add_video_driver(guest, image_meta, flavor)
# Qemu guest agent only support 'qemu' and 'kvm' hypervisor
if virt_type in ('qemu', 'kvm'):
self._set_qemu_guest_agent(guest, flavor, instance, image_meta)
if virt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
if len(pci_devs) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=virt_type)
if 'hw_watchdog_action' in flavor.extra_specs:
LOG.warning(_LW('Old property name "hw_watchdog_action" is now '
'deprecated and will be removed in the next release. '
'Use updated property name '
'"hw:watchdog_action" instead'), instance=instance)
# TODO(pkholkin): accepting old property name 'hw_watchdog_action'
# should be removed in the next release
watchdog_action = (flavor.extra_specs.get('hw_watchdog_action') or
flavor.extra_specs.get('hw:watchdog_action')
or 'disabled')
watchdog_action = image_meta.properties.get('hw_watchdog_action',
watchdog_action)
# NB(sross): currently only actually supported by KVM/QEmu
if watchdog_action != 'disabled':
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
# Memory balloon device only support 'qemu/kvm' and 'xen' hypervisor
if (virt_type in ('xen', 'qemu', 'kvm') and
CONF.libvirt.mem_stats_period_seconds > 0):
balloon = vconfig.LibvirtConfigMemoryBalloon()
if virt_type in ('qemu', 'kvm'):
balloon.model = 'virtio'
else:
balloon.model = 'xen'
balloon.period = CONF.libvirt.mem_stats_period_seconds
guest.add_device(balloon)
return guest
def _get_guest_pointer_model(self, os_type, image_meta):
pointer_model = image_meta.properties.get(
'hw_pointer_model', CONF.pointer_model)
if pointer_model is None and CONF.libvirt.use_usb_tablet:
# TODO(sahid): We set pointer_model to keep compatibility
# until the next release O*. It means operators can continue
# to use the deprecated option "use_usb_tablet" or set a
# specific device to use
pointer_model = "usbtablet"
LOG.warning(_LW('The option "use_usb_tablet" has been '
'deprecated for Newton in favor of the more '
'generic "pointer_model". Please update '
'nova.conf to address this change.'))
if pointer_model == "usbtablet":
# We want a tablet if VNC is enabled, or SPICE is enabled and
# the SPICE agent is disabled. If the SPICE agent is enabled
# it provides a paravirt mouse which drastically reduces
# overhead (by eliminating USB polling).
if CONF.vnc.enabled or (
CONF.spice.enabled and not CONF.spice.agent_enabled):
return self._get_guest_usb_tablet(os_type)
else:
if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
# For backward compatibility We don't want to break
# process of booting an instance if host is configured
# to use USB tablet without VNC or SPICE and SPICE
# agent disable.
LOG.warning(_LW('USB tablet requested for guests by host '
'configuration. In order to accept this '
'request VNC should be enabled or SPICE '
'and SPICE agent disabled on host.'))
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
def _get_guest_usb_tablet(self, os_type):
tablet = None
if os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
else:
if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
# For backward compatibility We don't want to break
# process of booting an instance if virtual machine mode
# is not configured as HVM.
LOG.warning(_LW('USB tablet requested for guests by host '
'configuration. In order to accept this '
'request the machine mode should be '
'configured as HVM.'))
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
return tablet
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None, write_to_disk=False):
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
guest = self._host.get_guest(instance)
# Kind of ugly but we need to pass host to get_info as for a
# workaround, see libvirt/compat.py
return guest.get_info(self._host)
def _create_domain_setup_lxc(self, instance, image_meta,
                             block_device_info, disk_info):
    """Prepare and mount the root filesystem for an LXC container.

    The container rootfs directory is created under the instance path
    and populated from either the root cinder volume (boot-from-volume)
    or the local image backend. The rootfs device name is recorded in
    the instance system_metadata so it can be disconnected on delete.
    On failure the partial setup is torn down before re-raising.

    :param instance: nova.objects.instance.Instance object
    :param image_meta: image metadata for the instance
    :param block_device_info: result of _get_instance_block_device_info
    :param disk_info: dict with a 'mapping' entry describing the disks
    """
    inst_path = libvirt_utils.get_instance_path(instance)
    disk_info = disk_info or {}
    disk_mapping = disk_info.get('mapping', {})
    if self._is_booted_from_volume(instance, disk_mapping):
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        root_disk = block_device.get_root_bdm(block_device_mapping)
        disk_info = blockinfo.get_info_from_bdm(
            instance, CONF.libvirt.virt_type, image_meta, root_disk)
        self._connect_volume(root_disk['connection_info'], disk_info)
        disk_path = root_disk['connection_info']['data']['device_path']

        # NOTE(apmelton) - Even though the instance is being booted from a
        # cinder volume, it is still presented as a local block device.
        # LocalBlockImage is used here to indicate that the instance's
        # disk is backed by a local block device.
        image_model = imgmodel.LocalBlockImage(disk_path)
    else:
        image = self.image_backend.image(instance, 'disk')
        image_model = image.get_model(self._conn)
    container_dir = os.path.join(inst_path, 'rootfs')
    fileutils.ensure_tree(container_dir)
    rootfs_dev = disk_api.setup_container(image_model,
                                          container_dir=container_dir)
    try:
        # Save rootfs device to disconnect it when deleting the instance
        if rootfs_dev:
            instance.system_metadata['rootfs_device_name'] = rootfs_dev
        if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:
            # Map container ownership onto the configured uid/gid ranges.
            id_maps = self._get_guest_idmaps()
            libvirt_utils.chown_for_id_maps(container_dir, id_maps)
    except Exception:
        with excutils.save_and_reraise_exception():
            self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
    """Clean up host-side LXC rootfs state after boot (or boot failure).

    If the container is running it already has its own private mount
    namespace, so only the host-side mount is unmounted; otherwise the
    container rootfs is torn down completely.
    """
    inst_path = libvirt_utils.get_instance_path(instance)
    container_dir = os.path.join(inst_path, 'rootfs')
    try:
        state = self.get_info(instance).state
    except exception.InstanceNotFound:
        # The domain may not be present if the instance failed to start
        state = None
    if state == power_state.RUNNING:
        # NOTE(uni): Now the container is running with its own private
        # mount namespace and so there is no need to keep the container
        # rootfs mounted in the host namespace
        LOG.debug('Attempting to unmount container filesystem: %s',
                  container_dir, instance=instance)
        disk_api.clean_lxc_namespace(container_dir=container_dir)
    else:
        disk_api.teardown_container(container_dir=container_dir)
@contextlib.contextmanager
def _lxc_disk_handler(self, instance, image_meta,
                      block_device_info, disk_info):
    """Context manager to handle the pre and post instance boot,
    LXC specific disk operations.

    An image or a volume path will be prepared and setup to be
    used by the container, prior to starting it.
    The disk will be disconnected and unmounted if a container has
    failed to start.

    For any virt_type other than 'lxc' this is a no-op.
    """
    if CONF.libvirt.virt_type != 'lxc':
        yield
        return

    self._create_domain_setup_lxc(instance, image_meta,
                                  block_device_info, disk_info)
    try:
        yield
    finally:
        # Clean up whether the guest started successfully or not.
        self._create_domain_cleanup_lxc(instance)
# TODO(sahid): Consider renaming this to _create_guest.
def _create_domain(self, xml=None, domain=None,
                   power_on=True, pause=False, post_xml_callback=None):
    """Create a domain.

    Either domain or xml must be passed in. If both are passed, then
    the domain definition is overwritten from the xml.

    :param xml: guest XML to define a new domain from
    :param domain: an existing virDomain to wrap when xml is not given
    :param power_on: if True, launch the guest after defining it
    :param pause: if True, launch the guest in the paused state
    :param post_xml_callback: optional callable invoked right after the
        XML has been defined
    :returns guest.Guest: Guest just created
    """
    if xml:
        guest = libvirt_guest.Guest.create(xml, self._host)
        if post_xml_callback is not None:
            post_xml_callback()
    else:
        guest = libvirt_guest.Guest(domain)
    if power_on or pause:
        guest.launch(pause=pause)
    if not utils.is_neutron():
        # nova-network needs hairpin mode for guest-to-self traffic
        guest.enable_hairpin()
    return guest
def _neutron_failed_callback(self, event_name, instance):
    """Handle a neutron VIF plugging failure event.

    Logs the failure and, when vif_plugging_is_fatal is configured,
    aborts the boot by raising VirtualInterfaceCreateException.
    """
    LOG.error(_LE('Neutron Reported failure on event '
                  '%(event)s for instance %(uuid)s'),
              {'event': event_name, 'uuid': instance.uuid},
              instance=instance)
    if CONF.vif_plugging_is_fatal:
        raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _create_domain_and_network(self, context, xml, instance, network_info,
                               disk_info, block_device_info=None,
                               power_on=True, reboot=False,
                               vifs_already_plugged=False,
                               post_xml_callback=None):
    """Do required network setup and create domain.

    :param context: security context
    :param xml: guest XML for the domain to create
    :param instance: nova.objects.instance.Instance object
    :param network_info: instance network information
    :param disk_info: disk mapping information for the guest
    :param block_device_info: result of _get_instance_block_device_info
    :param power_on: if True, power the guest on after creation
    :param reboot: if True, skip re-attaching volume encryptors
    :param vifs_already_plugged: if True, do not wait for neutron
        vif-plugged events
    :param post_xml_callback: optional callable run after the guest XML
        has been defined
    :returns: the created Guest, resumed if it was started paused
    """
    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)
    # (Re-)attach volume encryptors for encrypted volumes before boot.
    for vol in block_device_mapping:
        connection_info = vol['connection_info']
        if (not reboot and 'data' in connection_info and
                'volume_id' in connection_info['data']):
            volume_id = connection_info['data']['volume_id']
            encryption = encryptors.get_encryption_metadata(
                context, self._volume_api, volume_id, connection_info)

            if encryption:
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.attach_volume(context, **encryption)

    timeout = CONF.vif_plugging_timeout
    if (self._conn_supports_start_paused and
        utils.is_neutron() and not
        vifs_already_plugged and power_on and timeout):
        events = self._get_neutron_events(network_info)
    else:
        events = []

    # Start paused only when we must wait for plugging events, so the
    # guest does not boot before its network is ready.
    pause = bool(events)
    guest = None
    try:
        with self.virtapi.wait_for_instance_event(
                instance, events, deadline=timeout,
                error_callback=self._neutron_failed_callback):
            self.plug_vifs(instance, network_info)
            self.firewall_driver.setup_basic_filtering(instance,
                                                       network_info)
            self.firewall_driver.prepare_instance_filter(instance,
                                                         network_info)
            with self._lxc_disk_handler(instance, instance.image_meta,
                                        block_device_info, disk_info):
                guest = self._create_domain(
                    xml, pause=pause, power_on=power_on,
                    post_xml_callback=post_xml_callback)

            self.firewall_driver.apply_instance_filter(instance,
                                                       network_info)
    except exception.VirtualInterfaceCreateException:
        # Neutron reported failure and we didn't swallow it, so
        # bail here
        with excutils.save_and_reraise_exception():
            if guest:
                guest.poweroff()
            self.cleanup(context, instance, network_info=network_info,
                         block_device_info=block_device_info)
    except eventlet.timeout.Timeout:
        # We never heard from Neutron
        LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                        'instance %(uuid)s'), {'uuid': instance.uuid},
                    instance=instance)
        if CONF.vif_plugging_is_fatal:
            if guest:
                guest.poweroff()
            self.cleanup(context, instance, network_info=network_info,
                         block_device_info=block_device_info)
            raise exception.VirtualInterfaceCreateException()

    # Resume only if domain has been paused
    if pause:
        guest.resume()
    return guest
def _get_vcpu_total(self):
    """Get available vcpu number of physical computer.

    :returns: the number of cpu core instances can be used; 0 when the
        platform cannot report a CPU count
    :raises exception.Invalid: if vcpu_pin_set references CPUs that are
        offline or outside the hypervisor's CPU range
    """
    try:
        total_pcpus = self._host.get_cpu_count()
    except libvirt.libvirtError:
        LOG.warning(_LW("Cannot get the number of cpu, because this "
                        "function is not implemented for this platform. "))
        return 0

    if not CONF.vcpu_pin_set:
        return total_pcpus

    available_ids = hardware.get_vcpu_pin_set()

    # We get the list of online CPUs on the host and see if the requested
    # set falls under these. If not, we retain the old behavior.
    online_pcpus = None
    try:
        online_pcpus = self._host.get_online_cpus()
    except libvirt.libvirtError as ex:
        error_code = ex.get_error_code()
        LOG.warning(
            _LW("Couldn't retrieve the online CPUs due to a Libvirt "
                "error: %(error)s with error code: %(error_code)s"),
            {'error': ex, 'error_code': error_code})
    if online_pcpus:
        if not (available_ids <= online_pcpus):
            # BUG FIX: the message placeholders were previously never
            # interpolated -- the substitution dict was passed as the
            # second element of a tuple instead of being applied with
            # the '%' operator, so the raised Invalid carried a tuple
            # with literal '%(online)s' markers.
            msg = (_("Invalid vcpu_pin_set config, one or more of the "
                     "specified cpuset is not online. Online cpuset(s): "
                     "%(online)s, requested cpuset(s): %(req)s") %
                   {'online': sorted(online_pcpus),
                    'req': sorted(available_ids)})
            raise exception.Invalid(msg)
    elif sorted(available_ids)[-1] >= total_pcpus:
        raise exception.Invalid(_("Invalid vcpu_pin_set config, "
                                  "out of hypervisor cpu range."))
    return len(available_ids)
@staticmethod
def _get_local_gb_info():
    """Get local storage info of the compute node in GB.

    :returns: A dict containing:
        :total: How big the overall usable filesystem is (in gigabytes)
        :free: How much space is free (in gigabytes)
        :used: How much space is used (in gigabytes)
    """
    images_type = CONF.libvirt.images_type
    if images_type == 'lvm':
        info = lvm.get_volume_group_info(
            CONF.libvirt.images_volume_group)
    elif images_type == 'rbd':
        info = LibvirtDriver._get_rbd_driver().get_pool_info()
    else:
        info = libvirt_utils.get_fs_info(CONF.instances_path)

    # Backends report byte counts; convert everything to GiB.
    return {key: value / units.Gi for key, value in info.items()}
def _get_vcpu_used(self):
    """Get vcpu usage number of physical computer.

    :returns: The total number of vcpu(s) that are currently being used.
    """
    total = 0
    if CONF.libvirt.virt_type == 'lxc':
        # NOTE(review): for LXC this reports a flat count of 1 and skips
        # enumerating guests entirely -- presumably a placeholder;
        # confirm whether per-container accounting is intended.
        return total + 1

    for guest in self._host.list_guests():
        try:
            vcpus = guest.get_vcpus_info()
            if vcpus is not None:
                total += len(list(vcpus))
        except libvirt.libvirtError as e:
            # The domain may have disappeared between listing and query;
            # log and keep counting the remaining guests.
            LOG.warning(
                _LW("couldn't obtain the vcpu count from domain id:"
                    " %(uuid)s, exception: %(ex)s"),
                {"uuid": guest.uuid, "ex": e})
        # NOTE(gtt116): give other tasks a chance.
        greenthread.sleep(0)
    return total
def _get_instance_capabilities(self):
    """Get hypervisor instance capabilities

    Returns a list of tuples that describe instances the
    hypervisor is capable of hosting. Each tuple consists
    of the triplet (arch, hypervisor_type, vm_mode).

    :returns: List of tuples describing instance capabilities
    """
    caps = self._host.get_capabilities()
    # One capability triplet per (guest arch, domain type) combination.
    return [(arch.canonicalize(guest.arch),
             hv_type.canonicalize(domtype),
             vm_mode.canonicalize(guest.ostype))
            for guest in caps.guests
            for domtype in guest.domtype]
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities.
:return: see above description
"""
caps = self._host.get_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['cells'] = len(getattr(caps.host.topology, 'cells', [1]))
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = set()
for f in caps.host.cpu.features:
features.add(f.name)
cpu_info['features'] = features
return cpu_info
def _get_pcidev_info(self, devname):
    """Returns a dict of PCI device.

    :param devname: libvirt node device name to look up
    :returns: dict with keys 'dev_id', 'address', 'product_id',
        'vendor_id', 'numa_node', 'label', 'dev_type' and, for SR-IOV
        VFs, 'parent_addr'
    """

    def _get_device_type(cfgdev, pci_address):
        """Get a PCI device's device type.

        An assignable PCI device can be a normal PCI device,
        a SR-IOV Physical Function (PF), or a SR-IOV Virtual
        Function (VF). Only normal PCI devices or SR-IOV VFs
        are assignable, while SR-IOV PFs are always owned by
        hypervisor.
        """
        for fun_cap in cfgdev.pci_capability.fun_capability:
            if fun_cap.type == 'virt_functions':
                return {
                    'dev_type': fields.PciDeviceType.SRIOV_PF,
                }
            if (fun_cap.type == 'phys_function' and
                len(fun_cap.device_addrs) != 0):
                # Format the parent PF address from its
                # (domain, bus, slot, function) integer fields.
                phys_address = "%04x:%02x:%02x.%01x" % (
                    fun_cap.device_addrs[0][0],
                    fun_cap.device_addrs[0][1],
                    fun_cap.device_addrs[0][2],
                    fun_cap.device_addrs[0][3])
                return {
                    'dev_type': fields.PciDeviceType.SRIOV_VF,
                    'parent_addr': phys_address,
                }

        # Note(moshele): libvirt < 1.3 reported virt_functions capability
        # only when VFs are enabled. The check below is a workaround
        # to get the correct report regardless of whether or not any
        # VFs are enabled for the device.
        if not self._host.has_min_version(
                MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION):
            is_physical_function = pci_utils.is_physical_function(
                *pci_utils.get_pci_address_fields(pci_address))
            if is_physical_function:
                return {'dev_type': fields.PciDeviceType.SRIOV_PF}

        return {'dev_type': fields.PciDeviceType.STANDARD}

    virtdev = self._host.device_lookup_by_name(devname)
    xmlstr = virtdev.XMLDesc(0)
    cfgdev = vconfig.LibvirtConfigNodeDevice()
    cfgdev.parse_str(xmlstr)

    address = "%04x:%02x:%02x.%1x" % (
        cfgdev.pci_capability.domain,
        cfgdev.pci_capability.bus,
        cfgdev.pci_capability.slot,
        cfgdev.pci_capability.function)
    device = {
        "dev_id": cfgdev.name,
        "address": address,
        "product_id": "%04x" % cfgdev.pci_capability.product_id,
        "vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
        }
    device["numa_node"] = cfgdev.pci_capability.numa_node

    # requirement by DataBase Model
    device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
    device.update(_get_device_type(cfgdev, address))
    return device
def _get_pci_passthrough_devices(self):
    """Get host PCI devices information.

    Obtains pci devices information from libvirt, and returns
    as a JSON string.

    Each device information is a dictionary, with mandatory keys
    of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
    'label' and other optional device specific information.

    Refer to the objects/pci_device.py for more idea of these keys.

    :returns: a JSON string containing a list of the assignable PCI
              devices information
    """
    # Bail early if we know we can't support `listDevices` to avoid
    # repeated warnings within a periodic task
    if not getattr(self, '_list_devices_supported', True):
        return jsonutils.dumps([])

    try:
        dev_names = self._host.list_pci_devices() or []
    except libvirt.libvirtError as ex:
        error_code = ex.get_error_code()
        if error_code == libvirt.VIR_ERR_NO_SUPPORT:
            # Remember the failure so subsequent periodic runs bail
            # early instead of re-logging the warning.
            self._list_devices_supported = False
            LOG.warning(_LW("URI %(uri)s does not support "
                            "listDevices: %(error)s"),
                        {'uri': self._uri(), 'error': ex})
            return jsonutils.dumps([])
        else:
            raise

    pci_info = []
    for name in dev_names:
        pci_info.append(self._get_pcidev_info(name))

    return jsonutils.dumps(pci_info)
def _has_numa_support(self):
    """Report whether guest NUMA topologies can be used on this host.

    This means that the host can support LibvirtConfigGuestNUMATune
    and the nodeset field in LibvirtConfigGuestMemoryBackingPage.
    """
    # Refuse libvirt versions with known-broken NUMA support, warning
    # the operator only once per driver instance.
    for ver in BAD_LIBVIRT_NUMA_VERSIONS:
        if self._host.has_version(ver):
            if not getattr(self, '_bad_libvirt_numa_version_warn', False):
                LOG.warning(_LW('You are running with libvirt version %s '
                                'which is known to have broken NUMA support. '
                                'Consider patching or updating libvirt on '
                                'this host if you need NUMA support.'),
                            self._version_to_string(ver))
                self._bad_libvirt_numa_version_warn = True
            return False

    # Minimum libvirt version differs by host CPU architecture.
    support_matrix = {(arch.I686, arch.X86_64): MIN_LIBVIRT_NUMA_VERSION,
                      (arch.PPC64,
                       arch.PPC64LE): MIN_LIBVIRT_NUMA_VERSION_PPC}
    caps = self._host.get_capabilities()
    is_supported = False
    for archs, libvirt_ver in support_matrix.items():
        if ((caps.host.cpu.arch in archs) and
                self._host.has_min_version(libvirt_ver,
                                           MIN_QEMU_NUMA_HUGEPAGE_VERSION,
                                           host.HV_DRIVER_QEMU)):
            is_supported = True
    return is_supported
def _has_hugepage_support(self):
    """Report whether guest memory can be backed by hugepages.

    This means that the host can support multiple values for the size
    field in LibvirtConfigGuestMemoryBackingPage.
    """
    supported_archs = (arch.I686, arch.X86_64, arch.PPC64LE, arch.PPC64)
    caps = self._host.get_capabilities()
    if caps.host.cpu.arch not in supported_archs:
        return False
    return self._host.has_min_version(MIN_LIBVIRT_HUGEPAGE_VERSION,
                                      MIN_QEMU_NUMA_HUGEPAGE_VERSION,
                                      host.HV_DRIVER_QEMU)
def _get_host_numa_topology(self):
    """Build a NUMATopology describing the host's usable NUMA cells.

    :returns: objects.NUMATopology restricted to the CPUs in
        vcpu_pin_set (when configured) that are online, or None when
        NUMA is unsupported or libvirt reports no cell information
    """
    if not self._has_numa_support():
        return

    caps = self._host.get_capabilities()
    topology = caps.host.topology

    if topology is None or not topology.cells:
        return

    cells = []
    allowed_cpus = hardware.get_vcpu_pin_set()
    online_cpus = self._host.get_online_cpus()
    if allowed_cpus:
        allowed_cpus &= online_cpus
    else:
        allowed_cpus = online_cpus

    # NOTE(review): declared with an explicit `self` parameter but
    # called below as a plain function -- works, though unconventional.
    def _get_reserved_memory_for_cell(self, cell_id, page_size):
        cell = self._reserved_hugepages.get(cell_id, {})
        return cell.get(page_size, 0)

    for cell in topology.cells:
        cpuset = set(cpu.id for cpu in cell.cpus)
        # Deduplicated thread-sibling sets; CPUs with no sibling info
        # contribute an empty tuple.
        siblings = sorted(map(set,
                              set(tuple(cpu.siblings)
                                    if cpu.siblings else ()
                                  for cpu in cell.cpus)
                              ))
        cpuset &= allowed_cpus
        siblings = [sib & allowed_cpus for sib in siblings]
        # Filter out singles and empty sibling sets that may be left
        siblings = [sib for sib in siblings if len(sib) > 1]
        mempages = []
        if self._has_hugepage_support():
            mempages = [
                objects.NUMAPagesTopology(
                    size_kb=pages.size,
                    total=pages.total,
                    used=0,
                    reserved=_get_reserved_memory_for_cell(
                        self, cell.id, pages.size))
                for pages in cell.mempages]

        cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
                                memory=cell.memory / units.Ki,
                                cpu_usage=0, memory_usage=0,
                                siblings=siblings,
                                pinned_cpus=set([]),
                                mempages=mempages)
        cells.append(cell)

    return objects.NUMATopology(cells=cells)
def get_all_volume_usage(self, context, compute_host_bdms):
    """Return usage info for volumes attached to vms on
    a given host.

    :param compute_host_bdms: list of dicts, each carrying an
        'instance' and its 'instance_bdms' block device mappings
    :returns: list of per-volume stats dicts with read/write request
        and byte counters
    """
    vol_usage = []

    for host_bdm in compute_host_bdms:
        instance = host_bdm['instance']
        for bdm in host_bdm['instance_bdms']:
            mountpoint = bdm['device_name']
            # libvirt identifies devices without the /dev/ prefix.
            if mountpoint.startswith('/dev/'):
                mountpoint = mountpoint[len('/dev/'):]
            volume_id = bdm['volume_id']

            LOG.debug("Trying to get stats for the volume %s",
                      volume_id, instance=instance)
            vol_stats = self.block_stats(instance, mountpoint)

            if not vol_stats:
                continue

            rd_req, rd_bytes, wr_req, wr_bytes = vol_stats[:4]
            stats = dict(volume=volume_id,
                         instance=instance,
                         rd_req=rd_req,
                         rd_bytes=rd_bytes,
                         wr_req=wr_req,
                         wr_bytes=wr_bytes)
            LOG.debug(
                "Got volume usage stats for the volume=%(volume)s,"
                " rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
                "wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
                stats, instance=instance)
            vol_usage.append(stats)

    return vol_usage
def block_stats(self, instance, disk_id):
    """Note that this function takes an instance name.

    :param instance: the instance whose disk stats to read
    :param disk_id: the guest device name of the disk
    :returns: the libvirt blockStats result for the device, or None
        when the domain or the device cannot be found (errors are
        logged, not raised)
    """
    try:
        guest = self._host.get_guest(instance)

        # TODO(sahid): We are converting all calls from a
        # virDomain object to use nova.virt.libvirt.Guest.
        # We should be able to remove domain at the end.
        domain = guest._domain
        return domain.blockStats(disk_id)
    except libvirt.libvirtError as e:
        errcode = e.get_error_code()
        LOG.info(_LI('Getting block stats failed, device might have '
                     'been detached. Instance=%(instance_name)s '
                     'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
                 {'instance_name': instance.name, 'disk': disk_id,
                  'errcode': errcode, 'e': e},
                 instance=instance)
    except exception.InstanceNotFound:
        LOG.info(_LI('Could not find domain in libvirt for instance %s. '
                     'Cannot get block stats for device'), instance.name,
                 instance=instance)
def get_console_pool_info(self, console_type):
    """Return placeholder console pool connection details.

    TODO(mdragon): console proxy should be implemented for libvirt,
    in case someone wants to use it with kvm or such. For now return
    fake data.
    """
    pool_info = {
        'address': '127.0.0.1',
        'username': 'fakeuser',
        'password': 'fakepassword',
    }
    return pool_info
def refresh_security_group_rules(self, security_group_id):
    """Delegate refreshing a security group's rules to the firewall driver."""
    self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_instance_security_rules(self, instance):
    """Delegate refreshing an instance's security rules to the firewall driver."""
    self.firewall_driver.refresh_instance_security_rules(instance)
def get_available_resource(self, nodename):
    """Retrieve resource information.

    This method is called when nova-compute launches, and
    as part of a periodic task that records the results in the DB.

    :param nodename: unused in this driver
    :returns: dictionary containing resource info
    """
    disk_info_dict = self._get_local_gb_info()
    data = {}

    # NOTE(dprince): calling capabilities before getVersion works around
    # an initialization issue with some versions of Libvirt (1.0.5.5).
    # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
    # See: https://bugs.launchpad.net/nova/+bug/1215593
    data["supported_instances"] = self._get_instance_capabilities()

    data["vcpus"] = self._get_vcpu_total()
    data["memory_mb"] = self._host.get_memory_mb_total()
    data["local_gb"] = disk_info_dict['total']
    data["vcpus_used"] = self._get_vcpu_used()
    data["memory_mb_used"] = self._host.get_memory_mb_used()
    data["local_gb_used"] = disk_info_dict['used']
    data["hypervisor_type"] = self._host.get_driver_type()
    data["hypervisor_version"] = self._host.get_version()
    data["hypervisor_hostname"] = self._host.get_hostname()
    # TODO(berrange): why do we bother converting the
    # libvirt capabilities XML into a special JSON format ?
    # The data format is different across all the drivers
    # so we could just return the raw capabilities XML
    # which 'compare_cpu' could use directly
    #
    # That said, arch_filter.py now seems to rely on
    # the libvirt drivers format which suggests this
    # data format needs to be standardized across drivers
    data["cpu_info"] = jsonutils.dumps(self._get_cpu_info())

    # Conservative estimate of disk actually available to new
    # instances: free space minus the over-committed virtual sizes.
    disk_free_gb = disk_info_dict['free']
    disk_over_committed = self._get_disk_over_committed_size_total()
    available_least = disk_free_gb * units.Gi - disk_over_committed
    data['disk_available_least'] = available_least / units.Gi

    data['pci_passthrough_devices'] = \
        self._get_pci_passthrough_devices()

    numa_topology = self._get_host_numa_topology()
    if numa_topology:
        data['numa_topology'] = numa_topology._to_json()
    else:
        data['numa_topology'] = None

    return data
def check_instance_shared_storage_local(self, context, instance):
    """Check if instance files located on shared storage.

    This runs check on the destination host, and then calls
    back to the source host to check the results.

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    :returns:
        - tempfile: A dict containing the tempfile info on the
          destination host
        - None:
          1. If the instance path is not existing.
          2. If the image backend is shared block storage type.
    """
    if self.image_backend.backend().is_shared_block_storage():
        return None

    dirpath = libvirt_utils.get_instance_path(instance)

    if not os.path.exists(dirpath):
        return None

    fd, tmp_file = tempfile.mkstemp(dir=dirpath)
    LOG.debug("Creating tmpfile %s to verify with other "
              "compute node that the instance is on "
              "the same shared storage.",
              tmp_file, instance=instance)
    # Only the file's existence matters; the peer checks for the path.
    os.close(fd)
    return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
    """Check whether the peer host's shared-storage test file is visible.

    :param context: security context (unused)
    :param data: dict with key 'filename' produced by
        check_instance_shared_storage_local on the other host
    :returns: True if the file exists locally, i.e. the instance
        directory lives on storage shared with the peer host
    """
    filename = data['filename']
    return os.path.exists(filename)
def check_instance_shared_storage_cleanup(self, context, data):
    """Remove the shared-storage test file created on this host.

    :param data: dict with key 'filename' from
        check_instance_shared_storage_local
    """
    fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
                                       src_compute_info, dst_compute_info,
                                       block_migration=False,
                                       disk_over_commit=False):
    """Check if it is possible to execute live migration.

    This runs checks on the destination host, and then calls
    back to the source host to check the results.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param src_compute_info: compute info of the source host
    :param dst_compute_info: compute info of this (destination) host
    :param block_migration: if true, prepare for block migration
    :param disk_over_commit: if true, allow disk over commit
    :returns: a LibvirtLiveMigrateData object
    """
    disk_available_gb = dst_compute_info['disk_available_least']
    disk_available_mb = (
        (disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)

    # Compare CPU
    if not instance.vcpu_model or not instance.vcpu_model.model:
        # No stored guest CPU model: validate against the source
        # host's CPU info instead.
        source_cpu_info = src_compute_info['cpu_info']
        self._compare_cpu(None, source_cpu_info, instance)
    else:
        self._compare_cpu(instance.vcpu_model, None, instance)

    # Create file on storage, to be checked on source host
    filename = self._create_shared_storage_test_file(instance)

    data = objects.LibvirtLiveMigrateData()
    data.filename = filename
    data.image_type = CONF.libvirt.images_type
    # Notes(eliqiao): block_migration and disk_over_commit are not
    # nullable, so just don't set them if they are None
    if block_migration is not None:
        data.block_migration = block_migration
    if disk_over_commit is not None:
        data.disk_over_commit = disk_over_commit
    data.disk_available_mb = disk_available_mb
    return data
def cleanup_live_migration_destination_check(self, context,
                                             dest_check_data):
    """Do required cleanup on dest host after check_can_live_migrate calls

    :param context: security context
    :param dest_check_data: result of check_can_live_migrate_destination,
        carrying the shared-storage test file name to remove
    """
    filename = dest_check_data.filename
    self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
                                  dest_check_data,
                                  block_device_info=None):
    """Check if it is possible to execute live migration.

    This checks if the live migration can succeed, based on the
    results from check_can_live_migrate_destination.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param dest_check_data: result of check_can_live_migrate_destination
    :param block_device_info: result of _get_instance_block_device_info
    :returns: a LibvirtLiveMigrateData object
    :raises exception.InvalidLocalStorage: block migration requested on
        shared storage
    :raises exception.InvalidSharedStorage: non-block migration requested
        without shared storage
    :raises exception.MigrationPreCheckError: mapped volumes cannot be
        block migrated with this libvirt version or configuration
    """
    # Accept legacy dict results by converting to the versioned object.
    if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
        md_obj = objects.LibvirtLiveMigrateData()
        md_obj.from_legacy_dict(dest_check_data)
        dest_check_data = md_obj

    # Checking shared storage connectivity
    # if block migration, instances_paths should not be on shared storage.
    source = CONF.host

    dest_check_data.is_shared_instance_path = (
        self._check_shared_storage_test_file(
            dest_check_data.filename, instance))

    dest_check_data.is_shared_block_storage = (
        self._is_shared_block_storage(instance, dest_check_data,
                                      block_device_info))

    disk_info_text = self.get_instance_disk_info(
        instance, block_device_info=block_device_info)
    booted_from_volume = self._is_booted_from_volume(instance,
                                                     disk_info_text)
    has_local_disk = self._has_local_disk(instance, disk_info_text)

    # If the API did not specify block_migration, infer it from the
    # storage layout.
    if 'block_migration' not in dest_check_data:
        dest_check_data.block_migration = (
            not dest_check_data.is_on_shared_storage())

    if dest_check_data.block_migration:
        # TODO(eliqiao): Once block_migration flag is removed from the API
        # we can safely remove the if condition
        if dest_check_data.is_on_shared_storage():
            reason = _("Block migration can not be used "
                       "with shared storage.")
            raise exception.InvalidLocalStorage(reason=reason, path=source)
        if 'disk_over_commit' in dest_check_data:
            self._assert_dest_node_has_enough_disk(context, instance,
                                    dest_check_data.disk_available_mb,
                                    dest_check_data.disk_over_commit,
                                    block_device_info)
        if block_device_info:
            bdm = block_device_info.get('block_device_mapping')
            # NOTE(pkoniszewski): libvirt from version 1.2.17 upwards
            # supports selective block device migration. It means that it
            # is possible to define subset of block devices to be copied
            # during migration. If they are not specified - block devices
            # won't be migrated. However, it does not work when live
            # migration is tunnelled through libvirt.
            if bdm and not self._host.has_min_version(
                    MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
                # NOTE(stpierre): if this instance has mapped volumes,
                # we can't do a block migration, since that will result
                # in volumes being copied from themselves to themselves,
                # which is a recipe for disaster.
                ver = ".".join([str(x) for x in
                                MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION])
                msg = (_('Cannot block migrate instance %(uuid)s with'
                         ' mapped volumes. Selective block device'
                         ' migration feature requires libvirt version'
                         ' %(libvirt_ver)s') %
                       {'uuid': instance.uuid, 'libvirt_ver': ver})
                LOG.error(msg, instance=instance)
                raise exception.MigrationPreCheckError(reason=msg)
            # NOTE(eliqiao): Selective disk migrations are not supported
            # with tunnelled block migrations so we can block them early.
            if (bdm and
                (self._block_migration_flags &
                 libvirt.VIR_MIGRATE_TUNNELLED != 0)):
                msg = (_('Cannot block migrate instance %(uuid)s with'
                         ' mapped volumes. Selective block device'
                         ' migration is not supported with tunnelled'
                         ' block migrations.') % {'uuid': instance.uuid})
                LOG.error(msg, instance=instance)
                raise exception.MigrationPreCheckError(reason=msg)
    elif not (dest_check_data.is_shared_block_storage or
              dest_check_data.is_shared_instance_path or
              (booted_from_volume and not has_local_disk)):
        reason = _("Live migration can not be used "
                   "without shared storage except "
                   "a booted from volume VM which "
                   "does not have a local disk.")
        raise exception.InvalidSharedStorage(reason=reason, path=source)

    # NOTE(mikal): include the instance directory name here because it
    # doesn't yet exist on the destination but we want to force that
    # same name to be used
    instance_path = libvirt_utils.get_instance_path(instance,
                                                    relative=True)
    dest_check_data.instance_relative_path = instance_path

    return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data,
                             block_device_info=None):
    """Check if all block storage of an instance can be shared
    between source and destination of a live migration.

    Returns true if the instance is volume backed and has no local disks,
    or if the image backend is the same on source and destination and the
    backend shares block storage between compute nodes.

    :param instance: nova.objects.instance.Instance object
    :param dest_check_data: dict with boolean fields image_type,
                            is_shared_instance_path, and is_volume_backed
    :param block_device_info: result of _get_instance_block_device_info
    """
    if (dest_check_data.obj_attr_is_set('image_type') and
            CONF.libvirt.images_type == dest_check_data.image_type and
            self.image_backend.backend().is_shared_block_storage()):
        # NOTE(dgenin): currently true only for RBD image backend
        return True

    if (dest_check_data.is_shared_instance_path and
            self.image_backend.backend().is_file_in_instance_path()):
        # NOTE(angdraug): file based image backends (Flat, Qcow2)
        # place block device files under the instance path
        return True

    # Volume-backed with no local disks reported by libvirt.
    if (dest_check_data.is_volume_backed and
            not bool(jsonutils.loads(
                self.get_instance_disk_info(instance,
                                            block_device_info)))):
        return True

    return False
def _assert_dest_node_has_enough_disk(self, context, instance,
                                      available_mb, disk_over_commit,
                                      block_device_info=None):
    """Checks if destination has enough disk for block migration."""
    # Libvirt supports qcow2 disk format,which is usually compressed
    # on compute nodes.
    # Real disk image (compressed) may enlarged to "virtual disk size",
    # that is specified as the maximum disk size.
    # (See qemu-img -f path-to-disk)
    # Scheduler recognizes destination host still has enough disk space
    # if real disk size < available disk size
    # if disk_over_commit is True,
    # otherwise virtual disk size < available disk size.
    available = available_mb * units.Mi if available_mb else 0

    disk_infos = jsonutils.loads(
        self.get_instance_disk_info(
            instance, block_device_info=block_device_info))

    # Over-commit only needs room for data actually written; without it
    # we must reserve the full virtual size of every disk.
    size_key = 'disk_size' if disk_over_commit else 'virt_disk_size'
    necessary = sum(int(info[size_key]) for info in disk_infos)

    # Check that available disk > necessary disk
    if (available - necessary) < 0:
        reason = (_('Unable to migrate %(instance_uuid)s: '
                    'Disk of instance is too large(available'
                    ' on destination host:%(available)s '
                    '< need:%(necessary)s)') %
                  {'instance_uuid': instance.uuid,
                   'available': available,
                   'necessary': necessary})
        raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, guest_cpu, host_cpu_str, instance):
    """Check the host is compatible with the requested CPU

    :param guest_cpu: nova.objects.VirtCPUModel or None
    :param host_cpu_str: JSON from _get_cpu_info() method
    :param instance: instance being checked, used for logging

    If the 'guest_cpu' parameter is not None, this will be
    validated for migration compatibility with the host.
    Otherwise the 'host_cpu_str' JSON string will be used for
    validation.

    :returns:
        None. if given cpu info is not compatible to this server,
        raise exception.
    """

    # NOTE(kchamart): Comparing host to guest CPU model for emulated
    # guests (<domain type='qemu'>) should not matter -- in this
    # mode (QEMU "TCG") the CPU is fully emulated in software and no
    # hardware acceleration, like KVM, is involved. So, skip the CPU
    # compatibility check for the QEMU domain type, and retain it for
    # KVM guests.
    if CONF.libvirt.virt_type not in ['kvm']:
        return

    if guest_cpu is None:
        # Rebuild a CPU config from the source host's JSON description.
        info = jsonutils.loads(host_cpu_str)
        LOG.info(_LI('Instance launched has CPU info: %s'), host_cpu_str)
        cpu = vconfig.LibvirtConfigCPU()
        cpu.arch = info['arch']
        cpu.model = info['model']
        cpu.vendor = info['vendor']
        cpu.sockets = info['topology']['sockets']
        cpu.cores = info['topology']['cores']
        cpu.threads = info['topology']['threads']
        for f in info['features']:
            cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
    else:
        cpu = self._vcpu_model_to_cpu_config(guest_cpu)

    u = ("http://libvirt.org/html/libvirt-libvirt-host.html#"
         "virCPUCompareResult")
    m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
    # unknown character exists in xml, then libvirt complains
    try:
        cpu_xml = cpu.to_xml()
        LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
        ret = self._host.compare_cpu(cpu_xml)
    except libvirt.libvirtError as e:
        error_code = e.get_error_code()
        if error_code == libvirt.VIR_ERR_NO_SUPPORT:
            # Hypervisor cannot compare CPUs; proceed without the check.
            LOG.debug("URI %(uri)s does not support cpu comparison. "
                      "It will be proceeded though. Error: %(error)s",
                      {'uri': self._uri(), 'error': e})
            return
        else:
            LOG.error(m, {'ret': e, 'u': u})
            raise exception.MigrationPreCheckError(
                reason=m % {'ret': e, 'u': u})

    # A non-positive comparison result means the CPUs are incompatible.
    if ret <= 0:
        LOG.error(m, {'ret': ret, 'u': u})
        raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self, instance):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file, instance=instance)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename, instance):
"""Confirms existence of the tmpfile under CONF.instances_path.
Cannot confirm tmpfile return False.
"""
# NOTE(tpatzig): if instances_path is a shared volume that is
# under heavy IO (many instances on many compute nodes),
# then checking the existence of the testfile fails,
# just because it takes longer until the client refreshes and new
# content gets visible.
# os.utime (like touch) on the directory forces the client to refresh.
os.utime(CONF.instances_path, None)
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
exists = False
else:
exists = True
LOG.debug('Check if temp file %s exists to indicate shared storage '
'is being used for migration. Exists? %s', tmp_file, exists,
instance=instance)
return exists
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = list(range(CONF.live_migration_retry_count))
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance.name)
greenthread.sleep(1)
    def filter_defer_apply_on(self):
        """Delegate to the configured firewall driver: defer rule apply."""
        self.firewall_driver.filter_defer_apply_on()
    def filter_defer_apply_off(self):
        """Delegate to the configured firewall driver: stop deferring."""
        self.firewall_driver.filter_defer_apply_off()
    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Spawning live_migration operation for distributing high-load.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param dest: destination host
        :param post_method:
            post operation method.
            expected nova.compute.manager._post_live_migration.
        :param recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager._rollback_live_migration.
        :param block_migration: if true, do block migration.
        :param migrate_data: a LibvirtLiveMigrateData object

        """
        # 'dest' will be substituted into 'migration_uri' so ensure
        # it doesn't contain any characters that could be used to
        # exploit the URI accepted by libvirt
        if not libvirt_utils.is_valid_hostname(dest):
            raise exception.InvalidHostname(hostname=dest)

        self._live_migration(context, instance, dest,
                             post_method, recover_method, block_migration,
                             migrate_data)
def live_migration_abort(self, instance):
"""Aborting a running live-migration.
:param instance: instance object that is in migration
"""
guest = self._host.get_guest(instance)
dom = guest._domain
try:
dom.abortJob()
except libvirt.libvirtError as e:
LOG.error(_LE("Failed to cancel migration %s"),
e, instance=instance)
raise
def _check_graphics_addresses_can_live_migrate(self, listen_addrs):
LOCAL_ADDRS = ('0.0.0.0', '127.0.0.1', '::', '::1')
local_vnc = CONF.vnc.vncserver_listen in LOCAL_ADDRS
local_spice = CONF.spice.server_listen in LOCAL_ADDRS
if ((CONF.vnc.enabled and not local_vnc) or
(CONF.spice.enabled and not local_spice)):
msg = _('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly, you'
' must configure the graphics (VNC and/or'
' SPICE) listen addresses to be either'
' the catch-all address (0.0.0.0 or ::) or'
' the local address (127.0.0.1 or ::1).')
raise exception.MigrationError(reason=msg)
if listen_addrs:
dest_local_vnc = listen_addrs.get('vnc') in LOCAL_ADDRS
dest_local_spice = listen_addrs.get('spice') in LOCAL_ADDRS
if ((CONF.vnc.enabled and not dest_local_vnc) or
(CONF.spice.enabled and not dest_local_spice)):
LOG.warning(_LW('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag, and the'
' graphics (VNC and/or SPICE) listen'
' addresses on the destination node do not'
' match the addresses on the source node.'
' Since the source node has listen'
' addresses set to either the catch-all'
' address (0.0.0.0 or ::) or the local'
' address (127.0.0.1 or ::1), the live'
' migration will succeed, but the VM will'
' continue to listen on the current'
' addresses.'))
def _verify_serial_console_is_disabled(self):
if CONF.serial_console.enabled:
msg = _('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly you'
' must either disable serial console or'
' upgrade your libvirt version.')
raise exception.MigrationError(reason=msg)
    def _live_migration_operation(self, context, instance, dest,
                                  block_migration, migrate_data, guest,
                                  device_names):
        """Invoke the live migration operation

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param dest: destination host
        :param block_migration: if true, do block migration.
        :param migrate_data: a LibvirtLiveMigrateData object
        :param guest: the guest domain object
        :param device_names: list of device names that are being migrated with
            instance

        This method is intended to be run in a background thread and will
        block that thread until the migration is finished or failed.
        """
        try:
            if migrate_data.block_migration:
                migration_flags = self._block_migration_flags
            else:
                migration_flags = self._live_migration_flags

            listen_addrs = libvirt_migrate.graphics_listen_addrs(
                migrate_data)

            migratable_flag = self._host.is_migratable_xml_flag()
            if not migratable_flag or not listen_addrs:
                # In this context want to ensure we do not have to migrate
                # graphic or serial consoles since we can't update guest's
                # domain XML to make it handle destination host.
                # TODO(alexs-h): These checks could be moved to the
                # check_can_live_migrate_destination/source phase
                self._check_graphics_addresses_can_live_migrate(listen_addrs)
                self._verify_serial_console_is_disabled()

            # When the destination supplied a dedicated migration address,
            # route the migration stream to it instead of 'dest'.
            if ('target_connect_addr' in migrate_data and
                    migrate_data.target_connect_addr is not None):
                dest = migrate_data.target_connect_addr

            new_xml_str = None
            params = None
            if (self._host.is_migratable_xml_flag() and (
                    listen_addrs or migrate_data.bdms)):
                new_xml_str = libvirt_migrate.get_updated_guest_xml(
                    # TODO(sahid): It's not a really well idea to pass
                    # the method _get_volume_config and we should to find
                    # a way to avoid this in future.
                    guest, migrate_data, self._get_volume_config)
            if self._host.has_min_version(
                    MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
                params = {
                    'bandwidth': CONF.libvirt.live_migration_bandwidth,
                    'destination_xml': new_xml_str,
                    'migrate_disks': device_names,
                }
                # NOTE(pkoniszewski): Because of precheck which blocks
                # tunnelled block live migration with mapped volumes we
                # can safely remove migrate_disks when tunnelling is on.
                # Otherwise we will block all tunnelled block migrations,
                # even when an instance does not have volumes mapped.
                # This is because selective disk migration is not
                # supported in tunnelled block live migration. Also we
                # cannot fallback to migrateToURI2 in this case because of
                # bug #1398999
                if (migration_flags &
                    libvirt.VIR_MIGRATE_TUNNELLED != 0):
                    params.pop('migrate_disks')

            guest.migrate(self._live_migration_uri(dest),
                          flags=migration_flags,
                          params=params,
                          domain_xml=new_xml_str,
                          bandwidth=CONF.libvirt.live_migration_bandwidth)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Live Migration failure: %s"), e,
                          instance=instance)

        # If 'migrateToURI' fails we don't know what state the
        # VM instances on each host are in. Possibilities include
        #
        #  1. src==running, dst==none
        #
        #     Migration failed & rolled back, or never started
        #
        #  2. src==running, dst==paused
        #
        #     Migration started but is still ongoing
        #
        #  3. src==paused,  dst==paused
        #
        #     Migration data transfer completed, but switchover
        #     is still ongoing, or failed
        #
        #  4. src==paused,  dst==running
        #
        #     Migration data transfer completed, switchover
        #     happened but cleanup on source failed
        #
        #  5. src==none,    dst==running
        #
        #     Migration fully succeeded.
        #
        # Libvirt will aim to complete any migration operation
        # or roll it back. So even if the migrateToURI call has
        # returned an error, if the migration was not finished
        # libvirt should clean up.
        #
        # So we take the error raise here with a pinch of salt
        # and rely on the domain job info status to figure out
        # what really happened to the VM, which is a much more
        # reliable indicator.
        #
        # In particular we need to try very hard to ensure that
        # Nova does not "forget" about the guest. ie leaving it
        # running on a different host to the one recorded in
        # the database, as that would be a serious resource leak

        LOG.debug("Migration operation thread has finished",
                  instance=instance)
@staticmethod
def _migration_downtime_steps(data_gb):
'''Calculate downtime value steps and time between increases.
:param data_gb: total GB of RAM and disk to transfer
This looks at the total downtime steps and upper bound
downtime value and uses an exponential backoff. So initially
max downtime is increased by small amounts, and as time goes
by it is increased by ever larger amounts
For example, with 10 steps, 30 second step delay, 3 GB
of RAM and 400ms target maximum downtime, the downtime will
be increased every 90 seconds in the following progression:
- 0 seconds -> set downtime to 37ms
- 90 seconds -> set downtime to 38ms
- 180 seconds -> set downtime to 39ms
- 270 seconds -> set downtime to 42ms
- 360 seconds -> set downtime to 46ms
- 450 seconds -> set downtime to 55ms
- 540 seconds -> set downtime to 70ms
- 630 seconds -> set downtime to 98ms
- 720 seconds -> set downtime to 148ms
- 810 seconds -> set downtime to 238ms
- 900 seconds -> set downtime to 400ms
This allows the guest a good chance to complete migration
with a small downtime value.
'''
downtime = CONF.libvirt.live_migration_downtime
steps = CONF.libvirt.live_migration_downtime_steps
delay = CONF.libvirt.live_migration_downtime_delay
# TODO(hieulq): Need to move min/max value into the config option,
# currently oslo_config will raise ValueError instead of setting
# option value to its min/max.
if downtime < nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_MIN:
downtime = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_MIN
if steps < nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_STEPS_MIN:
steps = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_STEPS_MIN
if delay < nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_DELAY_MIN:
delay = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_DELAY_MIN
delay = int(delay * data_gb)
offset = downtime / float(steps + 1)
base = (downtime - offset) ** (1 / float(steps))
for i in range(steps + 1):
yield (int(delay * i), int(offset + base ** i))
    def _live_migration_copy_disk_paths(self, context, instance, guest):
        '''Get list of disks to copy during migration

        :param context: security context
        :param instance: the instance being migrated
        :param guest: the Guest instance being migrated

        Get the list of disks to copy during migration.

        :returns: a list of local source paths and a list of device names to
            copy
        '''
        disk_paths = []
        device_names = []
        block_devices = []

        # TODO(pkoniszewski): Remove version check when we bump min libvirt
        # version to >= 1.2.17.
        if (self._block_migration_flags &
                libvirt.VIR_MIGRATE_TUNNELLED == 0 and
                self._host.has_min_version(
                    MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION)):
            # Collect the device names of all volume-backed disks so they
            # can be excluded from the copy list built below.
            bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
            block_device_info = driver.get_block_device_info(instance,
                                                             bdm_list)

            block_device_mappings = driver.block_device_info_get_mapping(
                block_device_info)
            for bdm in block_device_mappings:
                device_name = str(bdm['mount_device'].rsplit('/', 1)[1])
                block_devices.append(device_name)

        for dev in guest.get_all_disks():
            # Skip read-only/shared devices, non file/block sources, and
            # the volume-backed devices gathered above.
            if dev.readonly or dev.shareable:
                continue
            if dev.source_type not in ["file", "block"]:
                continue
            if dev.target_dev in block_devices:
                continue
            disk_paths.append(dev.source_path)
            device_names.append(dev.target_dev)
        return (disk_paths, device_names)
def _live_migration_data_gb(self, instance, disk_paths):
'''Calculate total amount of data to be transferred
:param instance: the nova.objects.Instance being migrated
:param disk_paths: list of disk paths that are being migrated
with instance
Calculates the total amount of data that needs to be
transferred during the live migration. The actual
amount copied will be larger than this, due to the
guest OS continuing to dirty RAM while the migration
is taking place. So this value represents the minimal
data size possible.
:returns: data size to be copied in GB
'''
ram_gb = instance.flavor.memory_mb * units.Mi / units.Gi
if ram_gb < 2:
ram_gb = 2
disk_gb = 0
for path in disk_paths:
try:
size = os.stat(path).st_size
size_gb = (size / units.Gi)
if size_gb < 2:
size_gb = 2
disk_gb += size_gb
except OSError as e:
LOG.warning(_LW("Unable to stat %(disk)s: %(ex)s"),
{'disk': path, 'ex': e})
# Ignore error since we don't want to break
# the migration monitoring thread operation
return ram_gb + disk_gb
def _get_migration_flags(self, is_block_migration):
if is_block_migration:
return self._block_migration_flags
return self._live_migration_flags
    def _live_migration_monitor(self, context, instance, guest,
                                dest, post_method,
                                recover_method, block_migration,
                                migrate_data, finish_event,
                                disk_paths):
        """Poll a running live migration job and drive it to a conclusion.

        Loops every 500ms inspecting the domain job info: applies queued
        migration tasks, raises the allowed downtime following
        _migration_downtime_steps, aborts the job when it exceeds its
        progress/completion timeouts, optionally switches to post-copy,
        and finally invokes post_method or recover_method depending on
        how the job ended.

        :param context: security context
        :param instance: the instance being migrated
        :param guest: the Guest being migrated
        :param dest: destination host
        :param post_method: callback invoked on successful completion
        :param recover_method: callback invoked on failure/cancellation
        :param block_migration: if true, this is a block migration
        :param migrate_data: a LibvirtLiveMigrateData object
        :param finish_event: eventlet Event signalled when the operation
            thread has finished
        :param disk_paths: list of disk paths being copied with the instance
        """
        on_migration_failure = deque()
        data_gb = self._live_migration_data_gb(instance, disk_paths)
        downtime_steps = list(self._migration_downtime_steps(data_gb))
        migration = migrate_data.migration
        curdowntime = None

        migration_flags = self._get_migration_flags(
            migrate_data.block_migration)

        n = 0
        start = time.time()
        progress_time = start
        progress_watermark = None
        previous_data_remaining = -1
        is_post_copy_enabled = self._is_post_copy_enabled(migration_flags)
        while True:
            info = guest.get_job_info()

            if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
                # Either still running, or failed or completed,
                # lets untangle the mess
                if not finish_event.ready():
                    LOG.debug("Operation thread is still running",
                              instance=instance)
                else:
                    info.type = libvirt_migrate.find_job_type(guest, instance)
                    LOG.debug("Fixed incorrect job type to be %d",
                              info.type, instance=instance)

            if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
                # Migration is not yet started
                LOG.debug("Migration not running yet",
                          instance=instance)
            elif info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED:
                # Migration is still running
                #
                # This is where we wire up calls to change live
                # migration status. eg change max downtime, cancel
                # the operation, change max bandwidth
                libvirt_migrate.run_tasks(guest, instance,
                                          self.active_migrations,
                                          on_migration_failure,
                                          migration,
                                          is_post_copy_enabled)

                now = time.time()
                elapsed = now - start

                # Track the low watermark of data remaining; progress_time
                # is only advanced when the transfer actually moves forward.
                if ((progress_watermark is None) or
                        (progress_watermark == 0) or
                        (progress_watermark > info.data_remaining)):
                    progress_watermark = info.data_remaining
                    progress_time = now

                progress_timeout = CONF.libvirt.live_migration_progress_timeout
                completion_timeout = int(
                    CONF.libvirt.live_migration_completion_timeout * data_gb)
                if libvirt_migrate.should_abort(instance, now, progress_time,
                                                progress_timeout, elapsed,
                                                completion_timeout,
                                                migration.status):
                    try:
                        guest.abort_job()
                    except libvirt.libvirtError as e:
                        LOG.warning(_LW("Failed to abort migration %s"),
                                    e, instance=instance)
                        self._clear_empty_migration(instance)
                        raise

                if (is_post_copy_enabled and
                        libvirt_migrate.should_switch_to_postcopy(
                            info.memory_iteration, info.data_remaining,
                            previous_data_remaining, migration.status)):
                    libvirt_migrate.trigger_postcopy_switch(guest,
                                                            instance,
                                                            migration)
                previous_data_remaining = info.data_remaining

                curdowntime = libvirt_migrate.update_downtime(
                    guest, instance, curdowntime,
                    downtime_steps, elapsed)

                # We loop every 500ms, so don't log on every
                # iteration to avoid spamming logs for long
                # running migrations. Just once every 5 secs
                # is sufficient for developers to debug problems.
                # We log once every 30 seconds at info to help
                # admins see slow running migration operations
                # when debug logs are off.
                if (n % 10) == 0:
                    # Ignoring memory_processed, as due to repeated
                    # dirtying of data, this can be way larger than
                    # memory_total. Best to just look at what's
                    # remaining to copy and ignore what's done already
                    #
                    # TODO(berrange) perhaps we could include disk
                    # transfer stats in the progress too, but it
                    # might make memory info more obscure as large
                    # disk sizes might dwarf memory size
                    remaining = 100
                    if info.memory_total != 0:
                        remaining = round(info.memory_remaining *
                                          100 / info.memory_total)

                    libvirt_migrate.save_stats(instance, migration,
                                               info, remaining)

                    lg = LOG.debug
                    if (n % 60) == 0:
                        lg = LOG.info

                    lg(_LI("Migration running for %(secs)d secs, "
                           "memory %(remaining)d%% remaining; "
                           "(bytes processed=%(processed_memory)d, "
                           "remaining=%(remaining_memory)d, "
                           "total=%(total_memory)d)"),
                       {"secs": n / 2, "remaining": remaining,
                        "processed_memory": info.memory_processed,
                        "remaining_memory": info.memory_remaining,
                        "total_memory": info.memory_total}, instance=instance)
                    if info.data_remaining > progress_watermark:
                        lg(_LI("Data remaining %(remaining)d bytes, "
                               "low watermark %(watermark)d bytes "
                               "%(last)d seconds ago"),
                           {"remaining": info.data_remaining,
                            "watermark": progress_watermark,
                            "last": (now - progress_time)}, instance=instance)

                n = n + 1
            elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
                # Migration is all done
                LOG.info(_LI("Migration operation has completed"),
                         instance=instance)
                post_method(context, instance, dest, block_migration,
                            migrate_data)
                break
            elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED:
                # Migration did not succeed
                LOG.error(_LE("Migration operation has aborted"),
                          instance=instance)
                libvirt_migrate.run_recover_tasks(self._host, guest, instance,
                                                  on_migration_failure)
                recover_method(context, instance, dest, block_migration,
                               migrate_data)
                break
            elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED:
                # Migration was stopped by admin
                LOG.warning(_LW("Migration operation was cancelled"),
                            instance=instance)
                libvirt_migrate.run_recover_tasks(self._host, guest, instance,
                                                  on_migration_failure)
                recover_method(context, instance, dest, block_migration,
                               migrate_data, migration_status='cancelled')
                break
            else:
                LOG.warning(_LW("Unexpected migration job type: %d"),
                            info.type, instance=instance)

            time.sleep(0.5)
        self._clear_empty_migration(instance)
def _clear_empty_migration(self, instance):
try:
del self.active_migrations[instance.uuid]
except KeyError:
LOG.warning(_LW("There are no records in active migrations "
"for instance"), instance=instance)
    def _live_migration(self, context, instance, dest, post_method,
                        recover_method, block_migration,
                        migrate_data):
        """Do live migration.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param dest: destination host
        :param post_method:
            post operation method.
            expected nova.compute.manager._post_live_migration.
        :param recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager._rollback_live_migration.
        :param block_migration: if true, do block migration.
        :param migrate_data: a LibvirtLiveMigrateData object

        This fires off a new thread to run the blocking migration
        operation, and then this thread monitors the progress of
        migration and controls its operation
        """
        guest = self._host.get_guest(instance)

        disk_paths = []
        device_names = []
        if migrate_data.block_migration:
            # Only block migrations copy disks; the helper excludes
            # read-only, shared and volume-backed devices.
            disk_paths, device_names = self._live_migration_copy_disk_paths(
                context, instance, guest)

        opthread = utils.spawn(self._live_migration_operation,
                               context, instance, dest,
                               block_migration,
                               migrate_data, guest,
                               device_names)

        finish_event = eventlet.event.Event()
        self.active_migrations[instance.uuid] = deque()

        def thread_finished(thread, event):
            # Fires when the operation thread exits (success or failure),
            # letting the monitor loop detect that the job is over.
            LOG.debug("Migration operation thread notification",
                      instance=instance)
            event.send()
        opthread.link(thread_finished, finish_event)

        # Let eventlet schedule the new thread right away
        time.sleep(0)

        try:
            LOG.debug("Starting monitoring of live migration",
                      instance=instance)
            self._live_migration_monitor(context, instance, guest, dest,
                                         post_method, recover_method,
                                         block_migration, migrate_data,
                                         finish_event, disk_paths)
        except Exception as ex:
            LOG.warning(_LW("Error monitoring migration: %(ex)s"),
                        {"ex": ex}, instance=instance, exc_info=True)
            raise
        finally:
            LOG.debug("Live migration monitoring is all done",
                      instance=instance)
def _is_post_copy_enabled(self, migration_flags):
if self._is_post_copy_available():
if (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0:
return True
return False
def live_migration_force_complete(self, instance):
try:
self.active_migrations[instance.uuid].append('force-complete')
except KeyError:
raise exception.NoActiveMigrationForInstance(
instance_id=instance.uuid)
def _try_fetch_image(self, context, path, image_id, instance,
fallback_from_host=None):
try:
libvirt_utils.fetch_image(context, path, image_id)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore on "
"image service, attempting to copy image "
"from %(host)s",
{'image_id': image_id, 'host': fallback_from_host})
libvirt_utils.copy_image(src=path, dest=path,
host=fallback_from_host,
receive=True)
def _fetch_instance_kernel_ramdisk(self, context, instance,
fallback_from_host=None):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance.kernel_id:
kernel_path = os.path.join(instance_dir, 'kernel')
# NOTE(dsanders): only fetch image if it's not available at
# kernel_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(kernel_path):
self._try_fetch_image(context,
kernel_path,
instance.kernel_id,
instance, fallback_from_host)
if instance.ramdisk_id:
ramdisk_path = os.path.join(instance_dir, 'ramdisk')
# NOTE(dsanders): only fetch image if it's not available at
# ramdisk_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(ramdisk_path):
self._try_fetch_image(context,
ramdisk_path,
instance.ramdisk_id,
instance, fallback_from_host)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
try:
self.destroy(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
finally:
# NOTE(gcb): Failed block live migration may leave instance
# directory at destination node, ensure it is always deleted.
is_shared_instance_path = True
if migrate_data:
is_shared_instance_path = migrate_data.is_shared_instance_path
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
shutil.rmtree(instance_dir)
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data):
        """Preparation live migration.

        Runs on the destination host: creates the instance directory and
        disk images when storage is not shared, connects volumes, plugs
        VIFs, and records destination addresses in migrate_data.

        :param context: security context
        :param instance: the instance arriving on this host
        :param block_device_info: instance volume information
        :param network_info: instance network information
        :param disk_info: JSON string of instance disk information, or None
        :param migrate_data: a LibvirtLiveMigrateData object
        :returns: the updated migrate_data object
        """
        if disk_info is not None:
            disk_info = jsonutils.loads(disk_info)

        LOG.debug('migrate_data in pre_live_migration: %s', migrate_data,
                  instance=instance)
        is_shared_block_storage = migrate_data.is_shared_block_storage
        is_shared_instance_path = migrate_data.is_shared_instance_path
        is_block_migration = migrate_data.block_migration

        if not is_shared_instance_path:
            instance_dir = libvirt_utils.get_instance_path_at_destination(
                instance, migrate_data)

            if os.path.exists(instance_dir):
                raise exception.DestinationDiskExists(path=instance_dir)

            LOG.debug('Creating instance directory: %s', instance_dir,
                      instance=instance)
            os.mkdir(instance_dir)

            # Recreate the disk.info file and in doing so stop the
            # imagebackend from recreating it incorrectly by inspecting the
            # contents of each file when using the Raw backend.
            if disk_info:
                image_disk_info = {}
                for info in disk_info:
                    image_file = os.path.basename(info['path'])
                    image_path = os.path.join(instance_dir, image_file)
                    image_disk_info[image_path] = info['type']

                LOG.debug('Creating disk.info with the contents: %s',
                          image_disk_info, instance=instance)

                image_disk_info_path = os.path.join(instance_dir,
                                                    'disk.info')
                libvirt_utils.write_to_file(image_disk_info_path,
                                            jsonutils.dumps(image_disk_info))

            if not is_shared_block_storage:
                # Ensure images and backing files are present.
                LOG.debug('Checking to make sure images and backing files are '
                          'present before live migration.', instance=instance)
                self._create_images_and_backing(
                    context, instance, instance_dir, disk_info,
                    fallback_from_host=instance.host)

            if (configdrive.required_by(instance) and
                    CONF.config_drive_format == 'iso9660'):
                # NOTE(pkoniszewski): Due to a bug in libvirt iso config
                # drive needs to be copied to destination prior to
                # migration when instance path is not shared and block
                # storage is not shared. Files that are already present
                # on destination are excluded from a list of files that
                # need to be copied to destination. If we don't do that
                # live migration will fail on copying iso config drive to
                # destination and writing to read-only device.
                # Please see bug/1246201 for more details.
                src = "%s:%s/disk.config" % (instance.host, instance_dir)
                self._remotefs.copy_file(src, instance_dir)

        if not is_block_migration:
            # NOTE(angdraug): when block storage is shared between source
            # and destination and instance path isn't (e.g. volume backed
            # or rbd backed instance), instance path on destination has to
            # be prepared

            # Required by Quobyte CI
            self._ensure_console_log_for_instance(instance)

            # if image has kernel and ramdisk, just download
            # following normal way.
            self._fetch_instance_kernel_ramdisk(context, instance)

        # Establishing connection to volume server.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)

        if len(block_device_mapping):
            LOG.debug('Connecting volumes before live migration.',
                      instance=instance)

        for bdm in block_device_mapping:
            connection_info = bdm['connection_info']
            # NOTE: disk_info is rebound here to a per-volume dict; the
            # parsed JSON disk list is no longer needed at this point.
            disk_info = blockinfo.get_info_from_bdm(
                instance, CONF.libvirt.virt_type,
                instance.image_meta, bdm)
            self._connect_volume(connection_info, disk_info)

        # We call plug_vifs before the compute manager calls
        # ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retry operation is necessary because continuously request comes,
        # concurrent request occurs to iptables, then it complains.
        LOG.debug('Plugging VIFs before live migration.', instance=instance)
        max_retry = CONF.live_migration_retry_count
        for cnt in range(max_retry):
            try:
                self.plug_vifs(instance, network_info)
                break
            except processutils.ProcessExecutionError:
                if cnt == max_retry - 1:
                    raise
                else:
                    LOG.warning(_LW('plug_vifs() failed %(cnt)d. Retry up to '
                                    '%(max_retry)d.'),
                                {'cnt': cnt,
                                 'max_retry': max_retry},
                                instance=instance)
                    greenthread.sleep(1)

        # Store vncserver_listen and latest disk device info
        # NOTE(review): migrate_data was already dereferenced above
        # (is_shared_block_storage etc.), so the falsy branch here looks
        # unreachable -- confirm before relying on it.
        if not migrate_data:
            migrate_data = objects.LibvirtLiveMigrateData(bdms=[])
        else:
            migrate_data.bdms = []
        migrate_data.graphics_listen_addr_vnc = CONF.vnc.vncserver_listen
        migrate_data.graphics_listen_addr_spice = CONF.spice.server_listen
        migrate_data.serial_listen_addr = \
            CONF.serial_console.proxyclient_address
        # Store live_migration_inbound_addr
        migrate_data.target_connect_addr = \
            CONF.libvirt.live_migration_inbound_addr
        migrate_data.supported_perf_events = self._supported_perf_events

        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            if connection_info.get('serial'):
                disk_info = blockinfo.get_info_from_bdm(
                    instance, CONF.libvirt.virt_type,
                    instance.image_meta, vol)

                bdmi = objects.LibvirtLiveMigrateBDMInfo()
                bdmi.serial = connection_info['serial']
                bdmi.connection_info = connection_info
                bdmi.bus = disk_info['bus']
                bdmi.dev = disk_info['dev']
                bdmi.type = disk_info['type']
                bdmi.format = disk_info.get('format')
                bdmi.boot_index = disk_info.get('boot_index')
                migrate_data.bdms.append(bdmi)

        return migrate_data
def _try_fetch_image_cache(self, image, fetch_func, context, filename,
image_id, instance, size,
fallback_from_host=None):
try:
image.cache(fetch_func=fetch_func,
context=context,
filename=filename,
image_id=image_id,
size=size)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore "
"on image service, attempting to copy "
"image from %(host)s",
{'image_id': image_id, 'host': fallback_from_host},
instance=instance)
def copy_from_host(target):
libvirt_utils.copy_image(src=target,
dest=target,
host=fallback_from_host,
receive=True)
image.cache(fetch_func=copy_from_host,
filename=filename)
    def _create_images_and_backing(self, context, instance, instance_dir,
                                   disk_info, fallback_from_host=None):
        """Ensure instance disk images and their backing files exist.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param instance_dir:
            instance path to use, calculated externally to handle block
            migrating an instance with an old style instance path
        :param disk_info:
            disk info specified in _get_instance_disk_info (list of dicts)
        :param fallback_from_host:
            host where we can retrieve images if the glance images are
            not available.
        """
        if not disk_info:
            disk_info = []

        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image, and
            # create backing file in case of qcow2.
            instance_disk = os.path.join(instance_dir, base)
            if not info['backing_file'] and not os.path.exists(instance_disk):
                libvirt_utils.create_image(info['type'], instance_disk,
                                           info['virt_disk_size'])
            elif info['backing_file']:
                # Creating backing file follows same way as spawning instances.
                cache_name = os.path.basename(info['backing_file'])

                image = self.image_backend.image(instance,
                                                 instance_disk,
                                                 CONF.libvirt.images_type)
                if cache_name.startswith('ephemeral'):
                    # The argument 'size' is used by image.cache to
                    # validate disk size retrieved from cache against
                    # the instance disk size (should always return OK)
                    # and ephemeral_size is used by _create_ephemeral
                    # to build the image if the disk is not already
                    # cached.
                    image.cache(
                        fetch_func=self._create_ephemeral,
                        fs_label=cache_name,
                        os_type=instance.os_type,
                        filename=cache_name,
                        size=info['virt_disk_size'],
                        ephemeral_size=info['virt_disk_size'] / units.Gi)
                elif cache_name.startswith('swap'):
                    inst_type = instance.get_flavor()
                    swap_mb = inst_type.swap
                    image.cache(fetch_func=self._create_swap,
                                filename="swap_%s" % swap_mb,
                                size=swap_mb * units.Mi,
                                swap_mb=swap_mb)
                else:
                    # Regular glance-backed disk: fetch via the image
                    # cache, with fallback to copying from another host.
                    self._try_fetch_image_cache(image,
                                                libvirt_utils.fetch_image,
                                                context, cache_name,
                                                instance.image_ref,
                                                instance,
                                                info['virt_disk_size'],
                                                fallback_from_host)

        # if image has kernel and ramdisk, just download
        # following normal way.
        self._fetch_instance_kernel_ramdisk(
            context, instance, fallback_from_host=fallback_from_host)
    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Clean up volume connections on the source host after a
        successful live migration.

        For every attached volume, re-fetch connection info from Cinder
        (accurate for this, the source, host) and disconnect it locally.

        :param context: security context
        :param instance: instance object that was migrated
        :param block_device_info: instance block device information
        :param migrate_data: implementation specific data dict (unused here)
        """
        # Disconnect from volume server
        block_device_mapping = driver.block_device_info_get_mapping(
                block_device_info)
        connector = self.get_volume_connector(instance)
        volume_api = self._volume_api
        for vol in block_device_mapping:
            # Retrieve connection info from Cinder's initialize_connection API.
            # The info returned will be accurate for the source server.
            volume_id = vol['connection_info']['serial']
            connection_info = volume_api.initialize_connection(context,
                                                               volume_id,
                                                               connector)

            # TODO(leeantho) The following multipath_id logic is temporary
            # and will be removed in the future once os-brick is updated
            # to handle multipath for drivers in a more efficient way.
            # For now this logic is needed to ensure the connection info
            # data is correct.

            # Pull out multipath_id from the bdm information. The
            # multipath_id can be placed into the connection info
            # because it is based off of the volume and will be the
            # same on the source and destination hosts.
            if 'multipath_id' in vol['connection_info']['data']:
                multipath_id = vol['connection_info']['data']['multipath_id']
                connection_info['data']['multipath_id'] = multipath_id

            disk_dev = vol['mount_device'].rpartition("/")[2]
            self._disconnect_volume(connection_info, disk_dev)
    def post_live_migration_at_source(self, context, instance, network_info):
        """Unplug VIFs from networks at source.

        :param context: security context
        :param instance: instance object reference
        :param network_info: instance network information
        """
        # The context argument is unused here; unplugging only needs the
        # instance and its network info.
        self.unplug_vifs(instance, network_info)
    def post_live_migration_at_destination(self, context,
                                           instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Post operation of live migration at destination host.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        :param block_device_info: instance block device information
        """
        # Define migrated instance, otherwise, suspend/destroy does not work.
        # In case of block migration, destination does not have
        # libvirt.xml
        disk_info = blockinfo.get_disk_info(
            CONF.libvirt.virt_type, instance,
            instance.image_meta, block_device_info)
        # write_to_disk=True persists the generated XML so later lifecycle
        # operations on this host can find it.
        xml = self._get_guest_xml(context, instance,
                                  network_info, disk_info,
                                  instance.image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)
        self._host.write_instance_config(xml)
def _get_instance_disk_info(self, instance_name, xml,
block_device_info=None):
"""Get the non-volume disk information from the domain xml
:param str instance_name: the name of the instance (domain)
:param str xml: the libvirt domain xml for the instance
:param dict block_device_info: block device info for BDMs
:returns disk_info: list of dicts with keys:
* 'type': the disk type (str)
* 'path': the disk path (str)
* 'virt_disk_size': the virtual disk size (int)
* 'backing_file': backing file of a disk image (str)
* 'disk_size': physical disk size (int)
* 'over_committed_disk_size': virt_disk_size - disk_size or 0
"""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
def find_nodes(doc, device_type):
return (doc.findall('.//devices/%s' % device_type),
doc.findall('.//devices/%s/source' % device_type),
doc.findall('.//devices/%s/driver' % device_type),
doc.findall('.//devices/%s/target' % device_type))
if (CONF.libvirt.virt_type == 'parallels' and
doc.find('os/type').text == vm_mode.EXE):
node_type = 'filesystem'
else:
node_type = 'disk'
(disk_nodes, path_nodes,
driver_nodes, target_nodes) = find_nodes(doc, node_type)
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file') or path_node.get('dev')
if (node_type == 'filesystem'):
target = target_nodes[cnt].attrib['dir']
else:
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type not in ['file', 'block']:
LOG.debug('skipping disk because it looks like a volume', path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
if disk_type == 'file':
if driver_nodes[cnt].get('type') == 'ploop':
dk_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
dk_size += os.path.getsize(fp)
else:
dk_size = int(os.path.getsize(path))
elif disk_type == 'block' and block_device_info:
dk_size = lvm.get_volume_size(path)
else:
LOG.debug('skipping disk %(path)s (%(target)s) - unable to '
'determine if volume',
{'path': path, 'target': target})
continue
disk_type = driver_nodes[cnt].get('type')
if disk_type in ("qcow2", "ploop"):
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk_api.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return disk_info
    def get_instance_disk_info(self, instance,
                               block_device_info=None):
        """Return the instance's non-volume disk information as JSON.

        Fetches the domain XML for the instance and delegates to
        _get_instance_disk_info; the resulting list of disk dicts is
        serialized with jsonutils.

        :param instance: instance object
        :param block_device_info: instance block device information
        :raises: exception.InstanceNotFound if libvirt cannot provide a
            description of the domain
        """
        try:
            guest = self._host.get_guest(instance)
            xml = guest.get_xml_desc()
        except libvirt.libvirtError as ex:
            # Any libvirt failure here is reported to callers uniformly as
            # InstanceNotFound after logging the underlying error code.
            error_code = ex.get_error_code()
            LOG.warning(_LW('Error from libvirt while getting description of '
                            '%(instance_name)s: [Error Code %(error_code)s] '
                            '%(ex)s'),
                        {'instance_name': instance.name,
                         'error_code': error_code,
                         'ex': ex},
                        instance=instance)
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        return jsonutils.dumps(
            self._get_instance_disk_info(instance.name, xml,
                                         block_device_info))
    def _get_disk_over_committed_size_total(self):
        """Return total over committed disk size for all instances.

        Iterates every instance domain known to libvirt, sums each disk's
        ``over_committed_disk_size`` (virtual size minus allocated size),
        and tolerates instances/disks disappearing concurrently.

        :returns: total over-committed size in bytes (int)
        """
        # Disk size that all instance uses : virtual_size - disk_size
        disk_over_committed_size = 0
        instance_domains = self._host.list_instance_domains()
        if not instance_domains:
            return disk_over_committed_size

        # Get all instance uuids
        instance_uuids = [dom.UUIDString() for dom in instance_domains]
        ctx = nova_context.get_admin_context()
        # Get instance object list by uuid filter
        filters = {'uuid': instance_uuids}
        # NOTE(ankit): objects.InstanceList.get_by_filters method is
        # getting called twice one is here and another in the
        # _update_available_resource method of resource_tracker. Since
        # _update_available_resource method is synchronized, there is a
        # possibility the instances list retrieved here to calculate
        # disk_over_committed_size would differ to the list you would get
        # in _update_available_resource method for calculating usages based
        # on instance utilization.
        local_instance_list = objects.InstanceList.get_by_filters(
            ctx, filters, use_slave=True)
        # Convert instance list to dictionary with instace uuid as key.
        local_instances = {inst.uuid: inst for inst in local_instance_list}

        # Get bdms by instance uuids
        bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
            ctx, instance_uuids)

        for dom in instance_domains:
            try:
                guest = libvirt_guest.Guest(dom)
                xml = guest.get_xml_desc()

                block_device_info = None
                if guest.uuid in local_instances \
                        and (bdms and guest.uuid in bdms):
                    # Get block device info for instance
                    block_device_info = driver.get_block_device_info(
                        local_instances[guest.uuid], bdms[guest.uuid])

                disk_infos = self._get_instance_disk_info(guest.name, xml,
                                 block_device_info=block_device_info)
                if not disk_infos:
                    continue

                for info in disk_infos:
                    disk_over_committed_size += int(
                        info['over_committed_disk_size'])
            except libvirt.libvirtError as ex:
                # Domain vanished or libvirt failed: log and keep counting
                # the remaining domains.
                error_code = ex.get_error_code()
                LOG.warning(_LW(
                    'Error from libvirt while getting description of '
                    '%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
                ), {'instance_name': guest.name,
                    'error_code': error_code,
                    'ex': ex})
            except OSError as e:
                if e.errno in (errno.ENOENT, errno.ESTALE):
                    LOG.warning(_LW('Periodic task is updating the host stat, '
                                    'it is trying to get disk %(i_name)s, '
                                    'but disk file was removed by concurrent '
                                    'operations such as resize.'),
                                {'i_name': guest.name})
                elif e.errno == errno.EACCES:
                    LOG.warning(_LW('Periodic task is updating the host stat, '
                                    'it is trying to get disk %(i_name)s, '
                                    'but access is denied. It is most likely '
                                    'due to a VM that exists on the compute '
                                    'node but is not managed by Nova.'),
                                {'i_name': guest.name})
                else:
                    raise
            except exception.VolumeBDMPathNotFound as e:
                LOG.warning(_LW('Periodic task is updating the host stats, '
                                'it is trying to get disk info for %(i_name)s, '
                                'but the backing volume block device was removed '
                                'by concurrent operations such as resize. '
                                'Error: %(error)s'),
                            {'i_name': guest.name,
                             'error': e})
            # NOTE(gtt116): give other tasks a chance.
            greenthread.sleep(0)
        return disk_over_committed_size
    def unfilter_instance(self, instance, network_info):
        """See comments of same method in firewall_driver."""
        # Thin delegation: the firewall driver owns all filtering logic.
        self.firewall_driver.unfilter_instance(instance,
                                               network_info=network_info)
    def get_available_nodes(self, refresh=False):
        """Return the list of hypervisor nodes managed by this driver.

        The libvirt driver always manages exactly one node - the local
        host - so the list contains a single hostname.  ``refresh`` is
        accepted for driver-interface compatibility and is unused here.
        """
        return [self._host.get_hostname()]
    def get_host_cpu_stats(self):
        """Return the current CPU state of the host."""
        # Delegates to the Host wrapper which queries libvirt.
        return self._host.get_cpu_stats()
    def get_host_uptime(self):
        """Returns the result of calling "uptime"."""
        # LANG=C forces unlocalized output; stderr is intentionally
        # discarded and only stdout is returned.
        out, err = utils.execute('env', 'LANG=C', 'uptime')
        return out
    def manage_image_cache(self, context, all_instances):
        """Manage the local cache of images.

        :param context: security context
        :param all_instances: instances known to the compute manager,
            used by the cache manager to decide which images are in use
        """
        self.image_cache_manager.update(context, all_instances)
    def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
                                  shared_storage=False):
        """Used only for cleanup in case migrate_disk_and_power_off fails.

        Restores the original instance directory from its ``_resize``
        backup and, for non-shared storage, removes the partially copied
        directory on the destination host.

        :param dest: destination host
        :param inst_base: instance directory path
        :param inst_base_resize: renamed backup of the instance directory
        :param shared_storage: True if source and dest share instance storage
        """
        try:
            if os.path.exists(inst_base_resize):
                utils.execute('rm', '-rf', inst_base)
                utils.execute('mv', inst_base_resize, inst_base)
                if not shared_storage:
                    self._remotefs.remove_dir(dest, inst_base)
        except Exception:
            # Best-effort cleanup: the caller is already re-raising the
            # original failure, so errors here are deliberately swallowed.
            pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
# NOTE (drwahl): Actually, there is a 3rd way: if images_type is rbd,
# it will always be shared storage
if CONF.libvirt.images_type == 'rbd':
return True
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
self._remotefs.create_file(dest, tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
self._remotefs.remove_file(dest, tmp_path)
except Exception:
pass
return shared_storage
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Power off the instance and copy its disks to ``dest``.

        First phase of a resize/migration on the source host: validates
        that no disallowed disk shrink is requested, powers the guest off,
        disconnects volumes, renames the instance directory to
        ``<path>_resize`` and copies the disk images to the destination
        (or recreates the directory locally on shared storage).

        :param dest: destination host IP
        :param flavor: the new flavor being resized to
        :param timeout: seconds to wait for a clean shutdown before forcing
        :param retry_interval: interval between shutdown retries
        :returns: the JSON disk-info string describing the source disks
        :raises: exception.InstanceFaultRollback on unsupported resize
        """
        LOG.debug("Starting migrate_disk_and_power_off",
                  instance=instance)

        ephemerals = driver.block_device_info_get_ephemerals(block_device_info)

        # get_bdm_ephemeral_disk_size() will return 0 if the new
        # instance's requested block device mapping contain no
        # ephemeral devices. However, we still want to check if
        # the original instance's ephemeral_gb property was set and
        # ensure that the new requested flavor ephemeral size is greater
        eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
                    instance.flavor.ephemeral_gb)

        # Checks if the migration needs a disk resize down.
        root_down = flavor.root_gb < instance.flavor.root_gb
        ephemeral_down = flavor.ephemeral_gb < eph_size
        disk_info_text = self.get_instance_disk_info(
            instance, block_device_info=block_device_info)
        booted_from_volume = self._is_booted_from_volume(instance,
                                                         disk_info_text)

        if (root_down and not booted_from_volume) or ephemeral_down:
            reason = _("Unable to resize disk down.")
            raise exception.InstanceFaultRollback(
                exception.ResizeError(reason=reason))

        disk_info = jsonutils.loads(disk_info_text)

        # NOTE(dgenin): Migration is not implemented for LVM backed instances.
        if CONF.libvirt.images_type == 'lvm' and not booted_from_volume:
            reason = _("Migration is not supported for LVM backed instances")
            raise exception.InstanceFaultRollback(
                exception.MigrationPreCheckError(reason=reason))

        # copy disks to destination
        # rename instance dir to +_resize at first for using
        # shared storage for instance dir (eg. NFS).
        inst_base = libvirt_utils.get_instance_path(instance)
        inst_base_resize = inst_base + "_resize"
        shared_storage = self._is_storage_shared_with(dest, inst_base)

        # try to create the directory on the remote compute node
        # if this fails we pass the exception up the stack so we can catch
        # failures here earlier
        if not shared_storage:
            try:
                self._remotefs.create_dir(dest, inst_base)
            except processutils.ProcessExecutionError as e:
                reason = _("not able to execute ssh command: %s") % e
                raise exception.InstanceFaultRollback(
                    exception.ResizeError(reason=reason))

        self.power_off(instance, timeout, retry_interval)

        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device'].rpartition("/")[2]
            self._disconnect_volume(connection_info, disk_dev)

        try:
            utils.execute('mv', inst_base, inst_base_resize)
            # if we are migrating the instance with shared storage then
            # create the directory. If it is a remote node the directory
            # has already been created
            if shared_storage:
                # dest=None makes copy_image below copy locally.
                dest = None
                utils.execute('mkdir', '-p', inst_base)

            # Track the copy processes so they can be terminated if the
            # instance is deleted mid-migration.
            on_execute = lambda process: \
                self.job_tracker.add_job(instance, process.pid)
            on_completion = lambda process: \
                self.job_tracker.remove_job(instance, process.pid)

            active_flavor = instance.get_flavor()
            for info in disk_info:
                # assume inst_base == dirname(info['path'])
                img_path = info['path']
                fname = os.path.basename(img_path)
                from_path = os.path.join(inst_base_resize, fname)

                # To properly resize the swap partition, it must be
                # re-created with the proper size. This is acceptable
                # because when an OS is shut down, the contents of the
                # swap space are just garbage, the OS doesn't bother about
                # what is in it.

                # We will not copy over the swap disk here, and rely on
                # finish_migration/_create_image to re-create it for us.
                if not (fname == 'disk.swap' and
                    active_flavor.get('swap', 0) != flavor.get('swap', 0)):

                    compression = info['type'] not in NO_COMPRESSION_TYPES
                    libvirt_utils.copy_image(from_path, img_path, host=dest,
                                             on_execute=on_execute,
                                             on_completion=on_completion,
                                             compression=compression)

            # Ensure disk.info is written to the new path to avoid disks being
            # reinspected and potentially changing format.
            src_disk_info_path = os.path.join(inst_base_resize, 'disk.info')
            if os.path.exists(src_disk_info_path):
                dst_disk_info_path = os.path.join(inst_base, 'disk.info')
                libvirt_utils.copy_image(src_disk_info_path,
                                         dst_disk_info_path,
                                         host=dest, on_execute=on_execute,
                                         on_completion=on_completion)
        except Exception:
            # Roll the rename back (and remove any remote copy) before
            # re-raising the original error.
            with excutils.save_and_reraise_exception():
                self._cleanup_remote_migration(dest, inst_base,
                                               inst_base_resize,
                                               shared_storage)

        return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_size_from_instance(instance, disk_name):
"""Determines the disk size from instance properties
Returns the disk size by using the disk name to determine whether it
is a root or an ephemeral disk, then by checking properties of the
instance returns the size converted to bytes.
Returns 0 if the disk name not match (disk, disk.local).
"""
if disk_name == 'disk':
size = instance.flavor.root_gb
elif disk_name == 'disk.local':
size = instance.flavor.ephemeral_gb
# N.B. We don't handle ephemeral disks named disk.ephN here,
# which is almost certainly a bug. It's not clear what this function
# should return if an instance has multiple ephemeral disks.
else:
size = 0
return size * units.Gi
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
    def _disk_resize(self, image, size):
        """Attempts to resize a disk to size

        :param image: an instance of nova.virt.image.model.Image
        :param size: requested size in bytes; falsy skips the extend

        Attempts to resize a disk by checking the capabilities and
        preparing the format, then calling disk.api.extend.

        Note: Currently only support disk extend.
        """
        if not isinstance(image, imgmodel.LocalFileImage):
            LOG.debug("Skipping resize of non-local image")
            return

        # If we have a non partitioned image that we can extend
        # then ensure we're in 'raw' format so we can extend file system.
        converted = False
        if (size and
            image.format == imgmodel.FORMAT_QCOW2 and
            disk_api.can_resize_image(image.path, size) and
            disk_api.is_image_extendable(image)):
            self._disk_qcow2_to_raw(image.path)
            converted = True
            image = imgmodel.LocalFileImage(image.path,
                                            imgmodel.FORMAT_RAW)

        if size:
            disk_api.extend(image, size)

        if converted:
            # back to qcow2 (no backing_file though) so that snapshot
            # will be available
            self._disk_raw_to_qcow2(image.path)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Complete a resize/migration on the destination host.

        Re-creates any missing disk images (fetching backing files from
        the source if needed), optionally resizes the root and ephemeral
        disks, regenerates the guest XML and defines/boots the domain.

        :param disk_info: JSON string of disk dicts produced on the source
            by get_instance_disk_info
        :param resize_instance: True if disks should be resized to the
            new flavor's sizes
        :param power_on: whether to start the guest after defining it
        """
        LOG.debug("Starting finish_migration", instance=instance)

        block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                  instance,
                                                  image_meta,
                                                  block_device_info)
        # assume _create_image does nothing if a target file exists.
        # NOTE: This has the intended side-effect of fetching a missing
        # backing file.
        self._create_image(context, instance, block_disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=None, inject_files=False,
                           fallback_from_host=migration.source_compute)

        # Required by Quobyte CI
        self._ensure_console_log_for_instance(instance)

        gen_confdrive = functools.partial(self._create_configdrive,
                                          context, instance,
                                          network_info=network_info)

        # Resize root disk and a single ephemeral disk called disk.local
        # Also convert raw disks to qcow2 if migrating to host which uses
        # qcow2 from host which uses raw.
        # TODO(mbooth): Handle resize of multiple ephemeral disks, and
        # ephemeral disks not called disk.local.
        disk_info = jsonutils.loads(disk_info)
        for info in disk_info:
            path = info['path']
            disk_name = os.path.basename(path)
            size = self._disk_size_from_instance(instance, disk_name)
            if resize_instance:
                image = imgmodel.LocalFileImage(path, info['type'])
                self._disk_resize(image, size)

            # NOTE(mdbooth): The code below looks wrong, but is actually
            # required to prevent a security hole when migrating from a host
            # with use_cow_images=False to one with use_cow_images=True.
            # Imagebackend uses use_cow_images to select between the
            # atrociously-named-Raw and Qcow2 backends. The Qcow2 backend
            # writes to disk.info, but does not read it as it assumes qcow2.
            # Therefore if we don't convert raw to qcow2 here, a raw disk will
            # be incorrectly assumed to be qcow2, which is a severe security
            # flaw. The reverse is not true, because the atrociously-named-Raw
            # backend supports both qcow2 and raw disks, and will choose
            # appropriately between them as long as disk.info exists and is
            # correctly populated, which it is because Qcow2 writes to
            # disk.info.
            #
            # In general, we do not yet support format conversion during
            # migration. For example:
            # * Converting from use_cow_images=True to use_cow_images=False
            #   isn't handled. This isn't a security bug, but is almost
            #   certainly buggy in other cases, as the 'Raw' backend doesn't
            #   expect a backing file.
            # * Converting to/from lvm and rbd backends is not supported.
            #
            # This behaviour is inconsistent, and therefore undesirable for
            # users. It is tightly-coupled to implementation quirks of 2
            # out of 5 backends in imagebackend and defends against a severe
            # security flaw which is not at all obvious without deep analysis,
            # and is therefore undesirable to developers. We should aim to
            # remove it. This will not be possible, though, until we can
            # represent the storage layout of a specific instance
            # independent of the default configuration of the local compute
            # host.

            # Config disks are hard-coded to be raw even when
            # use_cow_images=True (see _get_disk_config_image_type),so don't
            # need to be converted.
            if (disk_name != 'disk.config' and
                info['type'] == 'raw' and CONF.use_cow_images):
                self._disk_raw_to_qcow2(info['path'])

        xml = self._get_guest_xml(context, instance, network_info,
                                  block_disk_info, image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)
        # NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
        # or not we've migrated to another host, because we unplug VIFs locally
        # and the status change in the port might go undetected by the neutron
        # L2 agent (or neutron server) so neutron may not know that the VIF was
        # unplugged in the first place and never send an event.
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_disk_info,
                                        block_device_info=block_device_info,
                                        power_on=power_on,
                                        vifs_already_plugged=True,
                                        post_xml_callback=gen_confdrive)
        if power_on:
            timer = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_running,
                instance)
            timer.start(interval=0.5).wait()
        LOG.debug("finish_migration finished successfully.", instance=instance)
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Revert a resize/migration on the source host.

        Restores the original instance directory from its ``_resize``
        backup, rolls back the root disk's resize snapshot if one exists,
        regenerates the guest XML and re-creates the domain.

        :param power_on: whether to start the guest after defining it
        """
        LOG.debug("Starting finish_revert_migration",
                  instance=instance)

        inst_base = libvirt_utils.get_instance_path(instance)
        inst_base_resize = inst_base + "_resize"

        # NOTE(danms): if we're recovering from a failed migration,
        # make sure we don't have a left-over same-host base directory
        # that would conflict. Also, don't fail on the rename if the
        # failure happened early.
        if os.path.exists(inst_base_resize):
            self._cleanup_failed_migration(inst_base)
            utils.execute('mv', inst_base_resize, inst_base)

        root_disk = self.image_backend.image(instance, 'disk')
        # Once we rollback, the snapshot is no longer needed, so remove it
        # TODO(nic): Remove the try/except/finally in a future release
        # To avoid any upgrade issues surrounding instances being in pending
        # resize state when the software is updated, this portion of the
        # method logs exceptions rather than failing on them.  Once it can be
        # reasonably assumed that no such instances exist in the wild
        # anymore, the try/except/finally should be removed,
        # and ignore_errors should be set back to False (the default) so
        # that problems throw errors, like they should.
        if root_disk.exists():
            try:
                root_disk.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
            except exception.SnapshotNotFound:
                LOG.warning(_LW("Failed to rollback snapshot (%s)"),
                            libvirt_utils.RESIZE_SNAPSHOT_NAME)
            finally:
                root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
                                      ignore_errors=True)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            instance.image_meta,
                                            block_device_info)
        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  instance.image_meta,
                                  block_device_info=block_device_info)
        self._create_domain_and_network(context, xml, instance, network_info,
                                        disk_info,
                                        block_device_info=block_device_info,
                                        power_on=power_on,
                                        vifs_already_plugged=True)

        if power_on:
            timer = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_running,
                instance)
            timer.start(interval=0.5).wait()

        LOG.debug("finish_revert_migration finished successfully.",
                  instance=instance)
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        # Delegates to _cleanup_resize; the migration argument is unused
        # by this driver.
        self._cleanup_resize(instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
    def get_diagnostics(self, instance):
        """Return raw diagnostics counters for the instance as a flat dict.

        Collects per-vCPU time, per-disk block stats, per-interface
        network stats and memory figures.  Each collector tolerates
        hypervisors that do not support the underlying libvirt call.
        """
        guest = self._host.get_guest(instance)

        # TODO(sahid): We are converting all calls from a
        # virDomain object to use nova.virt.libvirt.Guest.
        # We should be able to remove domain at the end.
        domain = guest._domain
        output = {}
        # get cpu time, might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        try:
            for vcpu in guest.get_vcpus_info():
                output["cpu" + str(vcpu.id) + "_time"] = vcpu.time
        except libvirt.libvirtError:
            pass
        # get io status
        xml = guest.get_xml_desc()
        dom_io = LibvirtDriver._get_io_devices(xml)
        for guest_disk in dom_io["volumes"]:
            try:
                # blockStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.blockStats(guest_disk)
                output[guest_disk + "_read_req"] = stats[0]
                output[guest_disk + "_read"] = stats[1]
                output[guest_disk + "_write_req"] = stats[2]
                output[guest_disk + "_write"] = stats[3]
                output[guest_disk + "_errors"] = stats[4]
            except libvirt.libvirtError:
                pass
        for interface in dom_io["ifaces"]:
            try:
                # interfaceStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.interfaceStats(interface)
                output[interface + "_rx"] = stats[0]
                output[interface + "_rx_packets"] = stats[1]
                output[interface + "_rx_errors"] = stats[2]
                output[interface + "_rx_drop"] = stats[3]
                output[interface + "_tx"] = stats[4]
                output[interface + "_tx_packets"] = stats[5]
                output[interface + "_tx_errors"] = stats[6]
                output[interface + "_tx_drop"] = stats[7]
            except libvirt.libvirtError:
                pass
        output["memory"] = domain.maxMemory()
        # memoryStats might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        try:
            mem = domain.memoryStats()
            for key in mem.keys():
                output["memory-" + key] = mem[key]
        except (libvirt.libvirtError, AttributeError):
            pass
        return output
    def get_instance_diagnostics(self, instance):
        """Return a structured Diagnostics object for the instance.

        Like get_diagnostics but using the standardized
        nova.virt.diagnostics.Diagnostics model: state, uptime, memory,
        per-CPU, per-disk and per-NIC statistics.  Individual collectors
        tolerate hypervisors lacking the underlying libvirt call.
        """
        guest = self._host.get_guest(instance)

        # TODO(sahid): We are converting all calls from a
        # virDomain object to use nova.virt.libvirt.Guest.
        # We should be able to remove domain at the end.
        domain = guest._domain

        xml = guest.get_xml_desc()
        xml_doc = etree.fromstring(xml)

        # TODO(sahid): Needs to use get_info but more changes have to
        # be done since a mapping STATE_MAP LIBVIRT_POWER_STATE is
        # needed.
        (state, max_mem, mem, num_cpu, cpu_time) = \
            guest._get_domain_info(self._host)
        config_drive = configdrive.required_by(instance)
        launched_at = timeutils.normalize_time(instance.launched_at)
        uptime = timeutils.delta_seconds(launched_at,
                                         timeutils.utcnow())
        diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
                                        driver='libvirt',
                                        config_drive=config_drive,
                                        hypervisor_os='linux',
                                        uptime=uptime)
        diags.memory_details.maximum = max_mem / units.Mi
        diags.memory_details.used = mem / units.Mi

        # get cpu time, might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        try:
            for vcpu in guest.get_vcpus_info():
                diags.add_cpu(time=vcpu.time)
        except libvirt.libvirtError:
            pass
        # get io status
        dom_io = LibvirtDriver._get_io_devices(xml)
        for guest_disk in dom_io["volumes"]:
            try:
                # blockStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.blockStats(guest_disk)
                diags.add_disk(read_bytes=stats[1],
                               read_requests=stats[0],
                               write_bytes=stats[3],
                               write_requests=stats[2])
            except libvirt.libvirtError:
                pass
        for interface in dom_io["ifaces"]:
            try:
                # interfaceStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.interfaceStats(interface)
                diags.add_nic(rx_octets=stats[0],
                              rx_errors=stats[2],
                              rx_drop=stats[3],
                              rx_packets=stats[1],
                              tx_octets=stats[4],
                              tx_errors=stats[6],
                              tx_drop=stats[7],
                              tx_packets=stats[5])
            except libvirt.libvirtError:
                pass

        # Update mac addresses of interface if stats have been reported
        if diags.nic_details:
            nodes = xml_doc.findall('./devices/interface/mac')
            for index, node in enumerate(nodes):
                diags.nic_details[index].mac_address = node.get('address')
        return diags
@staticmethod
def _prepare_device_bus(dev):
"""Determins the device bus and it's hypervisor assigned address
"""
bus = None
address = (dev.device_addr.format_address() if
dev.device_addr else None)
if isinstance(dev.device_addr,
vconfig.LibvirtConfigGuestDeviceAddressPCI):
bus = objects.PCIDeviceBus()
elif isinstance(dev, vconfig.LibvirtConfigGuestDisk):
if dev.target_bus == 'scsi':
bus = objects.SCSIDeviceBus()
elif dev.target_bus == 'ide':
bus = objects.IDEDeviceBus()
elif dev.target_bus == 'usb':
bus = objects.USBDeviceBus()
if address is not None and bus is not None:
bus.address = address
return bus
    def _build_device_metadata(self, context, instance):
        """Builds a metadata object for instance devices, that maps the user
        provided tag to the hypervisor assigned device address.

        :returns: an objects.InstanceDeviceMetadata when at least one
            tagged device was matched, otherwise None (implicit)
        """
        def _get_device_name(bdm):
            return block_device.strip_dev(bdm.device_name)

        vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
                                                                 instance.uuid)
        # Only tagged devices get metadata; index by MAC / device name for
        # matching against the live domain config below.
        tagged_vifs = {vif.address: vif for vif in vifs if vif.tag}
        # TODO(mriedem): We should be able to avoid the DB query here by using
        # block_device_info['block_device_mapping'] which is passed into most
        # methods that call this function.
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        tagged_bdms = {_get_device_name(bdm): bdm for bdm in bdms if bdm.tag}

        devices = []
        guest = self._host.get_guest(instance)
        xml = guest.get_xml_desc()
        xml_dom = etree.fromstring(xml)
        guest_config = vconfig.LibvirtConfigGuest()
        guest_config.parse_dom(xml_dom)

        for dev in guest_config.devices:
            # Build network interfaces related metadata
            if isinstance(dev, vconfig.LibvirtConfigGuestInterface):
                vif = tagged_vifs.get(dev.mac_addr)
                if not vif:
                    continue
                bus = self._prepare_device_bus(dev)
                device = objects.NetworkInterfaceMetadata(
                    mac=vif.address,
                    tags=[vif.tag]
                )
                if bus:
                    device.bus = bus
                devices.append(device)

            # Build disks related metadata
            if isinstance(dev, vconfig.LibvirtConfigGuestDisk):
                bdm = tagged_bdms.get(dev.target_dev)
                if not bdm:
                    continue
                bus = self._prepare_device_bus(dev)
                device = objects.DiskMetadata(tags=[bdm.tag])
                if bus:
                    device.bus = bus
                devices.append(device)
        if devices:
            dev_meta = objects.InstanceDeviceMetadata(devices=devices)
            return dev_meta
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug('Checking instance files accessibility %s', instance_path,
instance=instance)
shared_instance_path = os.access(instance_path, os.W_OK)
# NOTE(flwang): For shared block storage scenario, the file system is
# not really shared by the two hosts, but the volume of evacuated
# instance is reachable.
shared_block_storage = (self.image_backend.backend().
is_shared_block_storage())
return shared_instance_path or shared_block_storage
    def inject_network_info(self, instance, nw_info):
        """Re-apply basic firewall filtering for the instance's networks.

        Despite the generic driver-interface name, the libvirt driver only
        refreshes the firewall driver's basic filtering rules here.
        """
        self.firewall_driver.setup_basic_filtering(instance, nw_info)
    def delete_instance_files(self, instance):
        """Delete the instance's on-disk files (and any resize copy).

        The directory is first renamed to '<path>_del' and then removed,
        so a crash mid-delete can be retried.  Returns True when nothing
        is left on disk, False when deletion failed and should be retried
        by the caller.
        """
        target = libvirt_utils.get_instance_path(instance)
        # A resize may be in progress
        target_resize = target + '_resize'
        # Other threads may attempt to rename the path, so renaming the path
        # to target + '_del' (because it is atomic) and iterating through
        # twice in the unlikely event that a concurrent rename occurs between
        # the two rename attempts in this method. In general this method
        # should be fairly thread-safe without these additional checks, since
        # other operations involving renames are not permitted when the task
        # state is not None and the task state should be set to something
        # other than None by the time this method is invoked.
        target_del = target + '_del'
        for i in six.moves.range(2):
            try:
                utils.execute('mv', target, target_del)
                break
            except Exception:
                pass
            try:
                utils.execute('mv', target_resize, target_del)
                break
            except Exception:
                pass
        # Either the target or target_resize path may still exist if all
        # rename attempts failed.
        remaining_path = None
        for p in (target, target_resize):
            if os.path.exists(p):
                remaining_path = p
                break
        # A previous delete attempt may have been interrupted, so target_del
        # may exist even if all rename attempts during the present method
        # invocation failed due to the absence of both target and
        # target_resize.
        if not remaining_path and os.path.exists(target_del):
            self.job_tracker.terminate_jobs(instance)
            LOG.info(_LI('Deleting instance files %s'), target_del,
                     instance=instance)
            remaining_path = target_del
            try:
                shutil.rmtree(target_del)
            except OSError as e:
                LOG.error(_LE('Failed to cleanup directory %(target)s: '
                              '%(e)s'), {'target': target_del, 'e': e},
                          instance=instance)
        # It is possible that the delete failed, if so don't mark the instance
        # as cleaned.
        if remaining_path and os.path.exists(remaining_path):
            LOG.info(_LI('Deletion of %s failed'), remaining_path,
                     instance=instance)
            return False
        LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
        return True
    @property
    def need_legacy_block_device_info(self):
        # This driver consumes the new-style block_device_info format.
        return False
    def default_root_device_name(self, instance, image_meta, root_bdm):
        """Compute the default root disk device name (e.g. '/dev/vda').

        The disk/cdrom buses are derived from the configured virt type and
        image metadata; blockinfo picks the bare device name, which is then
        prefixed with '/dev/'.
        """
        disk_bus = blockinfo.get_disk_bus_for_device_type(
            instance, CONF.libvirt.virt_type, image_meta, "disk")
        cdrom_bus = blockinfo.get_disk_bus_for_device_type(
            instance, CONF.libvirt.virt_type, image_meta, "cdrom")
        root_info = blockinfo.get_root_info(
            instance, CONF.libvirt.virt_type, image_meta,
            root_bdm, disk_bus, cdrom_bus)
        return block_device.prepend_dev(root_info['dev'])
    def default_device_names_for_instance(self, instance, root_device_name,
                                          *block_device_lists):
        """Generate default device names for all of the instance's BDMs.

        Any user-supplied names are discarded (with a warning) because
        libvirt cannot guarantee them; blockinfo then assigns names.
        """
        block_device_mapping = list(itertools.chain(*block_device_lists))
        # NOTE(ndipanov): Null out the device names so that blockinfo code
        #                 will assign them
        for bdm in block_device_mapping:
            if bdm.device_name is not None:
                LOG.warning(
                    _LW("Ignoring supplied device name: %(device_name)s. "
                        "Libvirt can't honour user-supplied dev names"),
                    {'device_name': bdm.device_name}, instance=instance)
                bdm.device_name = None
        block_device_info = driver.get_block_device_info(instance,
                                                         block_device_mapping)
        blockinfo.default_device_names(CONF.libvirt.virt_type,
                                       nova_context.get_admin_context(),
                                       instance,
                                       block_device_info,
                                       instance.image_meta)
    def get_device_name_for_instance(self, instance, bdms, block_device_obj):
        """Pick a device name (e.g. '/dev/vdb') for a new block device.

        A caller-supplied name is ignored (libvirt cannot honour it);
        blockinfo generates the next free one instead.
        """
        block_device_info = driver.get_block_device_info(instance, bdms)
        instance_info = blockinfo.get_disk_info(
            CONF.libvirt.virt_type, instance,
            instance.image_meta, block_device_info=block_device_info)
        suggested_dev_name = block_device_obj.device_name
        if suggested_dev_name is not None:
            LOG.warning(
                _LW('Ignoring supplied device name: %(suggested_dev)s'),
                {'suggested_dev': suggested_dev_name}, instance=instance)
        # NOTE(ndipanov): get_info_from_bdm will generate the new device name
        #                 only when it's actually not set on the bd object
        block_device_obj.device_name = None
        disk_info = blockinfo.get_info_from_bdm(
            instance, CONF.libvirt.virt_type, instance.image_meta,
            block_device_obj, mapping=instance_info['mapping'])
        return block_device.prepend_dev(disk_info['dev'])
def is_supported_fs_format(self, fs_type):
return fs_type in [disk_api.FS_FORMAT_EXT2, disk_api.FS_FORMAT_EXT3,
disk_api.FS_FORMAT_EXT4, disk_api.FS_FORMAT_XFS]
| 44.665839 | 105 | 0.575309 |
import string
import collections
from collections import deque
import contextlib
import errno
import functools
import glob
import itertools
import mmap
import operator
import os
import shutil
import tempfile
import time
import uuid
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import remotefs
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova.volume import cinder
from nova.volume import encryptors
# The libvirt python binding is imported lazily in LibvirtDriver.__init__
# (via importutils), so this module can be loaded without it installed.
libvirt = None
# Guard so a UEFI-related message is only logged once per process.
uefi_logged = False
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
    libvirt_firewall.__name__,
    libvirt_firewall.IptablesFirewallDriver.__name__)
# Well-known locations of UEFI firmware images, keyed by guest architecture.
DEFAULT_UEFI_LOADER_PATH = {
    "x86_64": "/usr/share/OVMF/OVMF_CODE.fd",
    "aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd"
}
# Upper bound on console log data handled at once (100 KiB).
MAX_CONSOLE_BYTES = 100 * units.Ki
# Prefix marking a service-disable reason as automatically generated.
DISABLE_PREFIX = 'AUTO: '
DISABLE_REASON_UNDEFINED = None
# Guest kernel command line fragment sending console output to both the
# video console and the first serial port.
CONSOLE = "console=tty0 console=ttyS0"
# Bundle of per-guest NUMA settings computed by the driver.
GuestNumaConfig = collections.namedtuple(
    'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
# Volume driver aliases -> implementation classes; consumed through
# driver.driver_dict_from_config() in LibvirtDriver.__init__.
libvirt_volume_drivers = [
    'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
    'iser=nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
    'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
    'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
    'rbd=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
    'sheepdog=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
    'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
    'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
    'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver',
    'glusterfs='
    'nova.virt.libvirt.volume.glusterfs.LibvirtGlusterfsVolumeDriver',
    'fibre_channel='
    'nova.virt.libvirt.volume.fibrechannel.'
    'LibvirtFibreChannelVolumeDriver',
    'scality=nova.virt.libvirt.volume.scality.LibvirtScalityVolumeDriver',
    'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
    'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
    'hgst=nova.virt.libvirt.volume.hgst.LibvirtHGSTVolumeDriver',
    'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
    'disco=nova.virt.libvirt.volume.disco.LibvirtDISCOVolumeDriver',
    'vzstorage='
    'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver',
]
def patch_tpool_proxy():
    """Make eventlet's tpool.Proxy delegate __str__/__repr__.

    Special methods are looked up on the type rather than the instance,
    so the Proxy class itself must be patched for str()/repr() of a
    proxy to reflect the wrapped object.
    """
    tpool.Proxy.__str__ = lambda self: str(self._obj)
    tpool.Proxy.__repr__ = lambda self: repr(self._obj)


patch_tpool_proxy()
# Hard minimum versions: init_host() refuses to start below these.
MIN_LIBVIRT_VERSION = (1, 2, 1)
MIN_QEMU_VERSION = (1, 5, 3)
# Planned minimums for the next release; running below them only logs a
# deprecation warning in init_host().
NEXT_MIN_LIBVIRT_VERSION = (1, 2, 1)
NEXT_MIN_QEMU_VERSION = (1, 5, 3)
# Feature-gate versions, checked at runtime before enabling the
# corresponding functionality.
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION = (1, 2, 7)
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17)
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
MIN_LIBVIRT_NUMA_VERSION = (1, 2, 7)
MIN_LIBVIRT_NUMA_VERSION_PPC = (1, 2, 19)
VIRT_NUMA_VERSIONS = [(1, 2, 9, 2)]
MIN_LIBVIRT_HUGEPAGE_VERSION = (1, 2, 8)
VIRT_CPU_POLICY_VERSIONS = [(1, 2, 10)]
MIN_QEMU_NUMA_HUGEPAGE_VERSION = (2, 1, 0)
MIN_LIBVIRT_FSFREEZE_VERSION = (1, 2, 5)
MIN_LIBVIRT_UEFI_VERSION = (1, 2, 9)
MIN_LIBVIRT_HYPERV_TIMER_VERSION = (1, 2, 2)
MIN_QEMU_HYPERV_TIMER_VERSION = (2, 0, 0)
MIN_LIBVIRT_PARALLELS_VERSION = (1, 2, 12)
MIN_LIBVIRT_SET_ADMIN_PASSWD = (1, 2, 16)
MIN_LIBVIRT_KVM_S390_VERSION = (1, 2, 13)
MIN_QEMU_S390_VERSION = (2, 3, 0)
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0)
# s390(x)/ppc64 KVM support requires newer versions than x86; see the
# MIN_*_OTHER_ARCH maps below.
MIN_LIBVIRT_KVM_PPC64_VERSION = (1, 2, 12)
MIN_QEMU_PPC64_VERSION = (2, 1, 0)
MIN_LIBVIRT_AUTO_CONVERGE_VERSION = (1, 2, 3)
MIN_QEMU_AUTO_CONVERGE = (1, 6, 0)
# NOTE(review): presumably image formats excluded from live-migration
# compression — confirm at the point of use.
NO_COMPRESSION_TYPES = ('qcow2',)
# One serial port is held back from the QEMU maximum (reason not visible
# in this module).
QEMU_MAX_SERIAL_PORTS = 4
ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1
MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13)
MIN_LIBVIRT_POSTCOPY_VERSION = (1, 3, 3)
MIN_QEMU_POSTCOPY_VERSION = (2, 5, 0)
# Per-architecture overrides of the hard minimum versions, consulted in
# init_host().
MIN_LIBVIRT_OTHER_ARCH = {arch.S390: MIN_LIBVIRT_KVM_S390_VERSION,
                          arch.S390X: MIN_LIBVIRT_KVM_S390_VERSION,
                          arch.PPC: MIN_LIBVIRT_KVM_PPC64_VERSION,
                          arch.PPC64: MIN_LIBVIRT_KVM_PPC64_VERSION,
                          arch.PPC64LE: MIN_LIBVIRT_KVM_PPC64_VERSION,
                          }
MIN_QEMU_OTHER_ARCH = {arch.S390: MIN_QEMU_S390_VERSION,
                       arch.S390X: MIN_QEMU_S390_VERSION,
                       arch.PPC: MIN_QEMU_PPC64_VERSION,
                       arch.PPC64: MIN_QEMU_PPC64_VERSION,
                       arch.PPC64LE: MIN_QEMU_PPC64_VERSION,
                       }
# perf event support; event names map to the host CPU flags they require.
MIN_LIBVIRT_PERF_VERSION = (2, 0, 0)
LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt',
                                'mbml': 'mbm_local',
                                'mbmt': 'mbm_total',
                               }
class LibvirtDriver(driver.ComputeDriver):
    """Libvirt implementation of the Nova compute driver interface."""

    # Feature capabilities advertised to the compute manager.
    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        "supports_migrate_to_same_host": False,
        "supports_attach_interface": True,
        "supports_device_tagging": True,
    }
    def __init__(self, virtapi, read_only=False):
        """Set up driver state, helper objects and configuration caches."""
        super(LibvirtDriver, self).__init__(virtapi)

        # Import the libvirt binding lazily so the module can be loaded on
        # hosts that don't have it installed.
        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
            libvirt_migrate.libvirt = libvirt

        self._host = host.Host(self._uri(), read_only,
                               lifecycle_event_handler=self.emit_event,
                               conn_event_handler=self._handle_conn_event)
        self._initiator = None
        self._fc_wwnns = None
        self._fc_wwpns = None
        self._caps = None
        self._supported_perf_events = []
        self.firewall_driver = firewall.load_driver(
            DEFAULT_FIREWALL_DRIVER,
            host=self._host)
        self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
        self.volume_drivers = driver.driver_dict_from_config(
            self._get_volume_drivers(), self)
        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()
        self.image_backend = imagebackend.Backend(CONF.use_cow_images)
        self.disk_cachemodes = {}
        self.valid_cachemodes = ["default",
                                 "none",
                                 "writethrough",
                                 "writeback",
                                 "directsync",
                                 "unsafe",
                                ]
        self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
                                                                      'qemu')
        # Parse 'disk_type=cache_mode' pairs; invalid modes are skipped
        # with a warning.
        for mode_str in CONF.libvirt.disk_cachemodes:
            disk_type, sep, cache_mode = mode_str.partition('=')
            if cache_mode not in self.valid_cachemodes:
                LOG.warning(_LW('Invalid cachemode %(cache_mode)s specified '
                                'for disk type %(disk_type)s.'),
                            {'cache_mode': cache_mode, 'disk_type': disk_type})
                continue
            self.disk_cachemodes[disk_type] = cache_mode
        self._volume_api = cinder.API()
        self._image_api = image.API()

        # Strategies for generating the host sysinfo serial number, chosen
        # by CONF.libvirt.sysinfo_serial.
        sysinfo_serial_funcs = {
            'none': lambda: None,
            'hardware': self._get_host_sysinfo_serial_hardware,
            'os': self._get_host_sysinfo_serial_os,
            'auto': self._get_host_sysinfo_serial_auto,
        }
        self._sysinfo_serial_func = sysinfo_serial_funcs.get(
            CONF.libvirt.sysinfo_serial)
        self.job_tracker = instancejobtracker.InstanceJobTracker()
        self._remotefs = remotefs.RemoteFilesystem()
        # Migration flag sets are computed later in _parse_migration_flags().
        self._live_migration_flags = self._block_migration_flags = 0
        self.active_migrations = {}
        self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()
    def _get_volume_drivers(self):
        # Overridable hook returning the volume driver config list.
        return libvirt_volume_drivers
    @property
    def disk_cachemode(self):
        """Lazily-computed default guest disk cache mode."""
        if self._disk_cachemode is None:
            # 'none' is preferred by default, but it requires direct I/O
            # support on the instances path; some filesystems don't support
            # O_DIRECT though. For those we fallback to 'writethrough'
            # which gives host crash safety, and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
            # typically are, but things like NFS are not).
            self._disk_cachemode = "none"
            if not self._supports_direct_io(CONF.instances_path):
                self._disk_cachemode = "writethrough"
        return self._disk_cachemode
def _set_cache_mode(self, conf):
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def _do_quality_warnings(self):
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (arch.I686, arch.X86_64)):
LOG.warning(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: http://docs.openstack.org/'
'developer/nova/support-matrix.html'),
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
    def _handle_conn_event(self, enabled, reason):
        # Callback from the Host object: mirror the libvirt connection
        # state onto the compute service's enabled/disabled status.
        LOG.info(_LI("Connection event '%(enabled)d' reason '%(reason)s'"),
                 {'enabled': enabled, 'reason': reason})
        self._set_host_enabled(enabled, reason)
def _version_to_string(self, version):
return '.'.join([str(x) for x in version])
    def init_host(self, host):
        """Initialize the driver on this compute host.

        Initializes the libvirt connection, computes migration flags, and
        enforces the minimum libvirt/QEMU versions (raising NovaException
        when unmet; logging deprecation warnings when below the next
        release's planned minimums).
        """
        self._host.initialize()

        self._do_quality_warnings()

        self._parse_migration_flags()

        self._supported_perf_events = self._get_supported_perf_events()

        if (CONF.libvirt.virt_type == 'lxc' and
            not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
            LOG.warning(_LW("Running libvirt-lxc without user namespaces is "
                            "dangerous. Containers spawned by Nova will be run "
                            "as the host's root user. It is highly suggested "
                            "that user namespaces be used in a public or "
                            "multi-tenant environment."))

        # Stop libguestfs using KVM unless we're also configured
        # to use this. This solves problem where people need to
        # stop Nova use of KVM because nested-virt is broken
        if CONF.libvirt.virt_type != "kvm":
            guestfs.force_tcg()

        if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
            raise exception.NovaException(
                _('Nova requires libvirt version %s or greater.') %
                self._version_to_string(MIN_LIBVIRT_VERSION))

        if (CONF.libvirt.virt_type in ("qemu", "kvm") and
            not self._host.has_min_version(hv_ver=MIN_QEMU_VERSION)):
            raise exception.NovaException(
                _('Nova requires QEMU version %s or greater.') %
                self._version_to_string(MIN_QEMU_VERSION))

        if (CONF.libvirt.virt_type == 'parallels' and
            not self._host.has_min_version(MIN_LIBVIRT_PARALLELS_VERSION)):
            raise exception.NovaException(
                _('Running Nova with parallels virt_type requires '
                  'libvirt version %s') %
                self._version_to_string(MIN_LIBVIRT_PARALLELS_VERSION))

        # Give the cloud admin a heads up if we are intending to
        # change the MIN_LIBVIRT_VERSION in the next release.
        if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
            LOG.warning(_LW('Running Nova with a libvirt version less than '
                            '%(version)s is deprecated. The required minimum '
                            'version of libvirt will be raised to %(version)s '
                            'in the next release.'),
                        {'version': self._version_to_string(
                            NEXT_MIN_LIBVIRT_VERSION)})
        if (CONF.libvirt.virt_type in ("qemu", "kvm") and
            not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
            LOG.warning(_LW('Running Nova with a QEMU version less than '
                            '%(version)s is deprecated. The required minimum '
                            'version of QEMU will be raised to %(version)s '
                            'in the next release.'),
                        {'version': self._version_to_string(
                            NEXT_MIN_QEMU_VERSION)})

        # Some architectures (s390x, ppc64...) need newer libvirt/QEMU
        # than the x86 baseline for KVM/QEMU support.
        kvm_arch = arch.from_host()
        if (CONF.libvirt.virt_type in ('kvm', 'qemu') and
            kvm_arch in MIN_LIBVIRT_OTHER_ARCH and
                not self._host.has_min_version(
                                        MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch),
                                        MIN_QEMU_OTHER_ARCH.get(kvm_arch))):
            raise exception.NovaException(
                _('Running Nova with qemu/kvm virt_type on %(arch)s '
                  'requires libvirt version %(libvirt_ver)s and '
                  'qemu version %(qemu_ver)s, or greater') %
                {'arch': kvm_arch,
                 'libvirt_ver': self._version_to_string(
                     MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch)),
                 'qemu_ver': self._version_to_string(
                     MIN_QEMU_OTHER_ARCH.get(kvm_arch))})
def _prepare_migration_flags(self):
migration_flags = 0
migration_flags |= libvirt.VIR_MIGRATE_LIVE
# Adding p2p flag only if xen is not in use, because xen does not
# support p2p migrations
if CONF.libvirt.virt_type != 'xen':
migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER
# Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, migrated
# instance will remain defined on the source host
migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
live_migration_flags = block_migration_flags = migration_flags
# Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations
# will be live-migrations instead
block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC
return (live_migration_flags, block_migration_flags)
def _handle_live_migration_tunnelled(self, migration_flags):
if (CONF.libvirt.live_migration_tunnelled is None or
CONF.libvirt.live_migration_tunnelled):
migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED
return migration_flags
def _is_post_copy_available(self):
if self._host.has_min_version(lv_ver=MIN_LIBVIRT_POSTCOPY_VERSION,
hv_ver=MIN_QEMU_POSTCOPY_VERSION):
return True
return False
def _handle_live_migration_post_copy(self, migration_flags):
if CONF.libvirt.live_migration_permit_post_copy:
if self._is_post_copy_available():
migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
else:
LOG.info(_LI('The live_migration_permit_post_copy is set '
'to True, but it is not supported.'))
return migration_flags
    def _handle_live_migration_auto_converge(self, migration_flags):
        """Add the auto-converge flag when configured and supported.

        Auto-converge is skipped when post-copy is both available and
        already present in the flags, since post-copy takes precedence.
        """
        if self._host.has_min_version(lv_ver=MIN_LIBVIRT_AUTO_CONVERGE_VERSION,
                                      hv_ver=MIN_QEMU_AUTO_CONVERGE):
            if (self._is_post_copy_available() and
                (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0):
                LOG.info(_LI('The live_migration_permit_post_copy is set to '
                             'True and post copy live migration is available '
                             'so auto-converge will not be in use.'))
            elif CONF.libvirt.live_migration_permit_auto_converge:
                migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
        elif CONF.libvirt.live_migration_permit_auto_converge:
            LOG.info(_LI('The live_migration_permit_auto_converge is set '
                         'to True, but it is not supported.'))
        return migration_flags
def _parse_migration_flags(self):
(live_migration_flags,
block_migration_flags) = self._prepare_migration_flags()
live_migration_flags = self._handle_live_migration_tunnelled(
live_migration_flags)
block_migration_flags = self._handle_live_migration_tunnelled(
block_migration_flags)
live_migration_flags = self._handle_live_migration_post_copy(
live_migration_flags)
block_migration_flags = self._handle_live_migration_post_copy(
block_migration_flags)
live_migration_flags = self._handle_live_migration_auto_converge(
live_migration_flags)
block_migration_flags = self._handle_live_migration_auto_converge(
block_migration_flags)
self._live_migration_flags = live_migration_flags
self._block_migration_flags = block_migration_flags
    # TODO(sahid): This method is targeted for removal when the tests
    # have been updated to avoid its use
    #
    # All libvirt API calls on the libvirt.Connect object should be
    # encapsulated by methods on the nova.virt.libvirt.host.Host
    # object, rather than directly invoking the libvirt APIs. The goal
    # is to avoid a direct dependency on the libvirt API from the
    # driver.py file.
    def _get_connection(self):
        """Return the raw libvirt connection (legacy accessor)."""
        return self._host.get_connection()

    _conn = property(_get_connection)
@staticmethod
def _uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
    @staticmethod
    def _live_migration_uri(dest):
        """Return the migration URI for the destination host.

        :raises: LiveMigrationURINotAvailable when neither a configured
            URI nor a default for the virt type exists.
        """
        # Only Xen and QEMU support live migration, see
        # https://libvirt.org/migration.html#scenarios for reference
        uris = {
            'kvm': 'qemu+tcp://%s/system',
            'qemu': 'qemu+tcp://%s/system',
            'xen': 'xenmigr://%s/system',
        }
        virt_type = CONF.libvirt.virt_type
        uri = CONF.libvirt.live_migration_uri or uris.get(virt_type)
        if uri is None:
            raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
        return uri % dest
def instance_exists(self, instance):
try:
self._host.get_guest(instance)
return True
except exception.NovaException:
return False
def list_instances(self):
names = []
for guest in self._host.list_guests(only_running=False):
names.append(guest.name)
return names
def list_instance_uuids(self):
uuids = []
for guest in self._host.list_guests(only_running=False):
uuids.append(guest.uuid)
return uuids
    def plug_vifs(self, instance, network_info):
        """Plug each of the instance's VIFs into its network."""
        for vif in network_info:
            self.vif_driver.plug(instance, vif)
    def _unplug_vifs(self, instance, network_info, ignore_errors):
        """Unplug each VIF, optionally continuing past failures.

        :param ignore_errors: when True, NovaExceptions raised by the vif
            driver are swallowed so best-effort cleanup can continue.
        """
        for vif in network_info:
            try:
                self.vif_driver.unplug(instance, vif)
            except exception.NovaException:
                if not ignore_errors:
                    raise
    def unplug_vifs(self, instance, network_info):
        """Unplug all VIFs, raising on the first failure."""
        self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
LOG.debug('Attempting to teardown container at path %(dir)s with '
'root device: %(rootfs_dev)s',
{'dir': container_dir, 'rootfs_dev': rootfs_dev},
instance=instance)
disk_api.teardown_container(container_dir, rootfs_dev)
    def _destroy(self, instance, attempt=1):
        """Forcibly power off the guest, retrying up to 3 times on EBUSY.

        Releases any acquired serial console ports first, then powers the
        domain off and polls until it reports SHUTDOWN (re-destroying if a
        new domain with a different id appears meanwhile).  For LXC the
        container rootfs is torn down afterwards.
        """
        try:
            guest = self._host.get_guest(instance)
            if CONF.serial_console.enabled:
                # This method is called for several events: destroy,
                # rebuild, hard-reboot, power-off - For all of these
                # events we want to release the serial ports acquired
                # for the guest before destroying it.
                serials = self._get_serial_ports_from_guest(guest)
                for hostname, port in serials:
                    serial_console.release_port(host=hostname, port=port)
        except exception.InstanceNotFound:
            guest = None

        # If the instance is already terminated, we're still happy
        old_domid = -1
        if guest is not None:
            try:
                old_domid = guest.id
                guest.poweroff()

            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_NO_DOMAIN:
                    # Domain already gone - nothing left to destroy.
                    is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, destroying it
                    # is not a valid operation; treat SHUTDOWN as success.
                    state = guest.get_power_state(self._host)
                    if state == power_state.SHUTDOWN:
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
                    errmsg = e.get_error_message()
                    if (CONF.libvirt.virt_type == 'lxc' and
                        errmsg == 'internal error: '
                        'Some processes refused to die'):
                        # Some processes in the container didn't die
                        # fast enough for libvirt. The container will
                        # eventually die. For now, move on and let
                        # the wait_for_destroy logic take over.
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
                    LOG.warning(_LW("Cannot destroy instance, operation time "
                                    "out"),
                                instance=instance)
                    reason = _("operation time out")
                    raise exception.InstancePowerOffFailure(reason=reason)
                elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
                    if e.get_int1() == errno.EBUSY:
                        # NOTE(danpb): When libvirt kills a process it sends it
                        # SIGTERM first and waits 10 seconds. If it hasn't gone
                        # it sends SIGKILL and waits another 5 seconds. If it
                        # still hasn't gone then you get this EBUSY error.
                        # Usually when a QEMU process fails to go away upon
                        # SIGKILL it is because it is stuck in an
                        # uninterruptible kernel sleep waiting on I/O from
                        # some non-responsive server.
                        # Given the CPU load of the gate tests though, it is
                        # conceivable that the 15 second timeout is too short,
                        # particularly if the VM running tempest has a high
                        # steal time from the cloud host. ie 15 wallclock
                        # seconds may have passed, but the VM might have only
                        # have a few seconds of scheduled run time.
                        LOG.warning(_LW('Error from libvirt during destroy. '
                                        'Code=%(errcode)s Error=%(e)s; '
                                        'attempt %(attempt)d of 3'),
                                    {'errcode': errcode, 'e': e,
                                     'attempt': attempt},
                                    instance=instance)
                        with excutils.save_and_reraise_exception() as ctxt:
                            # Try up to 3 times before giving up.
                            if attempt < 3:
                                ctxt.reraise = False
                                self._destroy(instance, attempt + 1)
                                return

                if not is_okay:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Error from libvirt during destroy. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)

        def _wait_for_destroy(expected_domid):
            """Looping-call body: stop once the domain is gone or SHUTDOWN."""
            # NOTE(vish): If the instance disappears during the destroy
            #             we ignore it so the cleanup can still be
            #             attempted because we would prefer destroy to
            #             never fail.
            try:
                dom_info = self.get_info(instance)
                state = dom_info.state
                new_domid = dom_info.id
            except exception.InstanceNotFound:
                LOG.info(_LI("During wait destroy, instance disappeared."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

            if state == power_state.SHUTDOWN:
                LOG.info(_LI("Instance destroyed successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

            # NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the id of
            #                domain here, if it changed and the instance is
            #                still running, we should destroy it again.
            # see https://bugs.launchpad.net/nova/+bug/1111213 for more details
            if new_domid != expected_domid:
                LOG.info(_LI("Instance may be started again."),
                         instance=instance)
                kwargs['is_running'] = True
                raise loopingcall.LoopingCallDone()

        kwargs = {'is_running': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
                                                     old_domid)
        timer.start(interval=0.5).wait()
        if kwargs['is_running']:
            LOG.info(_LI("Going to destroy instance again."),
                     instance=instance)
            self._destroy(instance)
        else:
            # NOTE(GuanQiang): teardown container to avoid resource leak
            if CONF.libvirt.virt_type == 'lxc':
                self._teardown_container(instance)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Power off the instance, then clean up its host resources."""
        self._destroy(instance)
        self.cleanup(context, instance, network_info, block_device_info,
                     destroy_disks, migrate_data)
    def _undefine_domain(self, instance):
        """Remove the persistent libvirt definition of the instance.

        A missing instance is silently ignored; any other libvirt error
        is logged and re-raised.
        """
        try:
            guest = self._host.get_guest(instance)
            try:
                guest.delete_configuration()
            except libvirt.libvirtError as e:
                with excutils.save_and_reraise_exception():
                    errcode = e.get_error_code()
                    LOG.error(_LE('Error from libvirt during undefine. '
                                  'Code=%(errcode)s Error=%(e)s'),
                              {'errcode': errcode, 'e': e}, instance=instance)
        except exception.InstanceNotFound:
            pass
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Clean up all host-side resources left behind by an instance.

        Unplugs VIFs, removes firewall filters (re-destroying the guest if
        it is somehow still running), disconnects and (where encrypted)
        detaches volumes, optionally deletes local disks, and finally
        undefines the libvirt domain.
        """
        if destroy_vifs:
            self._unplug_vifs(instance, network_info, True)

        retry = True
        while retry:
            try:
                self.unfilter_instance(instance, network_info)
            except libvirt.libvirtError as e:
                try:
                    state = self.get_info(instance).state
                except exception.InstanceNotFound:
                    state = power_state.SHUTDOWN

                if state != power_state.SHUTDOWN:
                    LOG.warning(_LW("Instance may be still running, destroy "
                                    "it again."), instance=instance)
                    self._destroy(instance)
                else:
                    retry = False
                    errcode = e.get_error_code()
                    LOG.exception(_LE('Error from libvirt during unfilter. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)
                    reason = "Error unfiltering instance."
                    raise exception.InstanceTerminationFailure(reason=reason)
            except Exception:
                retry = False
                raise
            else:
                retry = False

        # FIXME(wangpan): if the instance is booted again here, such as the
        #                 the soft reboot operation boot it here, it will
        #                 become "running deleted", should we check and destroy
        #                 it at the end of this method?

        # NOTE(vish): we disconnect from volumes regardless
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device']
            if disk_dev is not None:
                disk_dev = disk_dev.rpartition("/")[2]

            if ('data' in connection_info and
                    'volume_id' in connection_info['data']):
                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)

                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)

            try:
                self._disconnect_volume(connection_info, disk_dev)
            except Exception as exc:
                with excutils.save_and_reraise_exception() as ctxt:
                    if destroy_disks:
                        # Don't block on Volume errors if we're trying to
                        # delete the instance as we may be partially created
                        # or deleted
                        ctxt.reraise = False
                        LOG.warning(
                            _LW("Ignoring Volume Error on vol %(vol_id)s "
                                "during delete %(exc)s"),
                            {'vol_id': vol.get('volume_id'), 'exc': exc},
                            instance=instance)

        if destroy_disks:
            # NOTE(haomai): destroy volumes if needed
            if CONF.libvirt.images_type == 'lvm':
                self._cleanup_lvm(instance, block_device_info)
            if CONF.libvirt.images_type == 'rbd':
                self._cleanup_rbd(instance)

        is_shared_block_storage = False
        if migrate_data and 'is_shared_block_storage' in migrate_data:
            is_shared_block_storage = migrate_data.is_shared_block_storage
        if destroy_disks or is_shared_block_storage:
            attempts = int(instance.system_metadata.get('clean_attempts',
                                                        '0'))
            success = self.delete_instance_files(instance)
            # NOTE(mriedem): This is used in the _run_pending_deletes periodic
            # task in the compute manager. The tight coupling is not great...
            instance.system_metadata['clean_attempts'] = str(attempts + 1)
            if success:
                instance.cleaned = True
            instance.save()

        self._undefine_domain(instance)
def _detach_encrypted_volumes(self, instance, block_device_info):
disks = jsonutils.loads(self.get_instance_disk_info(instance,
block_device_info))
encrypted_volumes = filter(dmcrypt.is_encrypted,
[disk['path'] for disk in disks])
for path in encrypted_volumes:
dmcrypt.delete_volume(path)
    def _get_serial_ports_from_guest(self, guest, mode=None):
        """Yield (host, port) tuples for the guest's TCP console sources.

        :param mode: optionally restrict to sources whose mode attribute
            matches (e.g. 'bind' or 'connect').
        """
        xml = guest.get_xml_desc()
        tree = etree.fromstring(xml)

        # The 'serial' device is the base for x86 platforms. Other platforms
        # (e.g. kvm on system z = arch.S390X) can only use 'console' devices.
        xpath_mode = "[@mode='%s']" % mode if mode else ""
        serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
        console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode

        tcp_devices = tree.findall(serial_tcp)
        if len(tcp_devices) == 0:
            tcp_devices = tree.findall(console_tcp)
        for source in tcp_devices:
            yield (source.get("host"), int(source.get("service")))
    @staticmethod
    def _get_rbd_driver():
        # Build an RBD driver from the configured pool/ceph-conf/user.
        return rbd_utils.RBDDriver(
            pool=CONF.libvirt.images_rbd_pool,
            ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
            rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
# NOTE(nic): On revert_resize, the cleanup steps for the root
# volume are handled with an "rbd snap rollback" command,
# and none of this is needed (and is, in fact, harmful) so
# filter out non-ephemerals from the list
if instance.task_state == task_states.RESIZE_REVERTING:
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
disk.endswith('disk.local'))
else:
filter_fn = lambda disk: disk.startswith(instance.uuid)
LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)
def _cleanup_lvm(self, instance, block_device_info):
if instance.get('ephemeral_key_uuid') is not None:
self._detach_encrypted_volumes(instance, block_device_info)
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance.uuid
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
root_helper = utils.get_root_helper()
return connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.volume_use_multipath,
enforce_multipath=True,
host=CONF.host)
    def _cleanup_resize(self, instance, network_info):
        """Remove the '<instance_path>_resize' backup directory and any
        leftover resize snapshot; if the instance no longer lives on this
        host, also undefine its domain and tear down VIFs and firewall.
        """
        target = libvirt_utils.get_instance_path(instance) + '_resize'
        if os.path.exists(target):
            # Deletion can fail over NFS, so retry the deletion as required.
            # Set maximum attempt as 5, most test can remove the directory
            # for the second time.
            utils.execute('rm', '-rf', target, delay_on_retry=True,
                          attempts=5)
        root_disk = self.image_backend.image(instance, 'disk')
        # TODO(nic): Set ignore_errors=False in a future release.
        # It is set to True here to avoid any upgrade issues surrounding
        # instances being in pending resize state when the software is updated;
        # in that case there will be no snapshot to remove. Once it can be
        # reasonably assumed that no such instances exist in the wild
        # anymore, it should be set back to False (the default) so it will
        # throw errors, like it should.
        if root_disk.exists():
            root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
                                  ignore_errors=True)
        if instance.host != CONF.host:
            # The instance now runs elsewhere: drop the libvirt domain and
            # the networking/firewall state this host still holds for it.
            self._undefine_domain(instance)
            self.unplug_vifs(instance, network_info)
            self.unfilter_instance(instance, network_info)
def _get_volume_driver(self, connection_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def _connect_volume(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_config(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
return vol_driver.get_config(connection_info, disk_info)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def _check_discard_for_attach_volume(self, conf, instance):
if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio':
LOG.debug('Attempting to attach volume %(id)s with discard '
'support enabled to an instance using an '
'unsupported configuration. target_bus = '
'%(bus)s. Trim commands will not be issued to '
'the storage device.',
{'bus': conf.target_bus,
'id': conf.serial},
instance=instance)
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach a Cinder volume to the instance.

        Connects the volume on the host, builds the libvirt disk config,
        optionally attaches an encryptor, then hot-plugs the disk into
        the guest (persistently; also live when running or paused).

        :raises: exception.InvalidHypervisorType when the volume requests
                 a custom block size on a non-qemu/kvm hypervisor.
        :raises: exception.DeviceIsBusy when libvirt reports the target
                 device as busy.
        """
        guest = self._host.get_guest(instance)
        # e.g. "/dev/vdb" -> "vdb"
        disk_dev = mountpoint.rpartition("/")[2]
        bdm = {
            'device_name': disk_dev,
            'disk_bus': disk_bus,
            'device_type': device_type}
        # Note(cfb): If the volume has a custom block size, check that
        # we are using QEMU/KVM and libvirt >= 0.10.2. The
        # presence of a block size is considered mandatory by
        # cinder so we fail if we can't honor the request.
        data = {}
        if ('data' in connection_info):
            data = connection_info['data']
        if ('logical_block_size' in data or 'physical_block_size' in data):
            if ((CONF.libvirt.virt_type != "kvm" and
                 CONF.libvirt.virt_type != "qemu")):
                msg = _("Volume sets block size, but the current "
                        "libvirt hypervisor '%s' does not support custom "
                        "block size") % CONF.libvirt.virt_type
                raise exception.InvalidHypervisorType(msg)
        disk_info = blockinfo.get_info_from_bdm(
            instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
        self._connect_volume(connection_info, disk_info)
        conf = self._get_volume_config(connection_info, disk_info)
        self._set_cache_mode(conf)
        self._check_discard_for_attach_volume(conf, instance)
        try:
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            if encryption:
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.attach_volume(context, **encryption)
            guest.attach_device(conf, persistent=True, live=live)
        except Exception as ex:
            LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
                          mountpoint, instance=instance)
            if isinstance(ex, libvirt.libvirtError):
                errcode = ex.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                    self._disconnect_volume(connection_info, disk_dev)
                    raise exception.DeviceIsBusy(device=disk_dev)
            # Undo the host-side connection before re-raising the original
            # error so the volume attachment is not leaked.
            with excutils.save_and_reraise_exception():
                self._disconnect_volume(connection_info, disk_dev)
    def _swap_volume(self, guest, disk_path, new_path, resize_to):
        """Copy the guest's active disk at *disk_path* onto *new_path* via
        blockRebase, pivot the guest onto the copy, and optionally resize
        it to *resize_to* GB. The guest keeps running throughout.
        """
        dev = guest.get_block_device(disk_path)
        # Save a copy of the domain's persistent XML file; it is restored
        # in the finally block after the transient-only rebase completes.
        xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            dev.abort_job()
        except Exception:
            pass
        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            #             domains, so we need to temporarily undefine it.
            #             If any part of this block fails, the domain is
            #             re-defined regardless.
            if guest.has_persistent_configuration():
                guest.delete_configuration()
            # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
            # allow writing to existing external volume file
            dev.rebase(new_path, copy=True, reuse_ext=True)
            while dev.wait_for_job():
                time.sleep(0.5)
            # Pivot the guest onto the copy once the mirror job is ready.
            dev.abort_job(pivot=True)
            if resize_to:
                # NOTE(alex_xu): domain.blockJobAbort isn't sync call. This
                # is why we wait for the block job to be cleaned up before
                # issuing the resize.
                while dev.wait_for_job(wait_for_job_clean=True):
                    time.sleep(0.5)
                dev.resize(resize_to * units.Gi / units.Ki)
        finally:
            self._host.write_instance_config(xml)
    def swap_volume(self, old_connection_info,
                    new_connection_info, instance, mountpoint, resize_to):
        """Replace the volume attached at *mountpoint* with a new one.

        Connects the new volume, persists its connection info in the BDM,
        live-copies the guest disk onto it, then disconnects the old one.

        :raises: exception.DiskNotFound when the guest has no disk at the
                 mountpoint; NotImplementedError when the new volume's
                 driver does not expose a host device path.
        """
        guest = self._host.get_guest(instance)
        # e.g. "/dev/vdb" -> "vdb"
        disk_dev = mountpoint.rpartition("/")[2]
        if not guest.get_disk(disk_dev):
            raise exception.DiskNotFound(location=disk_dev)
        disk_info = {
            'dev': disk_dev,
            'bus': blockinfo.get_disk_bus_for_disk_dev(
                CONF.libvirt.virt_type, disk_dev),
            'type': 'disk',
        }
        self._connect_volume(new_connection_info, disk_info)
        conf = self._get_volume_config(new_connection_info, disk_info)
        if not conf.source_path:
            self._disconnect_volume(new_connection_info, disk_dev)
            raise NotImplementedError(_("Swap only supports host devices"))
        # Save the new connection info in the BDM before performing the
        # swap so the record matches the volume the guest ends up using.
        volume_id = new_connection_info.get('serial')
        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
            nova_context.get_admin_context(), volume_id, instance.uuid)
        driver_bdm = driver_block_device.convert_volume(bdm)
        driver_bdm['connection_info'] = new_connection_info
        driver_bdm.save()
        self._swap_volume(guest, disk_dev, conf.source_path, resize_to)
        self._disconnect_volume(old_connection_info, disk_dev)
    def _get_existing_domain_xml(self, instance, network_info,
                                 block_device_info=None):
        """Return the domain XML for *instance*.

        Prefers the XML of the live libvirt domain; when the domain is
        gone it is regenerated from the instance's metadata instead.
        """
        try:
            guest = self._host.get_guest(instance)
            xml = guest.get_xml_desc()
        except exception.InstanceNotFound:
            # Domain no longer defined: rebuild equivalent guest XML from
            # the instance configuration.
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                instance.image_meta,
                                                block_device_info)
            xml = self._get_guest_xml(nova_context.get_admin_context(),
                                      instance, network_info, disk_info,
                                      instance.image_meta,
                                      block_device_info=block_device_info)
        return xml
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach a volume from the guest, then disconnect it on the host.

        The libvirt device detach is retried (device removal from a live
        guest is asynchronous). A vanished instance or domain is tolerated
        so the host-side disconnect still runs.

        :raises: exception.DiskNotFound when the guest has no such device.
        """
        disk_dev = mountpoint.rpartition("/")[2]
        try:
            guest = self._host.get_guest(instance)
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            wait_for_detach = guest.detach_device_with_retry(guest.get_disk,
                                                             disk_dev,
                                                             persistent=True,
                                                             live=live)
            if encryption:
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.detach_volume(**encryption)
            wait_for_detach()
        except exception.InstanceNotFound:
            # Fall through: the volume must still be disconnected on the
            # host even though the instance is gone.
            LOG.warning(_LW("During detach_volume, instance disappeared."),
                        instance=instance)
        except exception.DeviceNotFound:
            raise exception.DiskNotFound(location=disk_dev)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                # NOTE(vish): the domain is already gone; continue so the
                # host-side volume connection is still torn down.
                LOG.warning(_LW("During detach_volume, instance disappeared."),
                            instance=instance)
            else:
                raise
        self._disconnect_volume(connection_info, disk_dev)
    def attach_interface(self, instance, image_meta, vif):
        """Hot-plug a network interface into the guest.

        Plugs the VIF on the host, sets up basic firewall filtering, then
        attaches the interface device to the domain (live when running or
        paused). On libvirt failure the VIF is unplugged again.

        :raises: exception.InterfaceAttachFailed on libvirt errors.
        """
        guest = self._host.get_guest(instance)
        self.vif_driver.plug(instance, vif)
        self.firewall_driver.setup_basic_filtering(instance, [vif])
        cfg = self.vif_driver.get_config(instance, vif, image_meta,
                                         instance.flavor,
                                         CONF.libvirt.virt_type,
                                         self._host)
        try:
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            guest.attach_device(cfg, persistent=True, live=live)
        except libvirt.libvirtError:
            LOG.error(_LE('attaching network adapter failed.'),
                      instance=instance, exc_info=True)
            # Roll back the host-side plug so the VIF is not leaked.
            self.vif_driver.unplug(instance, vif)
            raise exception.InterfaceAttachFailed(
                    instance_uuid=instance.uuid)
    def detach_interface(self, instance, vif):
        """Detach a network interface from the guest.

        Unplugs the VIF on the host first, then detaches the device from
        the domain (live when running or paused). Races with Neutron port
        deletion are tolerated: a missing domain or a missing network
        device only produces a warning.

        :raises: exception.InterfaceDetachFailed when the device is still
                 present on the guest but libvirt failed to detach it.
        """
        guest = self._host.get_guest(instance)
        cfg = self.vif_driver.get_config(instance, vif,
                                         instance.image_meta,
                                         instance.flavor,
                                         CONF.libvirt.virt_type, self._host)
        try:
            self.vif_driver.unplug(instance, vif)
            state = guest.get_power_state(self._host)
            live = state in (power_state.RUNNING, power_state.PAUSED)
            guest.detach_device(cfg, persistent=True, live=live)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warning(_LW("During detach_interface, "
                                "instance disappeared."),
                            instance=instance)
            else:
                # NOTE(mriedem): When deleting an instance and using Neutron,
                # we can be racing against Neutron deleting the port and
                # sending the vif-deleted event which then triggers a call to
                # detach the interface, so we might have failed because the
                # network device no longer exists. Libvirt will fail with
                # "operation failed: no matching network device was found"
                # which unfortunately does not have a unique error code so we
                # need to look up the interface by MAC: if it's not found on
                # the guest, treat this as the benign race and only warn.
                mac = vif.get('address')
                interface = guest.get_interface_by_mac(mac)
                if interface:
                    LOG.error(_LE('detaching network adapter failed.'),
                              instance=instance, exc_info=True)
                    raise exception.InterfaceDetachFailed(
                            instance_uuid=instance.uuid)
                LOG.warning(_LW('Detaching interface %(mac)s failed because '
                                'the device is no longer found on the guest.'),
                            {'mac': mac}, instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance.kernel_id,
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
'ramdisk_id': instance.ramdisk_id,
}
}
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type
if image_meta.disk_format == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
if image_meta.obj_attr_is_set("container_format"):
metadata['container_format'] = image_meta.container_format
else:
metadata['container_format'] = "bare"
return metadata
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create a snapshot of the instance and upload it to *image_id*.

        First tries a backend direct snapshot (e.g. Ceph-native); when
        that is unsupported or fails, falls back to a live or cold
        snapshot extracted into a temp file and uploaded to the image
        service.

        :raises: exception.InstanceNotRunning when the domain is gone.
        """
        try:
            guest = self._host.get_guest(instance)
            virt_dom = guest._domain
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)
        snapshot = self._image_api.get(context, image_id)
        disk_path, source_format = libvirt_utils.find_disk(virt_dom)
        source_type = libvirt_utils.get_disk_type_from_path(disk_path)
        if source_type is None:
            # The path-based probe could not classify the disk
            # (because we just gave libvirt the mounted filesystem), or the
            # path, so source_type is still going to be None. In this case,
            # snapshot_backend is going to default to CONF.libvirt.images_type
            # below, which is still safe.
            source_type = source_format
        image_format = CONF.libvirt.snapshot_image_format or source_type
        # NOTE(bfilippov): save lvm and rbd as raw
        if image_format == 'lvm' or image_format == 'rbd':
            image_format = 'raw'
        metadata = self._create_snapshot_metadata(instance.image_meta,
                                                  instance,
                                                  image_format,
                                                  snapshot['name'])
        snapshot_name = uuid.uuid4().hex
        state = guest.get_power_state(self._host)
        # NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
        #               cold snapshots. Currently, checking for encryption is
        #               redundant because LVM supports only cold snapshots.
        #               It is necessary in case this situation changes in the
        #               future.
        # NOTE(review): ('lvm') is a plain string, not a 1-tuple, so this is
        # a substring test; it still excludes exact 'lvm' — confirm intent.
        if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)
             and source_type not in ('lvm')
             and not CONF.ephemeral_storage_encryption.enabled
             and not CONF.workarounds.disable_libvirt_livesnapshot):
            live_snapshot = True
            # Abort is an idempotent operation, so make sure any block
            # jobs which may have failed are ended. This operation also
            # confirms the running instance, as opposed to the system as a
            # whole, has a new enough version of the hypervisor (bug 1193146).
            try:
                guest.get_block_device(disk_path).abort_job()
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
                    live_snapshot = False
                else:
                    pass
        else:
            live_snapshot = False
        # NOTE(rmk): We cannot perform live snapshots when a managedSave
        #            file is present, so we will use the cold/legacy method
        #            for instances which are shutdown.
        if state == power_state.SHUTDOWN:
            live_snapshot = False
        self._prepare_domain_for_snapshot(context, live_snapshot, state,
                                          instance)
        snapshot_backend = self.image_backend.snapshot(instance,
                                                       disk_path,
                                                       image_type=source_type)
        if live_snapshot:
            LOG.info(_LI("Beginning live snapshot process"),
                     instance=instance)
        else:
            LOG.info(_LI("Beginning cold snapshot process"),
                     instance=instance)
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        try:
            # Fast path: let the storage backend snapshot directly into
            # the image service, with no local extract/upload.
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            metadata['location'] = snapshot_backend.direct_snapshot(
                context, snapshot_name, image_format, image_id,
                instance.image_ref)
            self._snapshot_domain(context, live_snapshot, virt_dom, state,
                                  instance)
            self._image_api.update(context, image_id, metadata,
                                   purge_props=False)
        except (NotImplementedError, exception.ImageUnacceptable,
                exception.Forbidden) as e:
            if type(e) != NotImplementedError:
                LOG.warning(_LW('Performing standard snapshot because direct '
                                'snapshot failed: %(error)s'), {'error': e})
            failed_snap = metadata.pop('location', None)
            if failed_snap:
                failed_snap = {'url': str(failed_snap)}
            snapshot_backend.cleanup_direct_snapshot(failed_snap,
                                                     also_destroy_volume=True,
                                                     ignore_errors=True)
            update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
                              expected_state=task_states.IMAGE_UPLOADING)
            # TODO(nic): possibly abstract this out to the snapshot_backend
            if source_type == 'rbd' and live_snapshot:
                # Standard snapshot uses qemu-img convert from RBD which is
                # not safe to run with live_snapshot.
                live_snapshot = False
                # Suspend the guest, so this is no longer a live snapshot
                self._prepare_domain_for_snapshot(context, live_snapshot,
                                                  state, instance)
            snapshot_directory = CONF.libvirt.snapshots_directory
            fileutils.ensure_tree(snapshot_directory)
            with utils.tempdir(dir=snapshot_directory) as tmpdir:
                try:
                    out_path = os.path.join(tmpdir, snapshot_name)
                    if live_snapshot:
                        # NOTE(xqueralt): libvirt needs o+x in the tempdir
                        os.chmod(tmpdir, 0o701)
                        self._live_snapshot(context, instance, guest,
                                            disk_path, out_path, source_format,
                                            image_format, instance.image_meta)
                    else:
                        snapshot_backend.snapshot_extract(out_path,
                                                          image_format)
                finally:
                    # Always resume/re-create the domain, even if the
                    # extract failed.
                    self._snapshot_domain(context, live_snapshot, virt_dom,
                                          state, instance)
                LOG.info(_LI("Snapshot extracted, beginning image upload"),
                         instance=instance)
                # Upload that image to the image service
                update_task_state(task_state=task_states.IMAGE_UPLOADING,
                                  expected_state=task_states.IMAGE_PENDING_UPLOAD)
                with libvirt_utils.file_open(out_path) as image_file:
                    self._image_api.update(context,
                                           image_id,
                                           metadata,
                                           image_file)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to snapshot image"))
                failed_snap = metadata.pop('location', None)
                if failed_snap:
                    failed_snap = {'url': str(failed_snap)}
                snapshot_backend.cleanup_direct_snapshot(
                    failed_snap, also_destroy_volume=True,
                    ignore_errors=True)
        LOG.info(_LI("Snapshot image upload complete"), instance=instance)
def _prepare_domain_for_snapshot(self, context, live_snapshot, state,
instance):
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self.suspend(context, instance)
def _snapshot_domain(self, context, live_snapshot, virt_dom, state,
instance):
guest = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
guest = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
guest = self._create_domain(domain=virt_dom, pause=True)
if guest is not None:
self._attach_pci_devices(
guest, pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, guest)
def _can_set_admin_password(self, image_meta):
if (CONF.libvirt.virt_type not in ('kvm', 'qemu') or
not self._host.has_min_version(MIN_LIBVIRT_SET_ADMIN_PASSWD)):
raise exception.SetAdminPasswdNotSupported()
hw_qga = image_meta.properties.get('hw_qemu_guest_agent', '')
if not strutils.bool_from_string(hw_qga):
raise exception.QemuGuestAgentNotEnabled()
def set_admin_password(self, instance, new_pass):
self._can_set_admin_password(instance.image_meta)
guest = self._host.get_guest(instance)
user = instance.image_meta.properties.get("os_admin_user")
if not user:
if instance.os_type == "windows":
user = "Administrator"
else:
user = "root"
try:
guest.set_user_password(user, new_pass)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while set password for username '
'"%(user)s": [Error Code %(error_code)s] %(ex)s')
% {'user': user, 'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
def _can_quiesce(self, instance, image_meta):
if (CONF.libvirt.virt_type not in ('kvm', 'qemu') or
not self._host.has_min_version(MIN_LIBVIRT_FSFREEZE_VERSION)):
raise exception.InstanceQuiesceNotSupported(
instance_id=instance.uuid)
if not image_meta.properties.get('hw_qemu_guest_agent', False):
raise exception.QemuGuestAgentNotEnabled()
def _set_quiesced(self, context, instance, image_meta, quiesced):
self._can_quiesce(instance, image_meta)
try:
guest = self._host.get_guest(instance)
if quiesced:
guest.freeze_filesystems()
else:
guest.thaw_filesystems()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while quiescing %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
def quiesce(self, context, instance, image_meta):
self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
self._set_quiesced(context, instance, image_meta, False)
    def _live_snapshot(self, context, instance, guest, disk_path, out_path,
                       source_format, image_format, image_meta):
        """Snapshot a running guest without stopping it.

        Creates a COW delta on top of the disk's backing file, mirrors
        the active disk into it via blockRebase (optionally freezing the
        guest filesystems around the copy), then extracts the delta to
        *out_path* in *image_format*.
        """
        dev = guest.get_block_device(disk_path)
        # Save a copy of the domain's persistent XML file; it is restored
        # in the finally block below.
        xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            dev.abort_job()
        except Exception:
            pass
        src_disk_size = libvirt_utils.get_disk_size(disk_path,
                                                    format=source_format)
        src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
                                                        format=source_format,
                                                        basename=False)
        disk_delta = out_path + '.delta'
        libvirt_utils.create_cow_image(src_back_path, disk_delta,
                                       src_disk_size)
        quiesced = False
        try:
            # Try to freeze guest filesystems for a consistent snapshot;
            # only mandatory when the image sets os_require_quiesce.
            self._set_quiesced(context, instance, image_meta, True)
            quiesced = True
        except exception.NovaException as err:
            if image_meta.properties.get('os_require_quiesce', False):
                raise
            LOG.info(_LI('Skipping quiescing instance: %(reason)s.'),
                     {'reason': err}, instance=instance)
        try:
            # Temporarily undefine a persistent domain (blockRebase works
            # on transient domains only — see _swap_volume); the saved
            # XML is written back in the finally block regardless.
            if guest.has_persistent_configuration():
                guest.delete_configuration()
            dev.rebase(disk_delta, copy=True, reuse_ext=True, shallow=True)
            while dev.wait_for_job():
                time.sleep(0.5)
            dev.abort_job()
            libvirt_utils.chown(disk_delta, os.getuid())
        finally:
            # Always restore the persistent definition and thaw the guest.
            self._host.write_instance_config(xml)
            if quiesced:
                self._set_quiesced(context, instance, image_meta, False)
        # Convert the delta into the requested output format.
        libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
                                       out_path, image_format)
def cdrom_list(self, context, instance):
cdroms = []
guest = self._host.get_guest(instance)
domain = guest._domain
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
disks = xml_doc.findall('devices/disk')
for disk in disks:
if disk.get('device') == 'cdrom':
source = disk.find('source')
target = disk.find('target')
if target is not None:
cdrom ={}
device = target.get('dev')
cdrom['device_name'] = device
if source is not None:
disk_path = source.get('file')
image_id = os.path.basename(disk_path)
else:
image_id = ''
cdrom['image_id'] = image_id
cdroms.append(cdrom)
return cdroms
    def attach_cdrom(self, context, instance, device, image_id):
        """Insert/replace the media of the instance's cdrom *device*.

        Builds the cdrom disk config for *image_id* and applies it with
        updateDeviceFlags: live + persistent config when the domain is
        active, config-only when it is shut off. On success the domain
        XML is rewritten to the instance's libvirt.xml on disk.

        :returns: {'device_name': device, 'image_id': image_id}
        """
        cdrom_device={}
        guest = self._host.get_guest(instance)
        domain = guest._domain
        cdrom_config = self._get_guest_cdrom_config(context, instance, image_id, device)
        is_updated = False
        is_active = domain.isActive()
        if is_active == 1:
            # Running domain: update both the live guest and its config.
            domain.updateDeviceFlags(cdrom_config.to_xml(),
                libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
            is_updated = True
        elif is_active == 0:
            domain.updateDeviceFlags(cdrom_config.to_xml(), libvirt.VIR_DOMAIN_AFFECT_CONFIG)
            is_updated = True
        if is_updated:
            # Persist the new domain XML so the change survives a
            # re-definition of the domain from libvirt.xml.
            instance_dir = libvirt_utils.get_instance_path(instance)
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
            xml = domain.XMLDesc(0)
            libvirt_utils.write_to_file(xml_path, xml)
        cdrom_device['device_name'] = device
        cdrom_device['image_id'] = image_id
        return cdrom_device
def has_cdrom(self,instance, disk_info):
disk_mapping = disk_info['mapping']
cdxml = None
inst_type = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
if 'disk' in disk_mapping:
disk = disk_mapping['disk']
if disk['type'] == 'cdrom':
image = self.image_backend.image(instance,
'disk',
None)
cdxml = image.libvirt_info(disk['bus'],
disk['dev'],
disk['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._get_hypervisor_version())
if 'disk.local' in disk_mapping:
disklocal = disk_mapping['disk.local']
if disklocal['type'] == 'cdrom':
image = self.image_backend.image(instance,
'disk.local',
None)
cdxml = image.libvirt_info(disklocal['bus'],
disklocal['dev'],
disklocal['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._get_hypervisor_version())
return cdxml
def _get_guest_cdrom_config(self, context, instance, image_id,
device, enable_cache_image=True):
cdrom_config = vconfig.LibvirtConfigGuestDisk()
cdrom_config.source_type = 'file'
cdrom_config.source_device = 'cdrom'
cdrom_config.target_bus = 'ide'
if device:
cdrom_config.target_dev = device
cdrom_config.readonly = True
cdrom_config.driver_name = 'qemu'
cdrom_config.driver_format = 'raw'
if image_id != '0':
fake_image_id = imagecache.get_cache_fname(image_id)
if enable_cache_image:
imagecache.cache_image(libvirt_utils.fetch_image,
fake_image_id,
context=context,
image_id=image_id)
base_url = self.image_cache_manager._get_base()
image_url = os.path.join(base_url, fake_image_id)
else:
image_url = ''
cdrom_config.source_path = image_url
return cdrom_config
def dev_snapshot_create(self, context, instance, name):
guest = self._host.get_guest(instance)
domain = guest._domain
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = []
network_disks_to_snap = []
disks_to_skip = []
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
disk_info = {
'dev': guest_disk.target_dev,
'serial': guest_disk.serial,
'current_file': guest_disk.source_path,
'source_protocol': guest_disk.source_protocol,
'source_name': guest_disk.source_name,
'source_hosts': guest_disk.source_hosts,
'source_ports': guest_disk.source_ports
}
if guest_disk.target_dev == 'vda':
xwl_target_dev='vda'
disks_to_snap.append(guest_disk.source_path)
if guest_disk.target_dev == 'hda':
xwl_target_dev='hda'
disks_to_snap.append(guest_disk.source_path)
if not disks_to_snap :
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
if name:
uname = repr(name)
snapshot_name =unicode(uname, 'unicode-escape')
else:
snapshot_name = None
if xwl_target_dev == 'hda':
for current_name in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
if snapshot_name:
snapshot.name = snapshot_name
snap_disk.name = 'hda'
snap_disk.snapshot = 'internal'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
else:
for current_name in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
if snapshot_name:
snapshot.name = snapshot_name
snap_disk.name = 'vda'
snap_disk.snapshot = 'internal'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
snap_flags = 0
try:
guest._domain.snapshotCreateXML(snapshot_xml, snap_flags)
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced dev_snapshot, '
'attempting again with quiescing disabled.'))
try:
guest._domain.snapshotCreateXML(snapshot_xml, snap_flags )
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create dev_snapshot, '
'failing dev_snapshot operation.'))
raise
def dev_snapshot_list(self, context, instance):
snaps = []
try:
guest = self._host.get_guest(instance)
snapshotlist=guest._domain.listAllSnapshots(0)
except exception.InstanceNotFound:
return snaps
for snapshot in snapshotlist:
Desc = snapshot.getName()
try:
Desctime = time.strftime("%y-%m-%d %H:%M:%S", time.localtime(string.atof(Desc)))
name = {}
name['dev_snapshot_name'] = Desctime
except:
name = {}
Desctime = Desc[2:-1]
name['dev_snapshot_name'] = Desctime
snaps.append(name)
return snaps
def dev_snapshot_delete(self, context, instance, name):
try:
guest = self._host.get_guest(instance)
timeName = time.mktime(time.strptime(name, "%y-%m-%d %H:%M:%S"))
tem='%.0f' % timeName
snapshot = guest._domain.snapshotLookupByName(tem,0)
snapshot.delete(0)
except:
stringName = repr(name)
unicodeName = unicode(stringName,'unicode-escape')
tem =unicodeName.encode('utf8')
snapshot = guest._domain.snapshotLookupByName(tem,0)
snapshot.delete(0)
def dev_snapshot_revert(self, context, instance, name):
try:
guest = self._host.get_guest(instance)
timeName = time.mktime(time.strptime(name, "%y-%m-%d %H:%M:%S"))
tem='%.0f' % timeName
snapshot = guest._domain.snapshotLookupByName(tem,0)
guest._domain.revertToSnapshot(snapshot,0)
except:
stringName = repr(name)
unicodeName = unicode(stringName,'unicode-escape')
tem =unicodeName.encode('utf8')
snapshot = guest._domain.snapshotLookupByName(tem,0)
guest._domain.revertToSnapshot(snapshot,0)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status '
'to volume service.'))
    def _volume_snapshot_create(self, context, instance, guest,
                                volume_id, new_file):
        """Create an external qcow2 snapshot of the disk backing
        *volume_id*, redirecting writes to *new_file*. All other disks
        are explicitly excluded from the snapshot. A quiesced snapshot
        is attempted first, then retried without quiescing.

        :raises: exception.NovaException when no disk matches the volume;
                 libvirt.libvirtError when both snapshot attempts fail.
        """
        xml = guest.get_xml_desc()
        xml_doc = etree.fromstring(xml)
        device_info = vconfig.LibvirtConfigGuest()
        device_info.parse_dom(xml_doc)
        disks_to_snap = []          # local disks: (current path, new path)
        network_disks_to_snap = []  # network disks: (disk_info, new_file)
        disks_to_skip = []          # devices not backed by volume_id
        for guest_disk in device_info.devices:
            if (guest_disk.root_name != 'disk'):
                continue
            if (guest_disk.target_dev is None):
                continue
            # The disk serial carries the Cinder volume id; anything that
            # doesn't match is skipped (snapshot='no' below).
            if (guest_disk.serial is None or guest_disk.serial != volume_id):
                disks_to_skip.append(guest_disk.target_dev)
                continue
            disk_info = {
                'dev': guest_disk.target_dev,
                'serial': guest_disk.serial,
                'current_file': guest_disk.source_path,
                'source_protocol': guest_disk.source_protocol,
                'source_name': guest_disk.source_name,
                'source_hosts': guest_disk.source_hosts,
                'source_ports': guest_disk.source_ports
            }
            # Determine path for new_file based on the current path.
            if disk_info['current_file'] is not None:
                current_file = disk_info['current_file']
                new_file_path = os.path.join(os.path.dirname(current_file),
                                             new_file)
                disks_to_snap.append((current_file, new_file_path))
            elif disk_info['source_protocol'] in ('gluster', 'netfs'):
                network_disks_to_snap.append((disk_info, new_file))
        if not disks_to_snap and not network_disks_to_snap:
            msg = _('Found no disk to snapshot.')
            raise exception.NovaException(msg)
        snapshot = vconfig.LibvirtConfigGuestSnapshot()
        for current_name, new_filename in disks_to_snap:
            snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
            snap_disk.name = current_name
            snap_disk.source_path = new_filename
            snap_disk.source_type = 'file'
            snap_disk.snapshot = 'external'
            snap_disk.driver_name = 'qcow2'
            snapshot.add_disk(snap_disk)
        for disk_info, new_filename in network_disks_to_snap:
            snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
            snap_disk.name = disk_info['dev']
            snap_disk.source_type = 'network'
            snap_disk.source_protocol = disk_info['source_protocol']
            snap_disk.snapshot = 'external'
            snap_disk.source_path = new_filename
            # Keep the new file in the same share directory as the old.
            old_dir = disk_info['source_name'].split('/')[0]
            snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
            snap_disk.source_hosts = disk_info['source_hosts']
            snap_disk.source_ports = disk_info['source_ports']
            snapshot.add_disk(snap_disk)
        for dev in disks_to_skip:
            snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
            snap_disk.name = dev
            snap_disk.snapshot = 'no'
            snapshot.add_disk(snap_disk)
        snapshot_xml = snapshot.to_xml()
        LOG.debug("snap xml: %s", snapshot_xml, instance=instance)
        try:
            guest.snapshot(snapshot, no_metadata=True, disk_only=True,
                           reuse_ext=True, quiesce=True)
            return
        except libvirt.libvirtError:
            LOG.exception(_LE('Unable to create quiesced VM snapshot, '
                              'attempting again with quiescing disabled.'),
                          instance=instance)
        try:
            guest.snapshot(snapshot, no_metadata=True, disk_only=True,
                           reuse_ext=True, quiesce=False)
        except libvirt.libvirtError:
            LOG.exception(_LE('Unable to create VM snapshot, '
                              'failing volume_snapshot operation.'),
                          instance=instance)
            raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
driver_bdm = driver_block_device.convert_volume(bdm)
if driver_bdm:
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, guest,
volume_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_create, '
'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
@staticmethod
def _rebase_with_qemu_img(guest, device, active_disk_object,
rebase_base):
# every protocol. So let's be safe.
active_protocol = active_disk_object.source_protocol
if active_protocol is not None:
msg = _("Something went wrong when deleting a volume snapshot: "
"rebasing a %(protocol)s network disk using qemu-img "
"has not been fully tested") % {'protocol':
active_protocol}
LOG.error(msg)
raise exception.NovaException(msg)
if rebase_base is None:
backing_file = ""
qemu_img_extra_arg = []
else:
backing_file = rebase_base
b_file_fmt = images.qemu_img_info(backing_file).file_format
qemu_img_extra_arg = ['-F', b_file_fmt]
qemu_img_extra_arg.append(active_disk_object.source_path)
utils.execute("qemu-img", "rebase", "-b", backing_file,
*qemu_img_extra_arg)
    def _volume_snapshot_delete(self, context, instance, volume_id,
                                snapshot_id, delete_info=None):
        """Delete a volume snapshot from a qcow2 backing chain.

        Depending on ``delete_info['merge_target_file']`` this performs
        either a libvirt blockRebase (merge data "down" into the active
        file) or a blockCommit (merge an intermediate file "up" into its
        backing file), blocking until the block job completes.

        :param delete_info: dict with keys ``type`` (must be 'qcow2'),
            ``file_to_merge`` and ``merge_target_file``.
        :raises exception.NovaException: for bad delete_info or when the
            volume's disk cannot be found on the domain.
        :raises exception.InstanceNotRunning: if the domain is gone.
        """
        LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info,
                  instance=instance)

        if delete_info['type'] != 'qcow2':
            msg = _('Unknown delete_info type %s') % delete_info['type']
            raise exception.NovaException(msg)

        try:
            guest = self._host.get_guest(instance)
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)

        # Locate the guest disk that belongs to this volume by matching
        # the disk's serial against the volume id.
        my_dev = None
        active_disk = None

        xml = guest.get_xml_desc()
        xml_doc = etree.fromstring(xml)

        device_info = vconfig.LibvirtConfigGuest()
        device_info.parse_dom(xml_doc)

        active_disk_object = None

        for guest_disk in device_info.devices:
            if (guest_disk.root_name != 'disk'):
                continue

            if (guest_disk.target_dev is None or guest_disk.serial is None):
                continue

            if guest_disk.serial == volume_id:
                my_dev = guest_disk.target_dev

                active_disk = guest_disk.source_path
                active_protocol = guest_disk.source_protocol
                active_disk_object = guest_disk
                break

        # NOTE: active_protocol is only bound when a disk matched; the
        # short-circuiting 'or' keeps this safe when my_dev is None.
        if my_dev is None or (active_disk is None and active_protocol is None):
            msg = _('Disk with id: %s '
                    'not found attached to instance.') % volume_id
            LOG.debug('Domain XML: %s', xml, instance=instance)
            raise exception.NovaException(msg)

        LOG.debug("found device at %s", my_dev, instance=instance)

        def _get_snap_dev(filename, backing_store):
            # Translate *filename* into libvirt's indexed device
            # notation (e.g. 'vda[1]') by walking the backing chain.
            # Needed for network (e.g. RBD) disks where plain file
            # paths are ambiguous.
            if filename is None:
                msg = _('filename cannot be None')
                raise exception.NovaException(msg)

            LOG.debug("XML: %s", xml)

            LOG.debug("active disk object: %s", active_disk_object)

            filename_to_merge = filename
            matched_name = None
            b = backing_store
            index = None

            # Index 0 is the active image itself.
            current_filename = active_disk_object.source_name.split('/')[1]
            if current_filename == filename_to_merge:
                return my_dev + '[0]'

            while b is not None:
                source_filename = b.source_name.split('/')[1]
                if source_filename == filename_to_merge:
                    LOG.debug('found match: %s', b.source_name)
                    matched_name = b.source_name
                    index = b.index
                    break

                b = b.backing_store

            if matched_name is None:
                msg = _('no match found for %s') % (filename_to_merge)
                raise exception.NovaException(msg)

            LOG.debug('index of match (%s) is %s', b.source_name, index)

            my_snap_dev = '%s[%s]' % (my_dev, index)
            return my_snap_dev

        if delete_info['merge_target_file'] is None:
            # No merge target: merge the snapshot into the active image
            # via blockRebase (or qemu-img when the guest is down).
            rebase_disk = my_dev
            rebase_base = delete_info['file_to_merge']
            if (active_protocol is not None) and (rebase_base is not None):
                rebase_base = _get_snap_dev(rebase_base,
                                            active_disk_object.backing_store)

            # Relative rebase (keeping relative backing paths) is only
            # usable when the running libvirt exposes the flag.
            try:
                libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE
                relative = rebase_base is not None
            except AttributeError:
                LOG.warning(_LW(
                    "Relative blockrebase support was not detected. "
                    "Continuing with old behaviour."))
                relative = False

            LOG.debug(
                'disk: %(disk)s, base: %(base)s, '
                'bw: %(bw)s, relative: %(relative)s',
                {'disk': rebase_disk,
                 'base': rebase_base,
                 'bw': libvirt_guest.BlockDevice.REBASE_DEFAULT_BANDWIDTH,
                 'relative': str(relative)}, instance=instance)

            dev = guest.get_block_device(rebase_disk)
            if guest.is_active():
                result = dev.rebase(rebase_base, relative=relative)
                if result == 0:
                    LOG.debug('blockRebase started successfully',
                              instance=instance)

                # Poll until the block job finishes (raises on error).
                while dev.wait_for_job(abort_on_error=True):
                    LOG.debug('waiting for blockRebase job completion',
                              instance=instance)
                    time.sleep(0.5)

            # If the guest is not running, libvirt cannot do a rebase.
            # In that case, let's ask qemu-img to rebase the disk.
            else:
                LOG.debug('Guest is not running so doing a block rebase '
                          'using "qemu-img rebase"', instance=instance)
                self._rebase_with_qemu_img(guest, dev, active_disk_object,
                                           rebase_base)

        else:
            # A merge target exists: commit the overlay into its backing
            # file via blockCommit.
            my_snap_base = None
            my_snap_top = None
            commit_disk = my_dev

            # Online commit requires relative-blockcommit support.
            try:
                libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
            except AttributeError:
                ver = '.'.join(
                    [str(x) for x in
                     MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION])
                msg = _("Relative blockcommit support was not detected. "
                        "Libvirt '%s' or later is required for online "
                        "deletion of file/network storage-backed volume "
                        "snapshots.") % ver
                raise exception.Invalid(msg)

            if active_protocol is not None:
                my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
                                             active_disk_object.backing_store)
                my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
                                            active_disk_object.backing_store)

            commit_base = my_snap_base or delete_info['merge_target_file']
            commit_top = my_snap_top or delete_info['file_to_merge']

            LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
                      'commit_base=%(commit_base)s '
                      'commit_top=%(commit_top)s ',
                      {'commit_disk': commit_disk,
                       'commit_base': commit_base,
                       'commit_top': commit_top}, instance=instance)

            dev = guest.get_block_device(commit_disk)
            result = dev.commit(commit_base, commit_top, relative=True)

            if result == 0:
                LOG.debug('blockCommit started successfully',
                          instance=instance)

            # Poll until the block job finishes (raises on error).
            while dev.wait_for_job(abort_on_error=True):
                LOG.debug('waiting for blockCommit job completion',
                          instance=instance)
                time.sleep(0.5)
    def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
                               delete_info):
        """Delete an assisted volume snapshot, reporting status to Cinder.

        Delegates to _volume_snapshot_delete(); on any failure the
        snapshot is flagged 'error_deleting' in Cinder and the exception
        re-raised.  On success Cinder is told the snapshot is 'deleting'
        and the volume's connection info is refreshed.
        """
        try:
            self._volume_snapshot_delete(context, instance, volume_id,
                                         snapshot_id, delete_info=delete_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Error occurred during '
                                  'volume_snapshot_delete, '
                                  'sending error status to Cinder.'),
                              instance=instance)
                self._volume_snapshot_update_status(
                    context, snapshot_id, 'error_deleting')

        self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
        self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
if reboot_type == 'SOFT':
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s", e,
instance=instance)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warning(_LW("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
    def _soft_reboot(self, instance):
        """Attempt to shut down and restart the instance gracefully.

        The guest OS is asked to shut itself down; if it complies within
        CONF.libvirt.wait_soft_reboot_seconds the domain is re-created.

        :returns: True when the reboot is believed to have happened
            (either the guest shut down and was re-created, or its
            domain id changed, indicating it already restarted);
            False when nothing happened within the timeout.
        """
        guest = self._host.get_guest(instance)

        state = guest.get_power_state(self._host)
        old_domid = guest.id
        # Only ask a running guest to shut down; a guest that is already
        # shut down just gets re-created below.
        if state == power_state.RUNNING:
            guest.shutdown()
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance, 'all'))
        for x in range(CONF.libvirt.wait_soft_reboot_seconds):
            guest = self._host.get_guest(instance)

            state = guest.get_power_state(self._host)
            new_domid = guest.id

            # A changed domain id means the domain was restarted: either
            # it now needs re-creating (shutdown/crashed) or the guest
            # already came back up on its own.
            if old_domid != new_domid:
                if state in [power_state.SHUTDOWN,
                             power_state.CRASHED]:
                    LOG.info(_LI("Instance shutdown successfully."),
                             instance=instance)
                    self._create_domain(domain=guest._domain)
                    timer = loopingcall.FixedIntervalLoopingCall(
                        self._wait_for_running, instance)
                    timer.start(interval=0.5).wait()
                    return True
                else:
                    LOG.info(_LI("Instance may have been rebooted during soft "
                                 "reboot, so return now."), instance=instance)
                    return True
            greenthread.sleep(1)
        return False
    def _hard_reboot(self, context, instance, network_info,
                     block_device_info=None):
        """Destroy and re-create the domain to reboot the instance.

        The domain is destroyed (disks are kept), its libvirt definition
        removed, the guest XML regenerated, missing backing files
        re-populated where possible, and the domain started again.
        Blocks until the guest reports RUNNING.
        """
        self._destroy(instance)

        # Domain XML will be redefined so we can safely undefine it
        # from libvirt. This ensure that such process as create serial
        # console for guest will run smoothly.
        self._undefine_domain(instance)

        # Make sure the instance directory exists before any files are
        # regenerated inside it.
        instance_dir = libvirt_utils.get_instance_path(instance)
        fileutils.ensure_tree(instance_dir)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            instance.image_meta,
                                            block_device_info)
        # NOTE(vish): This could generate the wrong device_format if we are
        #             using the raw backend and the images don't exist yet.
        #             The create_images_and_backing below doesn't properly
        #             regenerate raw backend images, however, so when it
        #             does we need to (re)generate the xml after the images
        #             are in place.
        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  instance.image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)

        # NOTE(review): some callers (presumably host-boot resume) pass a
        # context without an auth token; backing files can only be
        # re-fetched with a real token - confirm against callers.
        if context.auth_token is not None:
            # NOTE (rmk): Re-populate any missing backing files.
            backing_disk_info = self._get_instance_disk_info(instance.name,
                                                             xml,
                                                             block_device_info)
            self._create_images_and_backing(context, instance, instance_dir,
                                            backing_disk_info)

        # Initialize all the necessary networking, block devices and
        # start the instance.
        self._create_domain_and_network(context, xml, instance, network_info,
                                        disk_info,
                                        block_device_info=block_device_info,
                                        reboot=True,
                                        vifs_already_plugged=True)
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance, 'all'))

        def _wait_for_reboot():
            # Called at an interval until the VM is running again.
            state = self.get_info(instance).state

            if state == power_state.RUNNING:
                LOG.info(_LI("Instance rebooted successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
        timer.start(interval=0.5).wait()
def pause(self, instance):
self._host.get_guest(instance).pause()
def unpause(self, instance):
self._host.get_guest(instance).resume()
    def _clean_shutdown(self, instance, timeout, retry_interval):
        """Try to gracefully shut the instance down.

        The guest is repeatedly asked to shut itself down, re-sending
        the request every *retry_interval* seconds, for at most
        *timeout* seconds overall.

        :returns: True if the guest reached a shutdown state in time
            (or was already gone / already shut down), False otherwise.
        """
        # List of states that represent a shutdown instance
        SHUTDOWN_STATES = [power_state.SHUTDOWN,
                           power_state.CRASHED]

        try:
            guest = self._host.get_guest(instance)
        except exception.InstanceNotFound:
            # If the instance has gone then we don't need to
            # shut it down; treat that as success.
            return True

        state = guest.get_power_state(self._host)
        if state in SHUTDOWN_STATES:
            LOG.info(_LI("Instance already shutdown."),
                     instance=instance)
            return True

        LOG.debug("Shutting down instance from state %s", state,
                  instance=instance)
        guest.shutdown()
        retry_countdown = retry_interval

        for sec in six.moves.range(timeout):
            guest = self._host.get_guest(instance)
            state = guest.get_power_state(self._host)

            if state in SHUTDOWN_STATES:
                LOG.info(_LI("Instance shutdown successfully after %d "
                             "seconds."), sec, instance=instance)
                return True

            # The guest may have ignored (or not yet received)
            # any previous shutdown signal (for example it may
            # have still been starting up), so within the overall
            # timeout we re-trigger the shutdown every
            # retry_interval seconds.
            if retry_countdown == 0:
                retry_countdown = retry_interval
                # Instance could shutdown at any time, in which case we
                # will get an exception when we call shutdown
                try:
                    LOG.debug("Instance in state %s after %d seconds - "
                              "resending shutdown", state, sec,
                              instance=instance)
                    guest.shutdown()
                except libvirt.libvirtError:
                    # Assume this is because its now shutdown, so loop
                    # one more time to clean up.
                    LOG.debug("Ignoring libvirt exception from shutdown "
                              "request.", instance=instance)
                    continue
            else:
                retry_countdown -= 1

            time.sleep(1)

        LOG.info(_LI("Instance failed to shutdown in %d seconds."),
                 timeout, instance=instance)
        return False
def power_off(self, instance, timeout=0, retry_interval=0):
if timeout:
self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def trigger_crash_dump(self, instance):
try:
self._host.get_guest(instance).inject_nmi()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
raise exception.TriggerCrashDumpNotSupported()
elif error_code == libvirt.VIR_ERR_OPERATION_INVALID:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
LOG.exception(_LE('Error from libvirt while injecting an NMI to '
'%(instance_uuid)s: '
'[Error Code %(error_code)s] %(ex)s'),
{'instance_uuid': instance.uuid,
'error_code': error_code, 'ex': ex})
raise
def suspend(self, context, instance):
guest = self._host.get_guest(instance)
self._detach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._detach_sriov_ports(context, instance, guest)
guest.save_memory_state()
def resume(self, context, instance, network_info, block_device_info=None):
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, instance.image_meta,
block_device_info=block_device_info)
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
guest = self._create_domain_and_network(context, xml, instance,
network_info, disk_info,
block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, guest, network_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
# Check if the instance is running already and avoid doing
# anything if it is.
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except exception.NovaException:
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Boot the instance from a rescue image.

        The current domain XML is saved as 'unrescue.xml' so that
        unrescue() can restore it, then the domain is re-created from a
        guest XML built around the rescue image.

        :param image_meta: metadata of the rescue image to boot from;
            falls back to CONF.libvirt.rescue_image_id and finally the
            instance's own image.
        :param rescue_password: admin password to set in the rescue OS.
        """
        instance_dir = libvirt_utils.get_instance_path(instance)
        unrescue_xml = self._get_existing_domain_xml(instance, network_info)
        unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
        libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)

        rescue_image_id = None
        if image_meta.obj_attr_is_set("id"):
            rescue_image_id = image_meta.id

        rescue_images = {
            'image_id': (rescue_image_id or
                         CONF.libvirt.rescue_image_id or instance.image_ref),
            'kernel_id': (CONF.libvirt.rescue_kernel_id or
                          instance.kernel_id),
            'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
                           instance.ramdisk_id),
        }
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            rescue=True)
        # The config drive is (re)built with a '.rescue' suffix via a
        # callback after the domain XML is defined.
        gen_confdrive = functools.partial(self._create_configdrive,
                                          context, instance,
                                          admin_pass=rescue_password,
                                          network_info=network_info,
                                          suffix='.rescue')
        self._create_image(context, instance, disk_info['mapping'],
                           suffix='.rescue', disk_images=rescue_images,
                           network_info=network_info,
                           admin_pass=rescue_password)
        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  image_meta, rescue=rescue_images,
                                  write_to_disk=True)
        # Only destroy the original domain once all rescue assets exist.
        self._destroy(instance)
        self._create_domain(xml, post_xml_callback=gen_confdrive)
    def unrescue(self, instance, network_info):
        """Restore the instance's pre-rescue domain definition.

        Re-reads the XML saved by rescue() ('unrescue.xml'), re-creates
        the domain from it and removes every '.rescue' artifact: files
        and directories in the instance dir, LVM volumes, and - for the
        rbd backend - RBD volumes.
        """
        instance_dir = libvirt_utils.get_instance_path(instance)
        unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
        xml_path = os.path.join(instance_dir, 'libvirt.xml')
        # Restore the saved XML as the instance's primary definition.
        xml = libvirt_utils.load_file(unrescue_xml_path)
        libvirt_utils.write_to_file(xml_path, xml)
        guest = self._host.get_guest(instance)

        # TODO(sahid): We are converting all calls from a
        # virDomain object to use nova.virt.libvirt.Guest.
        # We should be able to remove virt_dom at the end.
        virt_dom = guest._domain
        self._destroy(instance)
        self._create_domain(xml, virt_dom)
        libvirt_utils.file_delete(unrescue_xml_path)
        # Remove every '.rescue' file/directory from the instance dir.
        rescue_files = os.path.join(instance_dir, "*.rescue")
        for rescue_file in glob.iglob(rescue_files):
            if os.path.isdir(rescue_file):
                shutil.rmtree(rescue_file)
            else:
                libvirt_utils.file_delete(rescue_file)
        # cleanup rescue volume
        lvm.remove_volumes([lvmdisk for lvmdisk in self._lvm_disks(instance)
                            if lvmdisk.endswith('.rescue')])
        if CONF.libvirt.images_type == 'rbd':
            filter_fn = lambda disk: (disk.startswith(instance.uuid) and
                                      disk.endswith('.rescue'))
            LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)
# def poll_rebooting_instances(self, timeout, instances):
# pass
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create and boot a new domain for *instance*.

        Builds the local disks (root/kernel/ramdisk/ephemeral/swap),
        generates the guest XML, starts the domain (creating a config
        drive via callback once the XML is defined), and blocks until
        the guest reports the RUNNING power state.
        """
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            block_device_info)
        # Config drive creation is deferred via callback until after
        # the domain XML exists.
        gen_confdrive = functools.partial(self._create_configdrive,
                                          context, instance,
                                          admin_pass=admin_password,
                                          files=injected_files,
                                          network_info=network_info)
        self._create_image(context, instance,
                           disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=block_device_info,
                           files=injected_files,
                           admin_pass=admin_password)

        # Required by Quobyte CI
        self._ensure_console_log_for_instance(instance)

        xml = self._get_guest_xml(context, instance, network_info,
                                  disk_info, image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)
        self._create_domain_and_network(
            context, xml, instance, network_info, disk_info,
            block_device_info=block_device_info,
            post_xml_callback=gen_confdrive)
        LOG.debug("Instance is running", instance=instance)

        def _wait_for_boot():
            # Called at an interval until the VM is running.
            state = self.get_info(instance).state

            if state == power_state.RUNNING:
                LOG.info(_LI("Instance spawned successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
        timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
return fpath
    def get_console_output(self, context, instance):
        """Return the console output of the instance.

        Prefers a file-backed console when one is configured; otherwise
        drains a pty console, appending it to the on-disk console log.
        Output is truncated to the last MAX_CONSOLE_BYTES bytes.

        :raises exception.ConsoleNotAvailable: when pty consoles exist
            but none has a usable source path.
        """
        guest = self._host.get_guest(instance)

        xml = guest.get_xml_desc()
        tree = etree.fromstring(xml)

        # Group the domain's <console> devices by their type attribute;
        # 'file' consoles are preferred over 'pty' ones.
        console_types = {}

        console_nodes = tree.findall('./devices/console')
        for console_node in console_nodes:
            console_type = console_node.get('type')
            console_types.setdefault(console_type, [])
            console_types[console_type].append(console_node)

        # If the guest has a console logging to a file, prefer that.
        if console_types.get('file'):
            for file_console in console_types.get('file'):
                source_node = file_console.find('./source')
                if source_node is None:
                    continue
                path = source_node.get("path")
                if not path:
                    continue

                if not os.path.exists(path):
                    LOG.info(_LI('Instance is configured with a file console, '
                                 'but the backing file is not (yet?) present'),
                             instance=instance)
                    return ""

                # The log may be owned by qemu; take ownership to read it.
                libvirt_utils.chown(path, os.getuid())

                with libvirt_utils.file_open(path, 'rb') as fp:
                    log_data, remaining = utils.last_bytes(fp,
                                                           MAX_CONSOLE_BYTES)
                    if remaining > 0:
                        LOG.info(_LI('Truncated console log returned, '
                                     '%d bytes ignored'), remaining,
                                 instance=instance)
                    return log_data

        # Otherwise fall back to the first pty console with a path.
        if console_types.get('pty'):
            for pty_console in console_types.get('pty'):
                source_node = pty_console.find('./source')
                if source_node is None:
                    continue
                pty = source_node.get("path")
                if not pty:
                    continue
                break
            else:
                raise exception.ConsoleNotAvailable()

        # NOTE(review): if the domain has no 'pty' consoles at all, we
        # fall through here with 'pty' unbound and the flush below will
        # raise NameError - this probably should also raise
        # ConsoleNotAvailable; confirm intended behaviour.
        console_log = self._get_console_log_path(instance)
        if os.path.exists(console_log):
            libvirt_utils.chown(console_log, os.getuid())

        data = self._flush_libvirt_console(pty)
        fpath = self._append_to_file(data, console_log)

        with libvirt_utils.file_open(fpath, 'rb') as fp:
            log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
            if remaining > 0:
                LOG.info(_LI('Truncated console log returned, '
                             '%d bytes ignored'),
                         remaining, instance=instance)
            return log_data
def get_host_ip_addr(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warning(_LW('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s'),
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='vnc']")
if graphic is not None:
return graphic.get('port')
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vnc.vncserver_proxyclient_address
return ctype.ConsoleVNC(host=host, port=port)
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='spice']")
if graphic is not None:
return (graphic.get('port'), graphic.get('tlsPort'))
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance.name)
host = CONF.spice.server_proxyclient_address
return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1])
def get_serial_console(self, context, instance):
guest = self._host.get_guest(instance)
for hostname, port in self._get_serial_ports_from_guest(
guest, mode='bind'):
return ctype.ConsoleSerial(host=hostname, port=port)
raise exception.ConsoleTypeUnavailable(console_type='serial')
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
fd = None
try:
fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
align_size = 512
m = mmap.mmap(-1, align_size)
m.write(r"x" * align_size)
os.write(fd, m)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'"),
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'"), {'path': dirpath, 'ex': e})
finally:
if fd is not None:
os.close(fd)
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_ephemeral(target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
context=None, specified_fs=None):
if not is_block_dev:
libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, context=None):
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _ensure_console_log_for_instance(self, instance):
# Consequently when the domain starts it is unable to write to its
# console.log. See bug https://bugs.launchpad.net/nova/+bug/1597644
#
# To work around this, we create the file manually before starting
# the domain so it has the same ownership as Nova. This works
# for Quobyte CI because it is also configured to run qemu as the same
# user as the Nova service. Installations which don't set
console_file = self._get_console_log_path(instance)
LOG.debug('Ensure instance console log exists: %s', console_file,
instance=instance)
libvirt_utils.file_open(console_file, 'a').close()
@staticmethod
def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config' + suffix)
@staticmethod
def _get_disk_config_image_type():
return 'rbd' if CONF.libvirt.images_type == 'rbd' else 'raw'
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
@staticmethod
def _has_local_disk(instance, disk_mapping):
if disk_mapping:
if ('disk.local' in disk_mapping or
'disk.swap' in disk_mapping or
'disk.config' in disk_mapping):
return True
return False
    def _inject_data(self, injection_image, instance, network_info,
                     admin_pass, files):
        """Inject key/password/network/metadata/files into the image.

        :param injection_image: image backend object providing
            get_model() for disk_api to operate on.
        :param admin_pass: admin password to inject; dropped unless
            CONF.libvirt.inject_password is enabled.
        :param files: files to inject; injection of these is marked
            mandatory for disk_api.
        """
        # Work out which partition injection should target; 0 means
        # auto-detect (None), and LXC guests never use a partition.
        target_partition = None
        if not instance.kernel_id:
            target_partition = CONF.libvirt.inject_partition
            if target_partition == 0:
                target_partition = None
        if CONF.libvirt.virt_type == 'lxc':
            target_partition = None

        # SSH key is only injected when configured and present.
        if CONF.libvirt.inject_key and instance.get('key_data'):
            key = str(instance.key_data)
        else:
            key = None

        if not CONF.libvirt.inject_password:
            admin_pass = None

        net = netutils.get_injected_network_template(
            network_info, libvirt_virt_type=CONF.libvirt.virt_type)

        metadata = instance.get('metadata')

        # Skip the (expensive) injection entirely when there is
        # nothing to inject.
        if any((key, net, metadata, admin_pass, files)):
            img_id = instance.image_ref

            try:
                disk_api.inject_data(injection_image.get_model(self._conn),
                                     key, net, metadata, admin_pass, files,
                                     partition=target_partition,
                                     mandatory=('files',))
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Error injecting data into image '
                                  '%(img_id)s (%(e)s)'),
                              {'img_id': img_id, 'e': e},
                              instance=instance)
    # NOTE(review): original comment truncated - images created here
    # appear to be reused by other operations (ie:
    # (live)-migration/resize); confirm before changing caching logic.
    def _create_image(self, context, instance,
                      disk_mapping, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None, files=None,
                      admin_pass=None, inject_files=True,
                      fallback_from_host=None):
        """Create all local disks for the instance.

        Populates (via the image cache) the kernel/ramdisk, the root
        disk (delegated to _create_and_inject_local_root), flavor and
        BDM ephemeral disks, and the swap disk, according to
        *disk_mapping*.

        :param suffix: appended to every disk name (e.g. '.rescue').
        :param disk_images: optional override of image/kernel/ramdisk
            ids; defaults to the instance's own.
        :param fallback_from_host: host to fetch the root image from
            when it is no longer available in Glance.
        """
        booted_from_volume = self._is_booted_from_volume(
            instance, disk_mapping)

        def image(fname, image_type=CONF.libvirt.images_type):
            # Backend image object for a disk named fname + suffix.
            return self.image_backend.image(instance,
                                            fname + suffix, image_type)

        def raw(fname):
            # Kernel/ramdisk are always stored raw.
            return image(fname, image_type='raw')

        # ensure directories exist and are writable
        fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))

        LOG.info(_LI('Creating image'), instance=instance)

        if not disk_images:
            disk_images = {'image_id': instance.image_ref,
                           'kernel_id': instance.kernel_id,
                           'ramdisk_id': instance.ramdisk_id}

        if disk_images['kernel_id']:
            fname = imagecache.get_cache_fname(disk_images['kernel_id'])
            raw('kernel').cache(fetch_func=libvirt_utils.fetch_raw_image,
                                context=context,
                                filename=fname,
                                image_id=disk_images['kernel_id'])
        if disk_images['ramdisk_id']:
            fname = imagecache.get_cache_fname(disk_images['ramdisk_id'])
            raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_raw_image,
                                 context=context,
                                 filename=fname,
                                 image_id=disk_images['ramdisk_id'])

        inst_type = instance.get_flavor()
        # UML requires the disk to be owned by root.
        if CONF.libvirt.virt_type == 'uml':
            libvirt_utils.chown(image('disk').path, 'root')

        self._create_and_inject_local_root(context, instance,
                                           booted_from_volume, suffix,
                                           disk_images, network_info,
                                           admin_pass, files, inject_files,
                                           fallback_from_host)

        # Lookup the filesystem type if required
        os_type_with_default = disk_api.get_fs_type_for_os_type(
            instance.os_type)
        # Generate a file extension based on the file system
        # type and the mkfs commands configured if any
        file_extension = disk_api.get_file_extension_for_os_type(
            os_type_with_default)

        # Flavor-defined ephemeral disk.
        ephemeral_gb = instance.flavor.ephemeral_gb
        if 'disk.local' in disk_mapping:
            disk_image = image('disk.local')
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance.os_type,
                                   is_block_dev=disk_image.is_block_dev)
            fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension)
            size = ephemeral_gb * units.Gi
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=ephemeral_gb)

        # Additional ephemeral disks from block device mappings.
        for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
                block_device_info)):
            disk_image = image(blockinfo.get_eph_disk(idx))
            specified_fs = eph.get('guest_format')
            if specified_fs and not self.is_supported_fs_format(specified_fs):
                msg = _("%s format is not supported") % specified_fs
                raise exception.InvalidBDMFormat(details=msg)

            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % idx,
                                   os_type=instance.os_type,
                                   is_block_dev=disk_image.is_block_dev)
            size = eph['size'] * units.Gi
            fname = "ephemeral_%s_%s" % (eph['size'], file_extension)
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=eph['size'],
                             specified_fs=specified_fs)

        if 'disk.swap' in disk_mapping:
            mapping = disk_mapping['disk.swap']
            swap_mb = 0

            # BDM swap wins over flavor swap; flavor swap is only used
            # when its device is not already claimed by a volume.
            swap = driver.block_device_info_get_swap(block_device_info)
            if driver.swap_is_usable(swap):
                swap_mb = swap['swap_size']
            elif (inst_type['swap'] > 0 and
                  not block_device.volume_in_mapping(
                      mapping['dev'], block_device_info)):
                swap_mb = inst_type['swap']

            if swap_mb > 0:
                size = swap_mb * units.Mi
                image('disk.swap').cache(fetch_func=self._create_swap,
                                         context=context,
                                         filename="swap_%s" % swap_mb,
                                         size=size,
                                         swap_mb=swap_mb)
    def _create_and_inject_local_root(self, context, instance,
                                      booted_from_volume, suffix, disk_images,
                                      network_info, admin_pass, files,
                                      inject_files, fallback_from_host):
        """Create the root disk (unless volume-booted) and inject data.

        The root image is fetched into the image cache (or cloned when
        the backend supports it) and, when injection applies, data is
        injected directly into the resulting disk.

        :param fallback_from_host: host to fetch the image from if it
            cannot be obtained normally.
        """
        # File injection only if needed
        need_inject = (not configdrive.required_by(instance) and
                       inject_files and CONF.libvirt.inject_partition != -2)

        # NOTE(ndipanov): Even if disk_mapping was passed in, which
        # currently happens only on rescue - we still don't want to
        # create a local root disk for a volume-backed instance.
        if not booted_from_volume:
            root_fname = imagecache.get_cache_fname(disk_images['image_id'])
            size = instance.flavor.root_gb * units.Gi

            # A zero root_gb or a rescue disk means "use the image's
            # own size" (no resize).
            if size == 0 or suffix == '.rescue':
                size = None

            backend = self.image_backend.image(instance, 'disk' + suffix,
                                               CONF.libvirt.images_type)
            if instance.task_state == task_states.RESIZE_FINISH:
                backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
            if backend.SUPPORTS_CLONE:
                def clone_fallback_to_fetch(*args, **kwargs):
                    # Prefer a backend-native clone; fall back to a
                    # regular fetch when the image can't be cloned.
                    try:
                        backend.clone(context, disk_images['image_id'])
                    except exception.ImageUnacceptable:
                        libvirt_utils.fetch_image(*args, **kwargs)
                fetch_func = clone_fallback_to_fetch
            else:
                fetch_func = libvirt_utils.fetch_image
            self._try_fetch_image_cache(backend, fetch_func, context,
                                        root_fname, disk_images['image_id'],
                                        instance, size, fallback_from_host)

            if need_inject:
                self._inject_data(backend, instance, network_info, admin_pass,
                                  files)

        elif need_inject:
            LOG.warning(_LW('File injection into a boot from volume '
                            'instance is not supported'), instance=instance)
    def _create_configdrive(self, context, instance, admin_pass=None,
                            files=None, network_info=None, suffix=''):
        """Create a config drive image for *instance* if one is required.

        The drive is assembled locally with ConfigDriveBuilder and then
        imported into the configured image backend; for RBD backends the
        local copy is removed afterwards.
        """
        # Refresh the stored device metadata before building the drive.
        instance.device_metadata = self._build_device_metadata(context,
                                                               instance)
        config_drive_image = None
        if configdrive.required_by(instance):
            LOG.info(_LI('Using config drive'), instance=instance)
            config_drive_image = self.image_backend.image(
                instance, 'disk.config' + suffix,
                self._get_disk_config_image_type())
            # Never overwrite an already-built config drive.
            if not config_drive_image.exists():
                extra_md = {}
                if admin_pass:
                    extra_md['admin_pass'] = admin_pass
                inst_md = instance_metadata.InstanceMetadata(
                    instance, content=files, extra_md=extra_md,
                    network_info=network_info, request_context=context)
                cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
                with cdb:
                    config_drive_local_path = self._get_disk_config_path(
                        instance, suffix)
                    LOG.info(_LI('Creating config drive at %(path)s'),
                             {'path': config_drive_local_path},
                             instance=instance)
                    try:
                        cdb.make_drive(config_drive_local_path)
                    except processutils.ProcessExecutionError as e:
                        with excutils.save_and_reraise_exception():
                            LOG.error(_LE('Creating config drive failed '
                                          'with error: %s'),
                                      e, instance=instance)
                try:
                    config_drive_image.import_file(
                        instance, config_drive_local_path,
                        'disk.config' + suffix)
                finally:
                    # NOTE(mikal): if the config drive was imported into RBD,
                    # then we no longer need the local copy
                    if CONF.libvirt.images_type == 'rbd':
                        os.unlink(config_drive_local_path)
    def _prepare_pci_devices_for_use(self, pci_devices):
        """Manually detach and reset PCI devices before guest start.

        Only needed for Xen; other virt types rely on libvirt's managed
        mode (see comment below).

        :param pci_devices: sequence of dicts carrying at least
            'hypervisor_name' (plus 'id'/'instance_uuid' for errors)
        :raises exception.PciDevicePrepareFailed: if libvirt fails to
            detach or reset any device
        """
        # kvm , qemu support managed mode
        # In managed mode, the configured device will be automatically
        # detached from the host OS drivers when the guest is started,
        # and then re-attached when the guest shuts down.
        if CONF.libvirt.virt_type != 'xen':
            # we do manual detach only for xen
            return
        try:
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._host.device_lookup_by_name(libvirt_dev_addr)
                # Note(yjiang5) Spelling for 'dettach' is correct, see
                # http://libvirt.org/html/libvirt-libvirt.html.
                libvirt_dev.dettach()
            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separated loops
            # to detach and then reset it.
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._host.device_lookup_by_name(libvirt_dev_addr)
                libvirt_dev.reset()
        except libvirt.libvirtError as exc:
            # 'dev' is the device being processed when the error occurred.
            raise exception.PciDevicePrepareFailed(id=dev['id'],
                                                   instance_uuid=
                                                   dev['instance_uuid'],
                                                   reason=six.text_type(exc))
    def _detach_pci_devices(self, guest, pci_devs):
        """Live-detach *pci_devs* from *guest* and verify they are gone.

        :raises exception.PciDeviceDetachFailed: if a device is still
            present in the domain XML after the detach call returned
        """
        try:
            for dev in pci_devs:
                guest.detach_device(self._get_guest_pci_device(dev), live=True)
                # after detachDeviceFlags returned, we should check the dom to
                # ensure the detaching is finished
                xml = guest.get_xml_desc()
                xml_doc = etree.fromstring(xml)
                guest_config = vconfig.LibvirtConfigGuest()
                guest_config.parse_dom(xml_doc)
                for hdev in [d for d in guest_config.devices
                    if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
                    hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
                    dbsf = pci_utils.parse_address(dev.address)
                    # Compare addresses numerically: the textual forms may
                    # differ (leading zeros, hex case).
                    if [int(x, 16) for x in hdbsf] ==\
                            [int(x, 16) for x in dbsf]:
                        raise exception.PciDeviceDetachFailed(reason=
                                                              "timeout",
                                                              dev=dev)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                # The domain is already gone; nothing left to detach from.
                LOG.warning(_LW("Instance disappeared while detaching "
                                "a PCI device from it."))
            else:
                raise
def _attach_pci_devices(self, guest, pci_devs):
try:
for dev in pci_devs:
guest.attach_device(self._get_guest_pci_device(dev))
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': guest.id})
raise
@staticmethod
def _has_sriov_port(network_info):
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
return True
return False
def _attach_sriov_ports(self, context, instance, guest, network_info=None):
if network_info is None:
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_sriov_port(network_info):
for vif in network_info:
if vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV:
cfg = self.vif_driver.get_config(instance,
vif,
instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type,
self._host)
LOG.debug('Attaching SR-IOV port %(port)s to %(dom)s',
{'port': vif, 'dom': guest.id},
instance=instance)
guest.attach_device(cfg)
def _detach_sriov_ports(self, context, instance, guest):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_sriov_port(network_info):
# In case of SR-IOV vif types we create pci request per SR-IOV port
# Therefore we can trust that pci_slot value in the vif is correct.
sriov_pci_addresses = [
vif['profile']['pci_slot']
for vif in network_info
if vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV and
vif['profile'].get('pci_slot') is not None
]
# use detach_pci_devices to avoid failure in case of
# multiple guest SRIOV ports with the same MAC
# (protection use-case, ports are on different physical
# interfaces)
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
sriov_devs = [pci_dev for pci_dev in pci_devs
if pci_dev.address in sriov_pci_addresses]
self._detach_pci_devices(guest, sriov_devs)
    def _set_host_enabled(self, enabled,
                          disable_reason=DISABLE_REASON_UNDEFINED):
        """Enable or disable this host's compute service record.

        An automatic disable prefixes the reason with DISABLE_PREFIX; a
        host is only ever automatically re-enabled when that prefix is
        found, so operator-made disablements are never overridden.
        """
        status_name = {True: 'disabled',
                       False: 'enabled'}
        disable_service = not enabled
        ctx = nova_context.get_admin_context()
        try:
            service = objects.Service.get_by_compute_host(ctx, CONF.host)
            if service.disabled != disable_service:
                # Note(jang): this is a quick fix to stop operator-
                # disabled compute hosts from re-enabling themselves
                # automatically. We prefix any automatic reason code
                # with a fixed string. We only re-enable a host
                # automatically if we find that string in place.
                # This should probably be replaced with a separate flag.
                if not service.disabled or (
                        service.disabled_reason and
                        service.disabled_reason.startswith(DISABLE_PREFIX)):
                    service.disabled = disable_service
                    service.disabled_reason = (
                       DISABLE_PREFIX + disable_reason
                       if disable_service else DISABLE_REASON_UNDEFINED)
                    service.save()
                    LOG.debug('Updating compute service status to %s',
                              status_name[disable_service])
                else:
                    LOG.debug('Not overriding manual compute service '
                              'status with: %s',
                              status_name[disable_service])
        except exception.ComputeHostNotFound:
            LOG.warning(_LW('Cannot update service status on host "%s" '
                            'since it is not registered.'), CONF.host)
        except Exception:
            # Best effort: failing to flip the service flag must not
            # propagate out of the driver.
            LOG.warning(_LW('Cannot update service status on host "%s" '
                            'due to an unexpected exception.'), CONF.host,
                        exc_info=True)
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
if (CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu"):
if mode is None:
mode = "host-model"
if mode == "none":
return vconfig.LibvirtConfigGuestCPU()
else:
if mode is None or mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
return cpu
def _get_guest_cpu_config(self, flavor, image_meta,
guest_cpu_numa_config, instance_numa_topology):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.get_best_cpu_topology(
flavor, image_meta, numa_topology=instance_numa_topology)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
return cpu
    def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
                               image_type=None):
        """Build the libvirt disk config for one instance disk.

        :param name: key into *disk_mapping*, e.g. 'disk', 'disk.swap'
        :raises exception.Invalid: when discard is configured but the
            running qemu is too old to support it
        """
        if CONF.libvirt.hw_disk_discard:
            if not self._host.has_min_version(hv_ver=MIN_QEMU_DISCARD_VERSION,
                                              hv_type=host.HV_DRIVER_QEMU):
                msg = (_('Volume sets discard option, qemu %(qemu)s'
                         ' or later is required.') %
                       {'qemu': MIN_QEMU_DISCARD_VERSION})
                raise exception.Invalid(msg)
        image = self.image_backend.image(instance,
                                         name,
                                         image_type)
        if (name == 'disk.config' and image_type == 'rbd' and
                not image.exists()):
            # This is likely an older config drive that has not been migrated
            # to rbd yet. Try to fall back on 'flat' image type.
            # TODO(melwitt): Add online migration of some sort so we can
            # remove this fall back once we know all config drives are in rbd.
            # NOTE(vladikr): make sure that the flat image exist, otherwise
            # the image will be created after the domain definition.
            flat_image = self.image_backend.image(instance, name, 'flat')
            if flat_image.exists():
                image = flat_image
                LOG.debug('Config drive not found in RBD, falling back to the '
                          'instance directory', instance=instance)
        disk_info = disk_mapping[name]
        return image.libvirt_info(disk_info['bus'],
                                  disk_info['dev'],
                                  disk_info['type'],
                                  self.disk_cachemode,
                                  inst_type['extra_specs'],
                                  self._host.get_version())
def _get_guest_fs_config(self, instance, name, image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
return image.libvirt_fs_info("/", "ploop")
    def _get_guest_storage_config(self, instance, image_meta,
                                  disk_info,
                                  rescue, block_device_info,
                                  inst_type, os_type):
        """Assemble all storage-related guest devices.

        Covers the LXC rootfs mount, parallels container filesystems,
        root/rescue/local/ephemeral/swap/config disks, attached volumes
        and - when the image asks for one - a SCSI controller.

        :returns: list of libvirt device config objects
        """
        devices = []
        disk_mapping = disk_info['mapping']
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        mount_rootfs = CONF.libvirt.virt_type == "lxc"
        if mount_rootfs:
            # LXC: the root filesystem is a host directory mount, not a disk.
            fs = vconfig.LibvirtConfigGuestFilesys()
            fs.source_type = "mount"
            fs.source_dir = os.path.join(
                libvirt_utils.get_instance_path(instance), 'rootfs')
            devices.append(fs)
        elif os_type == vm_mode.EXE and CONF.libvirt.virt_type == "parallels":
            # Parallels containers use filesystem devices; in rescue mode
            # the original root is exposed under /mnt/rescue.
            if rescue:
                fsrescue = self._get_guest_fs_config(instance, "disk.rescue")
                devices.append(fsrescue)
                fsos = self._get_guest_fs_config(instance, "disk")
                fsos.target_dir = "/mnt/rescue"
                devices.append(fsos)
            else:
                if 'disk' in disk_mapping:
                    fs = self._get_guest_fs_config(instance, "disk")
                    devices.append(fs)
        else:
            if rescue:
                # Boot from the rescue disk; the original root comes second.
                diskrescue = self._get_guest_disk_config(instance,
                                                         'disk.rescue',
                                                         disk_mapping,
                                                         inst_type)
                devices.append(diskrescue)
                diskos = self._get_guest_disk_config(instance,
                                                     'disk',
                                                     disk_mapping,
                                                     inst_type)
                devices.append(diskos)
            else:
                if 'disk' in disk_mapping:
                    diskos = self._get_guest_disk_config(instance,
                                                         'disk',
                                                         disk_mapping,
                                                         inst_type)
                    devices.append(diskos)
                if 'disk.local' in disk_mapping:
                    disklocal = self._get_guest_disk_config(instance,
                                                            'disk.local',
                                                            disk_mapping,
                                                            inst_type)
                    devices.append(disklocal)
                    instance.default_ephemeral_device = (
                        block_device.prepend_dev(disklocal.target_dev))
                for idx, eph in enumerate(
                        driver.block_device_info_get_ephemerals(
                            block_device_info)):
                    diskeph = self._get_guest_disk_config(
                        instance,
                        blockinfo.get_eph_disk(idx),
                        disk_mapping, inst_type)
                    devices.append(diskeph)
                if 'disk.swap' in disk_mapping:
                    diskswap = self._get_guest_disk_config(instance,
                                                           'disk.swap',
                                                           disk_mapping,
                                                           inst_type)
                    devices.append(diskswap)
                    instance.default_swap_device = (
                        block_device.prepend_dev(diskswap.target_dev))
            if 'disk.config' in disk_mapping:
                diskconfig = self._get_guest_disk_config(
                    instance, 'disk.config', disk_mapping, inst_type,
                    self._get_disk_config_image_type())
                devices.append(diskconfig)
        for vol in block_device.get_bdms_to_connect(block_device_mapping,
                                                    mount_rootfs):
            connection_info = vol['connection_info']
            vol_dev = block_device.prepend_dev(vol['mount_device'])
            info = disk_mapping[vol_dev]
            self._connect_volume(connection_info, info)
            cfg = self._get_volume_config(connection_info, info)
            devices.append(cfg)
            # Persist the (possibly updated) connection info on the BDM.
            vol['connection_info'] = connection_info
            vol.save()
        for d in devices:
            self._set_cache_mode(d)
        if image_meta.properties.get('hw_scsi_model'):
            hw_scsi_model = image_meta.properties.hw_scsi_model
            scsi_controller = vconfig.LibvirtConfigGuestController()
            scsi_controller.type = 'scsi'
            scsi_controller.model = hw_scsi_model
            devices.append(scsi_controller)
        return devices
def _get_host_sysinfo_serial_hardware(self):
caps = self._host.get_capabilities()
return caps.host.uuid
def _get_host_sysinfo_serial_os(self):
if not os.path.exists("/etc/machine-id"):
msg = _("Unable to get host UUID: /etc/machine-id does not exist")
raise exception.NovaException(msg)
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
lines = f.read().split()
if not lines:
msg = _("Unable to get host UUID: /etc/machine-id is empty")
raise exception.NovaException(msg)
return str(uuid.UUID(lines[0]))
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance.uuid
sysinfo.system_family = "Virtual Machine"
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device.address)
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
# only kvm support managed mode
if CONF.libvirt.virt_type in ('xen', 'parallels',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, context, instance):
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance.display_name
meta.creationTime = time.time()
if instance.image_ref not in ("", None):
meta.roottype = "image"
meta.rootid = instance.image_ref
if context is not None:
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
flavor = instance.flavor
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _machine_type_mappings(self):
mappings = {}
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
def _get_machine_type(self, image_meta, caps):
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
mach_type = None
if image_meta.properties.get('hw_machine_type') is not None:
mach_type = image_meta.properties.hw_machine_type
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == arch.ARMV7:
mach_type = "vexpress-a15"
if caps.host.cpu.arch == arch.AARCH64:
mach_type = "virt"
if caps.host.cpu.arch in (arch.S390, arch.S390X):
mach_type = 's390-ccw-virtio'
# If set in the config, use that as the default.
if CONF.libvirt.hw_machine_type:
mappings = self._machine_type_mappings()
mach_type = mappings.get(caps.host.cpu.arch)
return mach_type
@staticmethod
def _create_idmaps(klass, map_strings):
idmaps = []
if len(map_strings) > 5:
map_strings = map_strings[0:5]
LOG.warning(_LW("Too many id maps, only included first five."))
for map_string in map_strings:
try:
idmap = klass()
values = [int(i) for i in map_string.split(":")]
idmap.start = values[0]
idmap.target = values[1]
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
LOG.warning(_LW("Invalid value for id mapping %s"), map_string)
return idmaps
def _get_guest_idmaps(self):
id_maps = []
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
CONF.libvirt.uid_maps)
id_maps.extend(uid_maps)
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
CONF.libvirt.gid_maps)
id_maps.extend(gid_maps)
return id_maps
def _update_guest_cputune(self, guest, flavor, virt_type):
is_able = self._host.is_cpu_control_policy_capable()
cputuning = ['shares', 'period', 'quota']
wants_cputune = any([k for k in cputuning
if "quota:cpu_" + k in flavor.extra_specs.keys()])
if wants_cputune and not is_able:
raise exception.UnsupportedHostCPUControlPolicy()
if not is_able or virt_type not in ('lxc', 'kvm', 'qemu'):
return
if guest.cputune is None:
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
# Setting the default cpu.shares value to be a value
# dependent on the number of vcpus
guest.cputune.shares = 1024 * guest.vcpus
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
    def _get_cpu_numa_config_from_instance(self, instance_numa_topology,
                                           wants_hugepages):
        """Build the guest CPU NUMA config from the instance's topology.

        Returns None (implicitly) when the instance has no NUMA topology.
        """
        if instance_numa_topology:
            guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
            for instance_cell in instance_numa_topology.cells:
                guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
                guest_cell.id = instance_cell.id
                guest_cell.cpus = instance_cell.cpuset
                # Scale by units.Ki — instance cells appear to track MiB
                # while libvirt expects KiB.
                guest_cell.memory = instance_cell.memory * units.Ki
                # The vhost-user network backend requires file backed
                # guest memory (ie huge pages) to be marked as shared
                # access, not private, so an external process can read
                # and write the pages.
                #
                # You can't change the shared vs private flag for an
                # already running guest, and since we can't predict what
                # types of NIC may be hotplugged, we have no choice but
                # to unconditionally turn on the shared flag. This has
                # no real negative functional effect on the guest, so
                # is a reasonable approach to take
                if wants_hugepages:
                    guest_cell.memAccess = "shared"
                guest_cpu_numa.cells.append(guest_cell)
            return guest_cpu_numa
def _has_cpu_policy_support(self):
for ver in BAD_LIBVIRT_CPU_POLICY_VERSIONS:
if self._host.has_version(ver):
ver_ = self._version_to_string(ver)
raise exception.CPUPinningNotSupported(reason=_(
'Invalid libvirt version %(version)s') % {'version': ver_})
return True
def _wants_hugepages(self, host_topology, instance_topology):
if host_topology is None or instance_topology is None:
return False
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
# Remove smallest page size as that's not classed as a largepage
avail_pagesize = avail_pagesize[1:]
for cell in instance_topology.cells:
if (cell.pagesize is not None and
cell.pagesize in avail_pagesize):
return True
return False
    def _get_guest_numa_config(self, instance_numa_topology, flavor,
                               allowed_cpus=None, image_meta=None):
        """Return a GuestNumaConfig tuple for the instance.

        With no guest NUMA config only the allowed CPU set is returned.
        With a guest NUMA config and a host NUMA topology, full cputune
        (per-vCPU pinning, emulator pinning, optional realtime scheduler)
        and numatune (strict per-cell memory placement) configs are built.

        :raises exception.NUMATopologyUnsupported: instance requests NUMA
            but the host virt type cannot provide it
        :raises exception.RealtimePolicyNotSupported: realtime flavor on
            a too-old libvirt
        """
        if (not self._has_numa_support() and
                instance_numa_topology is not None):
            raise exception.NUMATopologyUnsupported()
        topology = self._get_host_numa_topology()
        guest_cpu_numa_config = self._get_cpu_numa_config_from_instance(
            instance_numa_topology,
            self._wants_hugepages(topology, instance_numa_topology))
        if not guest_cpu_numa_config:
            return GuestNumaConfig(allowed_cpus, None, None, None)
        else:
            if topology:
                guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
                guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
                allpcpus = []
                numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
                # One memnode per guest NUMA cell, filled in below.
                numa_memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()
                                 for _ in guest_cpu_numa_config.cells]
                for host_cell in topology.cells:
                    for guest_node_id, guest_config_cell in enumerate(
                            guest_cpu_numa_config.cells):
                        if guest_config_cell.id == host_cell.id:
                            node = numa_memnodes[guest_node_id]
                            node.cellid = guest_node_id
                            node.nodeset = [host_cell.id]
                            node.mode = "strict"
                            numa_mem.nodeset.append(host_cell.id)
                            object_numa_cell = (
                                instance_numa_topology.cells[guest_node_id]
                            )
                            for cpu in guest_config_cell.cpus:
                                pin_cpuset = (
                                    vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
                                pin_cpuset.id = cpu
                                # Pin to the dedicated pCPU when the cell
                                # carries a pinning map; otherwise float
                                # over the whole host cell's cpuset.
                                if (object_numa_cell.cpu_pinning and
                                        self._has_cpu_policy_support()):
                                    pcpu = object_numa_cell.cpu_pinning[cpu]
                                    pin_cpuset.cpuset = set([pcpu])
                                else:
                                    pin_cpuset.cpuset = host_cell.cpuset
                                allpcpus.extend(pin_cpuset.cpuset)
                                guest_cpu_tune.vcpupin.append(pin_cpuset)
                # By default the emulator may run on any pCPU a vCPU uses.
                emulatorpin = vconfig.LibvirtConfigGuestCPUTuneEmulatorPin()
                emulatorpin.cpuset = set(allpcpus)
                guest_cpu_tune.emulatorpin = emulatorpin
                # Sort the vcpupin entries by vCPU id.
                guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
                if hardware.is_realtime_enabled(flavor):
                    if not self._host.has_min_version(
                            MIN_LIBVIRT_REALTIME_VERSION):
                        raise exception.RealtimePolicyNotSupported()
                    vcpus_rt, vcpus_em = hardware.vcpus_realtime_topology(
                        set(cpu.id for cpu in guest_cpu_tune.vcpupin),
                        flavor, image_meta)
                    vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
                    vcpusched.vcpus = vcpus_rt
                    vcpusched.scheduler = "fifo"
                    vcpusched.priority = (
                        CONF.libvirt.realtime_scheduler_priority)
                    guest_cpu_tune.vcpusched.append(vcpusched)
                    # Replace the default emulator pinning with the
                    # emulator vCPU set from the realtime topology.
                    guest_cpu_tune.emulatorpin.cpuset = vcpus_em
                guest_numa_tune.memory = numa_mem
                guest_numa_tune.memnodes = numa_memnodes
                # Renumber cells/memnodes to contiguous indexes.
                for i, (cell, memnode) in enumerate(
                        zip(guest_cpu_numa_config.cells,
                            guest_numa_tune.memnodes)):
                    cell.id = i
                    memnode.cellid = i
                return GuestNumaConfig(None, guest_cpu_tune,
                                       guest_cpu_numa_config,
                                       guest_numa_tune)
            else:
                return GuestNumaConfig(allowed_cpus, None,
                                       guest_cpu_numa_config, None)
def _get_guest_os_type(self, virt_type):
if virt_type == "lxc":
ret = vm_mode.EXE
elif virt_type == "uml":
ret = vm_mode.UML
elif virt_type == "xen":
ret = vm_mode.XEN
else:
ret = vm_mode.HVM
return ret
def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type,
root_device_name):
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type,
root_device_name, image_meta):
guest.os_kernel = os.path.join(inst_path, "kernel")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance.ramdisk_id:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
if image_meta.properties.get("os_command_line"):
guest.os_cmdline = image_meta.properties.os_command_line
def _set_clock(self, guest, os_type, image_meta, virt_type):
clk = vconfig.LibvirtConfigGuestClock()
if os_type == 'windows':
LOG.info(_LI('Configuring timezone for windows instance to '
'localtime'))
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if virt_type == "kvm":
self._set_kvm_timers(clk, os_type, image_meta)
def _set_kvm_timers(self, clk, os_type, image_meta):
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (arch.I686, arch.X86_64):
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
if (os_type == 'windows' and
self._host.has_min_version(MIN_LIBVIRT_HYPERV_TIMER_VERSION,
MIN_QEMU_HYPERV_TIMER_VERSION)):
tmhyperv = vconfig.LibvirtConfigGuestTimer()
tmhyperv.name = "hypervclock"
tmhyperv.present = True
clk.add_timer(tmhyperv)
def _set_features(self, guest, os_type, caps, virt_type):
if virt_type == "xen":
if caps.host.cpu.arch in (arch.I686, arch.X86_64):
guest.features.append(vconfig.LibvirtConfigGuestFeaturePAE())
if (virt_type not in ("lxc", "uml", "parallels", "xen") or
(virt_type == "xen" and guest.os_type == vm_mode.HVM)):
guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
if (virt_type in ("qemu", "kvm") and
os_type == 'windows'):
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
hv.relaxed = True
hv.spinlocks = True
hv.spinlock_retries = 8191
hv.vapic = True
guest.features.append(hv)
def _check_number_of_serial_console(self, num_ports):
virt_type = CONF.libvirt.virt_type
if (virt_type in ("kvm", "qemu") and
num_ports > ALLOWED_QEMU_SERIAL_PORTS):
raise exception.SerialPortNumberLimitExceeded(
allowed=ALLOWED_QEMU_SERIAL_PORTS, virt_type=virt_type)
    def _create_serial_console_devices(self, guest, instance, flavor,
                                       image_meta):
        """Add serial/console devices to the guest.

        With the serial console feature enabled, TCP-backed serial ports
        (console devices on s390) are created, each bound to a port
        acquired from the serial console pool. Otherwise a single
        file-backed device is added so the console log is captured.
        """
        guest_arch = libvirt_utils.get_arch(image_meta)
        if CONF.serial_console.enabled:
            num_ports = hardware.get_number_of_serial_ports(
                flavor, image_meta)
            if guest_arch in (arch.S390, arch.S390X):
                console_cls = vconfig.LibvirtConfigGuestConsole
            else:
                console_cls = vconfig.LibvirtConfigGuestSerial
            self._check_number_of_serial_console(num_ports)
            for port in six.moves.range(num_ports):
                console = console_cls()
                console.port = port
                console.type = "tcp"
                console.listen_host = (
                    CONF.serial_console.proxyclient_address)
                # Reserve a free TCP port on the proxy-client address.
                console.listen_port = (
                    serial_console.acquire_port(
                        console.listen_host))
                guest.add_device(console)
        else:
            # Serial console disabled: add one file-backed device so the
            # console log is still captured; s390 guests get a console
            # device with an "sclplm" target instead of a serial device.
            if guest_arch in (arch.S390, arch.S390X):
                consolelog = vconfig.LibvirtConfigGuestConsole()
                consolelog.target_type = "sclplm"
            else:
                consolelog = vconfig.LibvirtConfigGuestSerial()
            consolelog.type = "file"
            consolelog.source_path = self._get_console_log_path(instance)
            guest.add_device(consolelog)
    def _add_video_driver(self, guest, image_meta, flavor):
        """Attach a video device, validating the model and VRAM limits.

        :raises exception.InvalidVideoMode: requested model not in the
            supported list
        :raises exception.RequestedVRamTooHigh: hw_video_ram exceeds the
            flavor's hw_video:ram_max_mb cap
        """
        VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
        video = vconfig.LibvirtConfigGuestVideo()
        # NOTE(ldbragst): The following logic sets the video.type
        # depending on supported defaults given the architecture,
        # virtualization type, and features. The video.type attribute can
        # be overridden by the user with image_meta.properties, which
        # is carried out in the next if statement below this one.
        guestarch = libvirt_utils.get_arch(image_meta)
        if guest.os_type == vm_mode.XEN:
            video.type = 'xen'
        elif CONF.libvirt.virt_type == 'parallels':
            video.type = 'vga'
        elif guestarch in (arch.PPC, arch.PPC64, arch.PPC64LE):
            # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' be default
            video.type = 'vga'
        elif CONF.spice.enabled:
            video.type = 'qxl'
        if image_meta.properties.get('hw_video_model'):
            video.type = image_meta.properties.hw_video_model
            if (video.type not in VALID_VIDEO_DEVICES):
                raise exception.InvalidVideoMode(model=video.type)
        video_ram = image_meta.properties.get('hw_video_ram', 0)
        max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
        if video_ram > max_vram:
            raise exception.RequestedVRamTooHigh(req_vram=video_ram,
                                                 max_vram=max_vram)
        if max_vram and video_ram:
            # Convert with units.Mi / units.Ki (== 1024), i.e. MiB -> KiB.
            video.vram = video_ram * units.Mi / units.Ki
        guest.add_device(video)
def _add_qga_device(self, guest, instance):
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance.name))
guest.add_device(qga)
def _add_rng_device(self, guest, flavor):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
rng_path = CONF.libvirt.rng_dev_path
if (rng_path and not os.path.exists(rng_path)):
raise exception.RngDeviceNotExist(path=rng_path)
rng_device.backend = rng_path
guest.add_device(rng_device)
def _set_qemu_guest_agent(self, guest, flavor, instance, image_meta):
# Enable qga only if the 'hw_qemu_guest_agent' is equal to yes
if image_meta.properties.get('hw_qemu_guest_agent', False):
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
self._add_qga_device(guest, instance)
rng_is_virtio = image_meta.properties.get('hw_rng_model') == 'virtio'
rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '')
rng_allowed = strutils.bool_from_string(rng_allowed_str)
if rng_is_virtio and rng_allowed:
self._add_rng_device(guest, flavor)
def _get_guest_memory_backing_config(
self, inst_topology, numatune, flavor):
wantsmempages = False
if inst_topology:
for cell in inst_topology.cells:
if cell.pagesize:
wantsmempages = True
break
wantsrealtime = hardware.is_realtime_enabled(flavor)
membacking = None
if wantsmempages:
pages = self._get_memory_backing_hugepages_support(
inst_topology, numatune)
if pages:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.hugepages = pages
if wantsrealtime:
if not membacking:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
membacking.sharedpages = False
return membacking
    def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
        """Build hugepage backing configs for cells requesting large pages.

        :param inst_topology: instance NUMA topology object
        :param numatune: guest NUMATune config whose memnodes map guest
            cells to host cells
        :raises exception.MemoryPagesUnsupported: host lacks hugepage
            support or reports no NUMA topology
        """
        if not self._has_hugepage_support():
            # We should not get here, since we should have avoided
            # reporting NUMA topology from _get_host_numa_topology
            # in the first place. Just in case of a scheduler
            # mess up though, raise an exception
            raise exception.MemoryPagesUnsupported()
        host_topology = self._get_host_numa_topology()
        if host_topology is None:
            # As above, we should not get here but just in case...
            raise exception.MemoryPagesUnsupported()
        # Currently libvirt does not support the smallest
        # pagesize set as a backend memory.
        # https://bugzilla.redhat.com/show_bug.cgi?id=1173507
        avail_pagesize = [page.size_kb
                          for page in host_topology.cells[0].mempages]
        avail_pagesize.sort()
        smallest = avail_pagesize[0]
        pages = []
        for guest_cellid, inst_cell in enumerate(inst_topology.cells):
            if inst_cell.pagesize and inst_cell.pagesize > smallest:
                for memnode in numatune.memnodes:
                    if guest_cellid == memnode.cellid:
                        page = (
                            vconfig.LibvirtConfigGuestMemoryBackingPage())
                        page.nodeset = [guest_cellid]
                        page.size_kb = inst_cell.pagesize
                        pages.append(page)
                        break  # Quit early...
        return pages
def _get_flavor(self, ctxt, instance, flavor):
if flavor is not None:
return flavor
return instance.flavor
def _has_uefi_support(self):
# This means that the host can support uefi booting for guests
supported_archs = [arch.X86_64, arch.AARCH64]
caps = self._host.get_capabilities()
return ((caps.host.cpu.arch in supported_archs) and
self._host.has_min_version(MIN_LIBVIRT_UEFI_VERSION) and
os.path.exists(DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch]))
def _get_supported_perf_events(self):
if (len(CONF.libvirt.enabled_perf_events) == 0 or
not self._host.has_min_version(MIN_LIBVIRT_PERF_VERSION)):
return []
supported_events = []
host_cpu_info = self._get_cpu_info()
for event in CONF.libvirt.enabled_perf_events:
if self._supported_perf_event(event, host_cpu_info['features']):
supported_events.append(event)
return supported_events
def _supported_perf_event(self, event, cpu_features):
libvirt_perf_event_name = LIBVIRT_PERF_EVENT_PREFIX + event.upper()
if not hasattr(libvirt, libvirt_perf_event_name):
LOG.warning(_LW("Libvirt doesn't support event type %s."),
event)
return False
if (event in PERF_EVENTS_CPU_FLAG_MAPPING
and PERF_EVENTS_CPU_FLAG_MAPPING[event] not in cpu_features):
LOG.warning(_LW("Host does not support event type %s."), event)
return False
return True
    def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
                                      image_meta, flavor, root_device_name):
        """Apply virt-type specific OS settings to the guest config.

        Mutates *guest* in place. Handles xen (hvmloader), kvm/qemu
        (SMBIOS sysinfo, optional UEFI loader, machine type, boot menu),
        lxc, uml and parallels.

        :raises: exception.UEFINotSupported when the image requests UEFI
                 firmware but the host cannot provide it.
        """
        if virt_type == "xen":
            if guest.os_type == vm_mode.HVM:
                guest.os_loader = CONF.libvirt.xen_hvmloader_path
        elif virt_type in ("kvm", "qemu"):
            if caps.host.cpu.arch in (arch.I686, arch.X86_64):
                guest.sysinfo = self._get_guest_config_sysinfo(instance)
                guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
            hw_firmware_type = image_meta.properties.get('hw_firmware_type')
            if hw_firmware_type == fields.FirmwareType.UEFI:
                if self._has_uefi_support():
                    # Warn about experimental UEFI only once per process.
                    global uefi_logged
                    if not uefi_logged:
                        LOG.warning(_LW("uefi support is without some kind of "
                                        "functional testing and therefore "
                                        "considered experimental."))
                        uefi_logged = True
                    guest.os_loader = DEFAULT_UEFI_LOADER_PATH[
                        caps.host.cpu.arch]
                    guest.os_loader_type = "pflash"
                else:
                    raise exception.UEFINotSupported()
            guest.os_mach_type = self._get_machine_type(image_meta, caps)
            # Image property wins; otherwise fall back to the flavor
            # extra spec (default: no boot menu).
            if image_meta.properties.get('hw_boot_menu') is None:
                guest.os_bootmenu = strutils.bool_from_string(
                    flavor.extra_specs.get('hw:boot_menu', 'no'))
            else:
                guest.os_bootmenu = image_meta.properties.hw_boot_menu
        elif virt_type == "lxc":
            guest.os_init_path = "/sbin/init"
            guest.os_cmdline = CONSOLE
        elif virt_type == "uml":
            guest.os_kernel = "/usr/bin/linux"
            guest.os_root = root_device_name
        elif virt_type == "parallels":
            if guest.os_type == vm_mode.EXE:
                guest.os_init_path = "/sbin/init"
def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info):
if rescue:
self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
root_device_name)
elif instance.kernel_id:
self._set_guest_for_inst_kernel(instance, guest, inst_path,
virt_type, root_device_name,
image_meta)
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
def _create_consoles(self, virt_type, guest, instance, flavor, image_meta,
caps):
if virt_type in ("qemu", "kvm"):
self._create_serial_console_devices(guest, instance, flavor,
image_meta)
if caps.host.cpu.arch in (arch.S390, arch.S390X):
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.target_type = "sclp"
else:
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
return consolepty
def _cpu_config_to_vcpu_model(self, cpu_config, vcpu_model):
if not cpu_config:
return
if not vcpu_model:
vcpu_model = objects.VirtCPUModel()
vcpu_model.arch = cpu_config.arch
vcpu_model.vendor = cpu_config.vendor
vcpu_model.model = cpu_config.model
vcpu_model.mode = cpu_config.mode
vcpu_model.match = cpu_config.match
if cpu_config.sockets:
vcpu_model.topology = objects.VirtCPUTopology(
sockets=cpu_config.sockets,
cores=cpu_config.cores,
threads=cpu_config.threads)
else:
vcpu_model.topology = None
features = [objects.VirtCPUFeature(
name=f.name,
policy=f.policy) for f in cpu_config.features]
vcpu_model.features = features
return vcpu_model
def _vcpu_model_to_cpu_config(self, vcpu_model):
cpu_config = vconfig.LibvirtConfigGuestCPU()
cpu_config.arch = vcpu_model.arch
cpu_config.model = vcpu_model.model
cpu_config.mode = vcpu_model.mode
cpu_config.match = vcpu_model.match
cpu_config.vendor = vcpu_model.vendor
if vcpu_model.topology:
cpu_config.sockets = vcpu_model.topology.sockets
cpu_config.cores = vcpu_model.topology.cores
cpu_config.threads = vcpu_model.topology.threads
if vcpu_model.features:
for f in vcpu_model.features:
xf = vconfig.LibvirtConfigGuestCPUFeature()
xf.name = f.name
xf.policy = f.policy
cpu_config.features.add(xf)
return cpu_config
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None):
LOG.warn("_get_guest_config.............instance:%s" % instance)
flavor = instance.flavor
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
virt_type = CONF.libvirt.virt_type
guest = vconfig.LibvirtConfigGuest()
LOG.warn("guest----------------------%s" % guest)
guest.virt_type = virt_type
guest.name = instance.name
guest.uuid = instance.uuid
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
allowed_cpus = hardware.get_vcpu_pin_set()
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
guest_numa_config = self._get_guest_numa_config(
instance.numa_topology, flavor, allowed_cpus, image_meta)
guest.cpuset = guest_numa_config.cpuset
guest.cputune = guest_numa_config.cputune
guest.numatune = guest_numa_config.numatune
guest.membacking = self._get_guest_memory_backing_config(
instance.numa_topology,
guest_numa_config.numatune,
flavor)
guest.metadata.append(self._get_guest_config_meta(context,
instance))
guest.idmaps = self._get_guest_idmaps()
for event in self._supported_perf_events:
guest.add_perf_event(event)
self._update_guest_cputune(guest, flavor, virt_type)
guest.cpu = self._get_guest_cpu_config(
flavor, image_meta, guest_numa_config.numaconfig,
instance.numa_topology)
# the corresponding config file.
instance.vcpu_model = self._cpu_config_to_vcpu_model(
guest.cpu, instance.vcpu_model)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
guest.os_type = (vm_mode.get_from_instance(instance) or
self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
image_meta, flavor,
root_device_name)
if virt_type not in ('lxc', 'uml'):
self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info)
self._set_features(guest, instance.os_type, caps, virt_type)
self._set_clock(guest, instance.os_type, image_meta, virt_type)
storage_configs = self._get_guest_storage_config(
instance, image_meta, disk_info, rescue, block_device_info,
flavor, guest.os_type)
for config in storage_configs:
guest.add_device(config)
#import pdb
#pdb.set_trace()
if self.has_cdrom(instance,disk_info) is None:
cdrom = vconfig.LibvirtConfigGuestDisk()
cdrom.source_type = 'file'
cdrom.source_device = 'cdrom'
cdrom.target_bus = 'ide'
cdrom.target_dev = 'hdc'
cdrom.driver_name = 'qemu'
cdrom.driver_format = 'raw'
def is_iso_image_active(context, fake_image_id):
active_iso_images, flug = libvirt_utils.get_active_images(context, 'iso')
if flug:
fake_active_iso_images = []
for image in active_iso_images:
fake_active_iso_images.append(
hashlib.sha1(image).hexdigest())
if fake_image_id in fake_active_iso_images:
return True
else:
return False
else:
return True
try:
#exist_cdroms = self._list_cdrom(instance)
exist_cdroms = self.cdrom_list(instance)
found_instance = True
except:
found_instance = False
if found_instance:
if exist_cdroms:
image_id = exist_cdroms[0].get('image_id', '')
if image_id:
if not imagecache.iso_base_file_exists(image_id):
image_id = ''
if (image_id and not is_iso_image_active(context, image_id)):
imagecache.remove_base_image(image_id)
image_id = ''
else:
image_id = ''
else:
disk_format = getattr(image_meta, 'disk_format', '')
if disk_format == 'iso':
image_id = image_meta.get('id', '')
if not image_id:
image_id = image_meta['properties'].get('base_image_ref', '')
if image_id:
image_info = {}
image_info['image_id'] = image_id
image_id = imagecache.get_cache_fname(image_info, 'image_id')
else:
image_id = ''
if image_id != '':
base_url = self.image_cache_manager._get_base()
image_url = os.path.join(base_url, image_id)
else:
image_url = ''
cdrom.source_path = image_url
guest.add_device(cdrom)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, virt_type, self._host)
guest.add_device(config)
consolepty = self._create_consoles(virt_type, guest, instance, flavor,
image_meta, caps)
if virt_type != 'parallels':
consolepty.type = "pty"
guest.add_device(consolepty)
pointer = self._get_guest_pointer_model(guest.os_type, image_meta)
if pointer:
guest.add_device(pointer)
if (CONF.spice.enabled and CONF.spice.agent_enabled and
virt_type not in ('lxc', 'uml', 'xen')):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc.enabled and
virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.passwd = "%s" % instance.get("cipher", "00000")
graphics.keymap = CONF.vnc.keymap
graphics.listen = CONF.vnc.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if (CONF.spice.enabled and
virt_type not in ('lxc', 'uml', 'xen')):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.passwd = "%s" % instance.get("cipher", "00000")
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
self._add_video_driver(guest, image_meta, flavor)
# Qemu guest agent only support 'qemu' and 'kvm' hypervisor
if virt_type in ('qemu', 'kvm'):
self._set_qemu_guest_agent(guest, flavor, instance, image_meta)
if virt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
if len(pci_devs) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=virt_type)
if 'hw_watchdog_action' in flavor.extra_specs:
LOG.warning(_LW('Old property name "hw_watchdog_action" is now '
'deprecated and will be removed in the next release. '
'Use updated property name '
'"hw:watchdog_action" instead'), instance=instance)
# TODO(pkholkin): accepting old property name 'hw_watchdog_action'
# should be removed in the next release
watchdog_action = (flavor.extra_specs.get('hw_watchdog_action') or
flavor.extra_specs.get('hw:watchdog_action')
or 'disabled')
watchdog_action = image_meta.properties.get('hw_watchdog_action',
watchdog_action)
# NB(sross): currently only actually supported by KVM/QEmu
if watchdog_action != 'disabled':
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
# Memory balloon device only support 'qemu/kvm' and 'xen' hypervisor
if (virt_type in ('xen', 'qemu', 'kvm') and
CONF.libvirt.mem_stats_period_seconds > 0):
balloon = vconfig.LibvirtConfigMemoryBalloon()
if virt_type in ('qemu', 'kvm'):
balloon.model = 'virtio'
else:
balloon.model = 'xen'
balloon.period = CONF.libvirt.mem_stats_period_seconds
guest.add_device(balloon)
return guest
    def _get_guest_pointer_model(self, os_type, image_meta):
        """Return a pointer-device config for the guest, or None.

        Resolution order: image property 'hw_pointer_model', then
        CONF.pointer_model, then the deprecated use_usb_tablet option.
        Falls through to an implicit None when no pointer device is
        wanted or the tablet request cannot be honoured but is tolerated.

        :raises: exception.UnsupportedPointerModelRequested when a tablet
                 was explicitly requested but neither VNC nor agent-less
                 SPICE is enabled.
        """
        pointer_model = image_meta.properties.get(
            'hw_pointer_model', CONF.pointer_model)
        if pointer_model is None and CONF.libvirt.use_usb_tablet:
            # TODO(sahid): We set pointer_model to keep compatibility
            # until the next release O*. It means operators can continue
            # to use the deprecated option "use_usb_tablet" or set a
            # specific device to use
            pointer_model = "usbtablet"
            LOG.warning(_LW('The option "use_usb_tablet" has been '
                            'deprecated for Newton in favor of the more '
                            'generic "pointer_model". Please update '
                            'nova.conf to address this change.'))
        if pointer_model == "usbtablet":
            # We want a tablet if VNC is enabled, or SPICE is enabled and
            # the SPICE agent is disabled. If the SPICE agent is enabled
            # it provides a paravirt mouse which drastically reduces
            # overhead (by eliminating USB polling).
            if CONF.vnc.enabled or (
                    CONF.spice.enabled and not CONF.spice.agent_enabled):
                return self._get_guest_usb_tablet(os_type)
            else:
                if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
                    # For backward compatibility we don't want to break
                    # the request: warn and return None instead of raising.
                    LOG.warning(_LW('USB tablet requested for guests by host '
                                    'configuration. In order to accept this '
                                    'request VNC should be enabled or SPICE '
                                    'and SPICE agent disabled on host.'))
                else:
                    raise exception.UnsupportedPointerModelRequested(
                        model="usbtablet")
def _get_guest_usb_tablet(self, os_type):
tablet = None
if os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
else:
if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
# process of booting an instance if virtual machine mode
# is not configured as HVM.
LOG.warning(_LW('USB tablet requested for guests by host '
'configuration. In order to accept this '
'request the machine mode should be '
'configured as HVM.'))
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
return tablet
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None, write_to_disk=False):
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def get_info(self, instance):
guest = self._host.get_guest(instance)
return guest.get_info(self._host)
    def _create_domain_setup_lxc(self, instance, image_meta,
                                 block_device_info, disk_info):
        """Mount the instance's root filesystem for an LXC container.

        Connects the boot volume (when booted from volume) or uses the
        local image backend, mounts it under <instance path>/rootfs, and
        records the rootfs device for later disconnect. On failure the
        partial setup is torn down and the exception re-raised.
        """
        inst_path = libvirt_utils.get_instance_path(instance)
        disk_info = disk_info or {}
        disk_mapping = disk_info.get('mapping', {})
        if self._is_booted_from_volume(instance, disk_mapping):
            block_device_mapping = driver.block_device_info_get_mapping(
                block_device_info)
            root_disk = block_device.get_root_bdm(block_device_mapping)
            disk_info = blockinfo.get_info_from_bdm(
                instance, CONF.libvirt.virt_type, image_meta, root_disk)
            self._connect_volume(root_disk['connection_info'], disk_info)
            disk_path = root_disk['connection_info']['data']['device_path']
            # disk is backed by a local block device.
            image_model = imgmodel.LocalBlockImage(disk_path)
        else:
            image = self.image_backend.image(instance, 'disk')
            image_model = image.get_model(self._conn)
        container_dir = os.path.join(inst_path, 'rootfs')
        fileutils.ensure_tree(container_dir)
        rootfs_dev = disk_api.setup_container(image_model,
                                              container_dir=container_dir)
        try:
            # Save rootfs device to disconnect it when deleting the instance
            if rootfs_dev:
                instance.system_metadata['rootfs_device_name'] = rootfs_dev
            if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:
                # Id-mapped containers need the tree chowned to the
                # mapped uid/gid ranges.
                id_maps = self._get_guest_idmaps()
                libvirt_utils.chown_for_id_maps(container_dir, id_maps)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
LOG.debug('Attempting to unmount container filesystem: %s',
container_dir, instance=instance)
disk_api.clean_lxc_namespace(container_dir=container_dir)
else:
disk_api.teardown_container(container_dir=container_dir)
    @contextlib.contextmanager
    def _lxc_disk_handler(self, instance, image_meta,
                          block_device_info, disk_info):
        """Context manager mounting the LXC rootfs around domain creation.

        No-op for non-LXC virt types; otherwise sets up the container
        filesystem before the body runs and always cleans it up after,
        even when the body raises.
        """
        if CONF.libvirt.virt_type != 'lxc':
            yield
            return
        self._create_domain_setup_lxc(instance, image_meta,
                                      block_device_info, disk_info)
        try:
            yield
        finally:
            self._create_domain_cleanup_lxc(instance)
# TODO(sahid): Consider renaming this to _create_guest.
def _create_domain(self, xml=None, domain=None,
power_on=True, pause=False, post_xml_callback=None):
if xml:
guest = libvirt_guest.Guest.create(xml, self._host)
if post_xml_callback is not None:
post_xml_callback()
else:
guest = libvirt_guest.Guest(domain)
if power_on or pause:
guest.launch(pause=pause)
if not utils.is_neutron():
guest.enable_hairpin()
return guest
    def _neutron_failed_callback(self, event_name, instance):
        """Log a failed Neutron VIF event; raise when plugging is fatal.

        Used as the error_callback for wait_for_instance_event during
        VIF plugging.
        """
        LOG.error(_LE('Neutron Reported failure on event '
                      '%(event)s for instance %(uuid)s'),
                  {'event': event_name, 'uuid': instance.uuid},
                  instance=instance)
        if CONF.vif_plugging_is_fatal:
            raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
    def _create_domain_and_network(self, context, xml, instance, network_info,
                                   disk_info, block_device_info=None,
                                   power_on=True, reboot=False,
                                   vifs_already_plugged=False,
                                   post_xml_callback=None):
        """Create and launch the libvirt domain for *instance*.

        Attaches volume encryptors, plugs VIFs (optionally waiting for
        Neutron "plugged" events and starting the guest paused until they
        arrive), applies firewall filtering, and creates the domain.
        Cleans up the instance on VIF-plugging failure or timeout.

        :returns: the created nova.virt.libvirt.guest.Guest
        """
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            # Skip encryptor attach on reboot: volumes stay attached.
            if (not reboot and 'data' in connection_info and
                    'volume_id' in connection_info['data']):
                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)
                if encryption:
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.attach_volume(context, **encryption)
        timeout = CONF.vif_plugging_timeout
        # Only wait for Neutron events when we can start the guest paused
        # and will actually be plugging VIFs and powering on.
        if (self._conn_supports_start_paused and
            utils.is_neutron() and not
            vifs_already_plugged and power_on and timeout):
            events = self._get_neutron_events(network_info)
        else:
            events = []
        pause = bool(events)
        guest = None
        try:
            with self.virtapi.wait_for_instance_event(
                    instance, events, deadline=timeout,
                    error_callback=self._neutron_failed_callback):
                self.plug_vifs(instance, network_info)
                self.firewall_driver.setup_basic_filtering(instance,
                                                           network_info)
                self.firewall_driver.prepare_instance_filter(instance,
                                                             network_info)
                with self._lxc_disk_handler(instance, instance.image_meta,
                                            block_device_info, disk_info):
                    guest = self._create_domain(
                        xml, pause=pause, power_on=power_on,
                        post_xml_callback=post_xml_callback)
                self.firewall_driver.apply_instance_filter(instance,
                                                           network_info)
        except exception.VirtualInterfaceCreateException:
            # Neutron reported failure and we didn't swallow it, so
            # clean the instance up before re-raising.
            with excutils.save_and_reraise_exception():
                if guest:
                    guest.poweroff()
                self.cleanup(context, instance, network_info=network_info,
                             block_device_info=block_device_info)
        except eventlet.timeout.Timeout:
            LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                            'instance %(uuid)s'), {'uuid': instance.uuid},
                        instance=instance)
            if CONF.vif_plugging_is_fatal:
                if guest:
                    guest.poweroff()
                self.cleanup(context, instance, network_info=network_info,
                             block_device_info=block_device_info)
                raise exception.VirtualInterfaceCreateException()
        # Resume only now: the guest was started paused while waiting for
        # the Neutron events above.
        if pause:
            guest.resume()
        return guest
def _get_vcpu_total(self):
try:
total_pcpus = self._host.get_cpu_count()
except libvirt.libvirtError:
LOG.warning(_LW("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if not CONF.vcpu_pin_set:
return total_pcpus
available_ids = hardware.get_vcpu_pin_set()
online_pcpus = None
try:
online_pcpus = self._host.get_online_cpus()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warning(
_LW("Couldn't retrieve the online CPUs due to a Libvirt "
"error: %(error)s with error code: %(error_code)s"),
{'error': ex, 'error_code': error_code})
if online_pcpus:
if not (available_ids <= online_pcpus):
msg = (_("Invalid vcpu_pin_set config, one or more of the "
"specified cpuset is not online. Online cpuset(s): "
"%(online)s, requested cpuset(s): %(req)s"),
{'online': sorted(online_pcpus),
'req': sorted(available_ids)})
raise exception.Invalid(msg)
elif sorted(available_ids)[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
return len(available_ids)
@staticmethod
def _get_local_gb_info():
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in six.iteritems(info):
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
total = 0
if CONF.libvirt.virt_type == 'lxc':
return total + 1
for guest in self._host.list_guests():
try:
vcpus = guest.get_vcpus_info()
if vcpus is not None:
total += len(list(vcpus))
except libvirt.libvirtError as e:
LOG.warning(
_LW("couldn't obtain the vcpu count from domain id:"
" %(uuid)s, exception: %(ex)s"),
{"uuid": guest.uuid, "ex": e})
greenthread.sleep(0)
return total
def _get_instance_capabilities(self):
caps = self._host.get_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (
arch.canonicalize(g.arch),
hv_type.canonicalize(dt),
vm_mode.canonicalize(g.ostype))
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
caps = self._host.get_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['cells'] = len(getattr(caps.host.topology, 'cells', [1]))
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = set()
for f in caps.host.cpu.features:
features.add(f.name)
cpu_info['features'] = features
return cpu_info
    def _get_pcidev_info(self, devname):
        """Describe a host PCI device for the PCI tracker.

        :param devname: libvirt node-device name, e.g. pci_0000_04_00_3
        :returns: dict with dev_id, address, product/vendor ids, NUMA
                  node, label, and SR-IOV device-type fields
        """
        def _get_device_type(cfgdev, pci_address):
            # Classify the device as SR-IOV PF, SR-IOV VF or standard,
            # based on the libvirt function capabilities.
            for fun_cap in cfgdev.pci_capability.fun_capability:
                if fun_cap.type == 'virt_functions':
                    return {
                        'dev_type': fields.PciDeviceType.SRIOV_PF,
                    }
                if (fun_cap.type == 'phys_function' and
                    len(fun_cap.device_addrs) != 0):
                    phys_address = "%04x:%02x:%02x.%01x" % (
                        fun_cap.device_addrs[0][0],
                        fun_cap.device_addrs[0][1],
                        fun_cap.device_addrs[0][2],
                        fun_cap.device_addrs[0][3])
                    return {
                        'dev_type': fields.PciDeviceType.SRIOV_VF,
                        'parent_addr': phys_address,
                    }
            # Older libvirt omits the capability for PFs that currently
            # have no VFs; fall back to sysfs-based detection.
            if not self._host.has_min_version(
                    MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION):
                is_physical_function = pci_utils.is_physical_function(
                    *pci_utils.get_pci_address_fields(pci_address))
                if is_physical_function:
                    return {'dev_type': fields.PciDeviceType.SRIOV_PF}
            return {'dev_type': fields.PciDeviceType.STANDARD}
        virtdev = self._host.device_lookup_by_name(devname)
        xmlstr = virtdev.XMLDesc(0)
        cfgdev = vconfig.LibvirtConfigNodeDevice()
        cfgdev.parse_str(xmlstr)
        # Canonical PCI address: dddd:bb:ss.f
        address = "%04x:%02x:%02x.%1x" % (
            cfgdev.pci_capability.domain,
            cfgdev.pci_capability.bus,
            cfgdev.pci_capability.slot,
            cfgdev.pci_capability.function)
        device = {
            "dev_id": cfgdev.name,
            "address": address,
            "product_id": "%04x" % cfgdev.pci_capability.product_id,
            "vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
            }
        device["numa_node"] = cfgdev.pci_capability.numa_node
        device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
        device.update(_get_device_type(cfgdev, address))
        return device
def _get_pci_passthrough_devices(self):
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._host.list_pci_devices() or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warning(_LW("URI %(uri)s does not support "
"listDevices: %(error)s"),
{'uri': self._uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_info.append(self._get_pcidev_info(name))
return jsonutils.dumps(pci_info)
    def _has_numa_support(self):
        """Whether the host libvirt/qemu combination supports guest NUMA.

        Returns False on libvirt versions with known-broken NUMA support
        (warning once per process), or when the architecture lacks a
        qualifying libvirt/qemu version.
        """
        for ver in BAD_LIBVIRT_NUMA_VERSIONS:
            if self._host.has_version(ver):
                if not getattr(self, '_bad_libvirt_numa_version_warn', False):
                    LOG.warning(_LW('You are running with libvirt version %s '
                                    'which is known to have broken NUMA support. '
                                    'Consider patching or updating libvirt on '
                                    'this host if you need NUMA support.'),
                                self._version_to_string(ver))
                    self._bad_libvirt_numa_version_warn = True
                return False
        # Per-architecture minimum libvirt versions for NUMA support.
        support_matrix = {(arch.I686, arch.X86_64): MIN_LIBVIRT_NUMA_VERSION,
                          (arch.PPC64,
                           arch.PPC64LE): MIN_LIBVIRT_NUMA_VERSION_PPC}
        caps = self._host.get_capabilities()
        is_supported = False
        for archs, libvirt_ver in support_matrix.items():
            if ((caps.host.cpu.arch in archs) and
                    self._host.has_min_version(libvirt_ver,
                                               MIN_QEMU_NUMA_HUGEPAGE_VERSION,
                                               host.HV_DRIVER_QEMU)):
                is_supported = True
        return is_supported
def _has_hugepage_support(self):
# This means that the host can support multiple values for the size
# field in LibvirtConfigGuestMemoryBackingPage
supported_archs = [arch.I686, arch.X86_64, arch.PPC64LE, arch.PPC64]
caps = self._host.get_capabilities()
return ((caps.host.cpu.arch in supported_archs) and
self._host.has_min_version(MIN_LIBVIRT_HUGEPAGE_VERSION,
MIN_QEMU_NUMA_HUGEPAGE_VERSION,
host.HV_DRIVER_QEMU))
def _get_host_numa_topology(self):
if not self._has_numa_support():
return
caps = self._host.get_capabilities()
topology = caps.host.topology
if topology is None or not topology.cells:
return
cells = []
allowed_cpus = hardware.get_vcpu_pin_set()
online_cpus = self._host.get_online_cpus()
if allowed_cpus:
allowed_cpus &= online_cpus
else:
allowed_cpus = online_cpus
def _get_reserved_memory_for_cell(self, cell_id, page_size):
cell = self._reserved_hugepages.get(cell_id, {})
return cell.get(page_size, 0)
for cell in topology.cells:
cpuset = set(cpu.id for cpu in cell.cpus)
siblings = sorted(map(set,
set(tuple(cpu.siblings)
if cpu.siblings else ()
for cpu in cell.cpus)
))
cpuset &= allowed_cpus
siblings = [sib & allowed_cpus for sib in siblings]
# Filter out singles and empty sibling sets that may be left
siblings = [sib for sib in siblings if len(sib) > 1]
mempages = []
if self._has_hugepage_support():
mempages = [
objects.NUMAPagesTopology(
size_kb=pages.size,
total=pages.total,
used=0,
reserved=_get_reserved_memory_for_cell(
self, cell.id, pages.size))
for pages in cell.mempages]
cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
memory=cell.memory / units.Ki,
cpu_usage=0, memory_usage=0,
siblings=siblings,
pinned_cpus=set([]),
mempages=mempages)
cells.append(cell)
return objects.NUMATopology(cells=cells)
def get_all_volume_usage(self, context, compute_host_bdms):
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id, instance=instance)
vol_stats = self.block_stats(instance, mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance, disk_id):
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance.name, 'disk': disk_id,
'errcode': errcode, 'e': e},
instance=instance)
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance.name,
instance=instance)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
    def get_available_resource(self, nodename):
        """Return the host resource dict consumed by the resource tracker.

        Includes CPU/memory/disk totals and usage, hypervisor identity,
        serialized CPU info, PCI passthrough devices and NUMA topology.

        :param nodename: ignored here; this driver reports for the local
                         host only
        """
        disk_info_dict = self._get_local_gb_info()
        data = {}
        # NOTE(dprince): calling capabilities before getVersion works around
        # an initialization issue with some versions of Libvirt (1.0.5.5).
        # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
        # See: https://bugs.launchpad.net/nova/+bug/1215593
        data["supported_instances"] = self._get_instance_capabilities()
        data["vcpus"] = self._get_vcpu_total()
        data["memory_mb"] = self._host.get_memory_mb_total()
        data["local_gb"] = disk_info_dict['total']
        data["vcpus_used"] = self._get_vcpu_used()
        data["memory_mb_used"] = self._host.get_memory_mb_used()
        data["local_gb_used"] = disk_info_dict['used']
        data["hypervisor_type"] = self._host.get_driver_type()
        data["hypervisor_version"] = self._host.get_version()
        data["hypervisor_hostname"] = self._host.get_hostname()
        # TODO(berrange): why do we bother converting the
        # libvirt capabilities XML into a special JSON format ?
        # The data format is different across all the drivers
        # so we could just return the raw capabilities XML
        # which 'compare_cpu' could use directly
        #
        # That said, arch_filter.py now seems to rely on
        # the libvirt drivers format which suggests this
        # data format needs to be standardized across drivers
        data["cpu_info"] = jsonutils.dumps(self._get_cpu_info())
        disk_free_gb = disk_info_dict['free']
        disk_over_committed = self._get_disk_over_committed_size_total()
        # Free disk minus over-committed allocations, reported in GB.
        available_least = disk_free_gb * units.Gi - disk_over_committed
        data['disk_available_least'] = available_least / units.Gi
        data['pci_passthrough_devices'] = \
            self._get_pci_passthrough_devices()
        numa_topology = self._get_host_numa_topology()
        if numa_topology:
            data['numa_topology'] = numa_topology._to_json()
        else:
            data['numa_topology'] = None
        return data
def check_instance_shared_storage_local(self, context, instance):
if self.image_backend.backend().is_shared_block_storage():
return None
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Run the destination-side pre-checks for a live migration.

        :param context: security context
        :param instance: instance being migrated
        :param src_compute_info: source host compute info dict
        :param dst_compute_info: destination (this) host compute info dict
        :param block_migration: whether block migration was requested
        :param disk_over_commit: whether disk over-commit is allowed
        :returns: a LibvirtLiveMigrateData object handed back to the
            source-side check and the migration operation itself
        """
        # Available disk on this host, minus the configured reservation.
        disk_available_gb = dst_compute_info['disk_available_least']
        disk_available_mb = (
            (disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)
        # Compare CPU: prefer the instance's stored vCPU model; fall
        # back to the source host's raw cpu_info when none is recorded.
        if not instance.vcpu_model or not instance.vcpu_model.model:
            source_cpu_info = src_compute_info['cpu_info']
            self._compare_cpu(None, source_cpu_info, instance)
        else:
            self._compare_cpu(instance.vcpu_model, None, instance)
        # Create file on storage, to be checked on source host
        filename = self._create_shared_storage_test_file(instance)
        data = objects.LibvirtLiveMigrateData()
        data.filename = filename
        data.image_type = CONF.libvirt.images_type
        # Notes(eliqiao): block_migration and disk_over_commit are not
        # nullable, so just don't set them if they are None
        if block_migration is not None:
            data.block_migration = block_migration
        if disk_over_commit is not None:
            data.disk_over_commit = disk_over_commit
        data.disk_available_mb = disk_available_mb
        return data
def cleanup_live_migration_destination_check(self, context,
dest_check_data):
filename = dest_check_data.filename
self._cleanup_shared_storage_test_file(filename)
    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data,
                                      block_device_info=None):
        """Run the source-side pre-checks for a live migration.

        Determines whether the instance directory / block storage is
        shared with the destination, decides block vs. non-block
        migration when unspecified, and validates the combination
        against the requested mode.

        :param context: security context
        :param instance: instance being migrated
        :param dest_check_data: result of check_can_live_migrate_destination
            (legacy dicts are converted to LibvirtLiveMigrateData)
        :param block_device_info: mapped volume information, if any
        :returns: the (possibly augmented) dest_check_data
        :raises InvalidLocalStorage: block migration requested on shared
            storage
        :raises InvalidSharedStorage: non-block migration without shared
            storage (unless volume-backed with no local disk)
        :raises MigrationPreCheckError: block migration with mapped
            volumes on an unsupported libvirt version/configuration
        """
        if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
            md_obj = objects.LibvirtLiveMigrateData()
            md_obj.from_legacy_dict(dest_check_data)
            dest_check_data = md_obj
        source = CONF.host
        # Probe file written by the destination: visible here means the
        # instance path is on storage shared by both hosts.
        dest_check_data.is_shared_instance_path = (
            self._check_shared_storage_test_file(
                dest_check_data.filename, instance))
        dest_check_data.is_shared_block_storage = (
            self._is_shared_block_storage(instance, dest_check_data,
                                          block_device_info))
        disk_info_text = self.get_instance_disk_info(
            instance, block_device_info=block_device_info)
        booted_from_volume = self._is_booted_from_volume(instance,
                                                         disk_info_text)
        has_local_disk = self._has_local_disk(instance, disk_info_text)
        # If the caller did not force the mode, block-migrate exactly
        # when nothing is shared.
        if 'block_migration' not in dest_check_data:
            dest_check_data.block_migration = (
                not dest_check_data.is_on_shared_storage())
        if dest_check_data.block_migration:
            if dest_check_data.is_on_shared_storage():
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidLocalStorage(reason=reason, path=source)
            if 'disk_over_commit' in dest_check_data:
                self._assert_dest_node_has_enough_disk(context, instance,
                                        dest_check_data.disk_available_mb,
                                        dest_check_data.disk_over_commit,
                                        block_device_info)
            if block_device_info:
                bdm = block_device_info.get('block_device_mapping')
                # NOTE(stpierre): if this instance has mapped volumes, a
                # block migration needs selective block device migration
                # support, which requires a minimum libvirt version and
                # is not available when migration is tunnelled through
                # libvirt.
                if bdm and not self._host.has_min_version(
                        MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
                    # NOTE(stpierre): if this instance has mapped volumes,
                    # we can't do a block migration, since that will result
                    # in the volumes being copied needlessly.
                    ver = ".".join([str(x) for x in
                                    MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION])
                    msg = (_('Cannot block migrate instance %(uuid)s with'
                             ' mapped volumes. Selective block device'
                             ' migration feature requires libvirt version'
                             ' %(libvirt_ver)s') %
                           {'uuid': instance.uuid, 'libvirt_ver': ver})
                    LOG.error(msg, instance=instance)
                    raise exception.MigrationPreCheckError(reason=msg)
                if (bdm and
                    (self._block_migration_flags &
                     libvirt.VIR_MIGRATE_TUNNELLED != 0)):
                    msg = (_('Cannot block migrate instance %(uuid)s with'
                             ' mapped volumes. Selective block device'
                             ' migration is not supported with tunnelled'
                             ' block migrations.') % {'uuid': instance.uuid})
                    LOG.error(msg, instance=instance)
                    raise exception.MigrationPreCheckError(reason=msg)
        elif not (dest_check_data.is_shared_block_storage or
                  dest_check_data.is_shared_instance_path or
                  (booted_from_volume and not has_local_disk)):
            reason = _("Live migration can not be used "
                       "without shared storage except "
                       "a booted from volume VM which "
                       "does not have a local disk.")
            raise exception.InvalidSharedStorage(reason=reason, path=source)
        # Record the relative instance path so the destination forces the
        # same name to be used
        instance_path = libvirt_utils.get_instance_path(instance,
                                                        relative=True)
        dest_check_data.instance_relative_path = instance_path
        return dest_check_data
    def _is_shared_block_storage(self, instance, dest_check_data,
                                 block_device_info=None):
        """Check if all block storage of an instance can be shared
        between source and destination of a live migration.

        Returns True when any of the following holds:
        * both hosts use the same shared-block-storage image backend;
        * the instance path is shared and the image backend stores the
          block device files inside it (file based backends);
        * the instance is fully volume backed (no local disks at all).
        """
        if (dest_check_data.obj_attr_is_set('image_type') and
                CONF.libvirt.images_type == dest_check_data.image_type and
                self.image_backend.backend().is_shared_block_storage()):
            # NOTE(dgenin): currently true only for RBD image backend
            return True
        if (dest_check_data.is_shared_instance_path and
                self.image_backend.backend().is_file_in_instance_path()):
            # NOTE(angdraug): file based image backends (Flat, Qcow2)
            # place block device files under the instance path
            return True
        # Volume backed with an empty local disk list counts as shared.
        if (dest_check_data.is_volume_backed and
                not bool(jsonutils.loads(
                    self.get_instance_disk_info(instance,
                                                block_device_info)))):
            return True
        return False
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit,
block_device_info=None):
# Libvirt supports qcow2 disk format,which is usually compressed
# on compute nodes.
# Real disk image (compressed) may enlarged to "virtual disk size",
# that is specified as the maximum disk size.
# (See qemu-img -f path-to-disk)
# Scheduler recognizes destination host still has enough disk space
# if real disk size < available disk size
# if disk_over_commit is True,
# otherwise virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance,
block_device_info=block_device_info)
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
'Disk of instance is too large(available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance.uuid,
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
    def _compare_cpu(self, guest_cpu, host_cpu_str, instance):
        """Check whether this host's CPU is compatible with the guest.

        Exactly one of guest_cpu / host_cpu_str is expected to be set:
        a stored vCPU model object, or the source host's JSON cpu_info
        (produced by _get_cpu_info) — TODO confirm callers never pass
        both.

        :raises MigrationPreCheckError: libvirt failed to compare CPUs
        :raises InvalidCPUInfo: libvirt reports the CPUs incompatible
        """
        # NOTE(kchamart): Comparing host to guest CPU model for emulated
        # guests (<domain type='qemu'>) should not matter -- in this
        # mode (QEMU "TCG") the CPU is fully emulated in software and no
        # hardware acceleration, like KVM, is involved. So, skip the CPU
        # compatibility check for the QEMU domain type, and retain it for
        # KVM guests.
        if CONF.libvirt.virt_type not in ['kvm']:
            return
        if guest_cpu is None:
            # Build a CPU config from the source host's raw cpu_info.
            info = jsonutils.loads(host_cpu_str)
            LOG.info(_LI('Instance launched has CPU info: %s'), host_cpu_str)
            cpu = vconfig.LibvirtConfigCPU()
            cpu.arch = info['arch']
            cpu.model = info['model']
            cpu.vendor = info['vendor']
            cpu.sockets = info['topology']['sockets']
            cpu.cores = info['topology']['cores']
            cpu.threads = info['topology']['threads']
            for f in info['features']:
                cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
        else:
            cpu = self._vcpu_model_to_cpu_config(guest_cpu)
        u = ("http://libvirt.org/html/libvirt-libvirt-host.html#"
             "virCPUCompareResult")
        m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # Compare CPU
        try:
            cpu_xml = cpu.to_xml()
            LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
            ret = self._host.compare_cpu(cpu_xml)
        except libvirt.libvirtError as e:
            error_code = e.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                # Hypervisor can't compare CPUs; proceed optimistically.
                LOG.debug("URI %(uri)s does not support cpu comparison. "
                          "It will be proceeded though. Error: %(error)s",
                          {'uri': self._uri(), 'error': e})
                return
            else:
                LOG.error(m, {'ret': e, 'u': u})
                raise exception.MigrationPreCheckError(
                    reason=m % {'ret': e, 'u': u})
        # Negative/zero result from virCPUCompare means incompatible.
        if ret <= 0:
            LOG.error(m, {'ret': ret, 'u': u})
            raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self, instance):
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file, instance=instance)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename, instance):
os.utime(CONF.instances_path, None)
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
exists = False
else:
exists = True
LOG.debug('Check if temp file %s exists to indicate shared storage '
'is being used for migration. Exists? %s', tmp_file, exists,
instance=instance)
return exists
def _cleanup_shared_storage_test_file(self, filename):
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
    def ensure_filtering_rules_for_instance(self, instance, network_info):
        """Ensure the instance's firewall filter rules are in place.

        Sets up and prepares the filter, then polls (once per second,
        up to CONF.live_migration_retry_count attempts) until the
        firewall driver reports the filter exists.

        :raises NovaException: filter never appeared within the retry
            budget
        """
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance,
                network_info)
        # wait for completion
        timeout_count = list(range(CONF.live_migration_retry_count))
        while timeout_count:
            if self.firewall_driver.instance_filter_exists(instance,
                                                           network_info):
                break
            timeout_count.pop()
            if len(timeout_count) == 0:
                msg = _('The firewall filter for %s does not exist')
                raise exception.NovaException(msg % instance.name)
            greenthread.sleep(1)
    def filter_defer_apply_on(self):
        """Delegate to the firewall driver to start deferring filter
        application.
        """
        self.firewall_driver.filter_defer_apply_on()
    def filter_defer_apply_off(self):
        """Delegate to the firewall driver to stop deferring filter
        application.
        """
        self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
# exploit the URI accepted by libivrt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
self._live_migration(context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
    def live_migration_abort(self, instance):
        """Abort the in-progress live migration job for the instance.

        :raises: libvirt.libvirtError re-raised when the abort fails
        """
        guest = self._host.get_guest(instance)
        # NOTE(review): reaches into the Guest wrapper's private _domain
        # attribute to call abortJob() directly — presumably pending a
        # Guest-level abort helper; confirm.
        dom = guest._domain
        try:
            dom.abortJob()
        except libvirt.libvirtError as e:
            LOG.error(_LE("Failed to cancel migration %s"),
                      e, instance=instance)
            raise
    def _check_graphics_addresses_can_live_migrate(self, listen_addrs):
        """Validate graphics listen addresses for live migration when
        the guest XML cannot be rewritten for the destination.

        :param listen_addrs: dict of the destination's 'vnc'/'spice'
            listen addresses, or a falsy value when unavailable
        :raises MigrationError: source listens on a non-local, non
            catch-all address so the migrated guest would break
        """
        LOCAL_ADDRS = ('0.0.0.0', '127.0.0.1', '::', '::1')
        local_vnc = CONF.vnc.vncserver_listen in LOCAL_ADDRS
        local_spice = CONF.spice.server_listen in LOCAL_ADDRS
        if ((CONF.vnc.enabled and not local_vnc) or
            (CONF.spice.enabled and not local_spice)):
            msg = _('Your libvirt version does not support the'
                    ' VIR_DOMAIN_XML_MIGRATABLE flag or your'
                    ' destination node does not support'
                    ' retrieving listen addresses. In order'
                    ' for live migration to work properly, you'
                    ' must configure the graphics (VNC and/or'
                    ' SPICE) listen addresses to be either'
                    ' the catch-all address (0.0.0.0 or ::) or'
                    ' the local address (127.0.0.1 or ::1).')
            raise exception.MigrationError(reason=msg)
        if listen_addrs:
            # Source is local/catch-all; warn (don't fail) when the
            # destination addresses differ.
            dest_local_vnc = listen_addrs.get('vnc') in LOCAL_ADDRS
            dest_local_spice = listen_addrs.get('spice') in LOCAL_ADDRS
            if ((CONF.vnc.enabled and not dest_local_vnc) or
                (CONF.spice.enabled and not dest_local_spice)):
                LOG.warning(_LW('Your libvirt version does not support the'
                                ' VIR_DOMAIN_XML_MIGRATABLE flag, and the'
                                ' graphics (VNC and/or SPICE) listen'
                                ' addresses on the destination node do not'
                                ' match the addresses on the source node.'
                                ' Since the source node has listen'
                                ' addresses set to either the catch-all'
                                ' address (0.0.0.0 or ::) or the local'
                                ' address (127.0.0.1 or ::1), the live'
                                ' migration will succeed, but the VM will'
                                ' continue to listen on the current'
                                ' addresses.'))
def _verify_serial_console_is_disabled(self):
if CONF.serial_console.enabled:
msg = _('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly you'
' must either disable serial console or'
' upgrade your libvirt version.')
raise exception.MigrationError(reason=msg)
    def _live_migration_operation(self, context, instance, dest,
                                  block_migration, migrate_data, guest,
                                  device_names):
        """Invoke the libvirt migration operation (runs in its own
        green thread, spawned by _live_migration).

        :param dest: destination host (may be replaced by
            migrate_data.target_connect_addr below)
        :param guest: the nova.virt.libvirt Guest being migrated
        :param device_names: disk device names to block-migrate

        Errors are logged and re-raised; the monitoring thread decides
        the real outcome from the domain job info.
        """
        try:
            if migrate_data.block_migration:
                migration_flags = self._block_migration_flags
            else:
                migration_flags = self._live_migration_flags
            listen_addrs = libvirt_migrate.graphics_listen_addrs(
                migrate_data)
            migratable_flag = self._host.is_migratable_xml_flag()
            if not migratable_flag or not listen_addrs:
                # In this context want to ensure we do not have to migrate
                # graphic or serial consoles since we can't update guest's
                # domain XML to make it handle destination host.
                # TODO(alexs-h): These checks could be moved to the
                # check_can_live_migrate_destination/source phase
                self._check_graphics_addresses_can_live_migrate(listen_addrs)
                self._verify_serial_console_is_disabled()
            if ('target_connect_addr' in migrate_data and
                    migrate_data.target_connect_addr is not None):
                dest = migrate_data.target_connect_addr
            new_xml_str = None
            params = None
            if (self._host.is_migratable_xml_flag() and (
                    listen_addrs or migrate_data.bdms)):
                new_xml_str = libvirt_migrate.get_updated_guest_xml(
                    # TODO(sahid): It's not a really well idea to pass
                    # the method _get_volume_config; a better way to get
                    # volume configs should be found in future.
                    guest, migrate_data, self._get_volume_config)
            if self._host.has_min_version(
                    MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
                params = {
                    'bandwidth': CONF.libvirt.live_migration_bandwidth,
                    'destination_xml': new_xml_str,
                    'migrate_disks': device_names,
                }
                # Tunnelled migration cannot selectively migrate disks,
                # so drop the parameter in that case.
                if (migration_flags &
                    libvirt.VIR_MIGRATE_TUNNELLED != 0):
                    params.pop('migrate_disks')
            guest.migrate(self._live_migration_uri(dest),
                          flags=migration_flags,
                          params=params,
                          domain_xml=new_xml_str,
                          bandwidth=CONF.libvirt.live_migration_bandwidth)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Live Migration failure: %s"), e,
                          instance=instance)
        # If the migration call fails we don't know what state the
        # VM instances on each host are in. Possibilities include
        #
        # 1. src==running, dst==none
        #
        #    Migration failed & rolled back, or never started
        #
        # 2. src==running, dst==paused
        #
        #    Migration started but is still ongoing
        #
        # 3. src==paused,  dst==paused
        #
        #    Migration data transfer completed, but switchover
        #    is still ongoing, or failed
        #
        # 4. src==paused,  dst==running
        #
        #    Migration data transfer completed, switchover
        #    happened but cleanup on source failed
        #
        # 5. src==none,    dst==running
        #
        #    Migration fully succeeded.
        #
        # Libvirt will aim to complete any migration operation
        # or roll it back. So even if the migrateToURI call has
        # returned an error, if the migration was not finished
        # libvirt should clean up.
        #
        # So we take the error raise here with a pinch of salt
        # and rely on the domain job info status to figure out
        # what really happened to the VM, which is a much more
        # reliable indicator.
        #
        # In particular we need to try very hard to ensure that
        # Nova does not "forget" about the guest. ie leaving it
        # running on a different host to the one recorded in
        # the database, as that would be a serious resource leak
        LOG.debug("Migration operation thread has finished",
                  instance=instance)
    @staticmethod
    def _migration_downtime_steps(data_gb):
        """Yield (delay-in-seconds, downtime-in-ms) steps for gradually
        relaxing the allowed migration downtime.

        :param data_gb: total GB of RAM+disk being transferred; the
            per-step delay scales linearly with it
        :returns: generator of ``steps + 1`` tuples whose downtime grows
            exponentially from a small offset up to the configured
            maximum
        """
        downtime = CONF.libvirt.live_migration_downtime
        steps = CONF.libvirt.live_migration_downtime_steps
        delay = CONF.libvirt.live_migration_downtime_delay
        # TODO(hieulq): Need to move min/max value into the config option,
        # currently oslo_config will raise ValueError instead of setting
        # option value to its min/max.
        if downtime < nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_MIN:
            downtime = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_MIN
        if steps < nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_STEPS_MIN:
            steps = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_STEPS_MIN
        if delay < nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_DELAY_MIN:
            delay = nova.conf.libvirt.LIVE_MIGRATION_DOWNTIME_DELAY_MIN
        delay = int(delay * data_gb)
        # Exponential progression: step i yields roughly
        # offset + base**i, reaching `downtime` at the final step.
        offset = downtime / float(steps + 1)
        base = (downtime - offset) ** (1 / float(steps))
        for i in range(steps + 1):
            yield (int(delay * i), int(offset + base ** i))
    def _live_migration_copy_disk_paths(self, context, instance, guest):
        """Collect the disks to copy during a block migration.

        :returns: tuple ``(disk_paths, device_names)`` of local source
            paths and their target device names, excluding read-only,
            shareable and non file/block disks, and — when selective
            block device migration is available — mapped volumes.
        """
        disk_paths = []
        device_names = []
        block_devices = []
        # TODO(pkoniszewski): Remove version check when we bump min libvirt
        # version to >= 1.2.17.
        if (self._block_migration_flags &
                libvirt.VIR_MIGRATE_TUNNELLED == 0 and
                self._host.has_min_version(
                    MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION)):
            bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
            block_device_info = driver.get_block_device_info(instance,
                                                             bdm_list)
            block_device_mappings = driver.block_device_info_get_mapping(
                block_device_info)
            # Collect mapped-volume device names so they are skipped
            # below (the volumes are reconnected, not copied).
            for bdm in block_device_mappings:
                device_name = str(bdm['mount_device'].rsplit('/', 1)[1])
                block_devices.append(device_name)
        for dev in guest.get_all_disks():
            if dev.readonly or dev.shareable:
                continue
            if dev.source_type not in ["file", "block"]:
                continue
            if dev.target_dev in block_devices:
                continue
            disk_paths.append(dev.source_path)
            device_names.append(dev.target_dev)
        return (disk_paths, device_names)
def _live_migration_data_gb(self, instance, disk_paths):
ram_gb = instance.flavor.memory_mb * units.Mi / units.Gi
if ram_gb < 2:
ram_gb = 2
disk_gb = 0
for path in disk_paths:
try:
size = os.stat(path).st_size
size_gb = (size / units.Gi)
if size_gb < 2:
size_gb = 2
disk_gb += size_gb
except OSError as e:
LOG.warning(_LW("Unable to stat %(disk)s: %(ex)s"),
{'disk': path, 'ex': e})
# Ignore error since we don't want to break
return ram_gb + disk_gb
def _get_migration_flags(self, is_block_migration):
if is_block_migration:
return self._block_migration_flags
return self._live_migration_flags
    def _live_migration_monitor(self, context, instance, guest,
                                dest, post_method,
                                recover_method, block_migration,
                                migrate_data, finish_event,
                                disk_paths):
        """Poll the libvirt job info every 0.5s until the migration
        completes, fails or is cancelled, driving downtime stepping,
        timeout aborts, post-copy switching, progress logging and the
        final post/recover callbacks.

        :param finish_event: event signalled when the operation thread
            (started by _live_migration) has returned
        :param disk_paths: disks being copied, used to size timeouts
        """
        on_migration_failure = deque()
        data_gb = self._live_migration_data_gb(instance, disk_paths)
        downtime_steps = list(self._migration_downtime_steps(data_gb))
        migration = migrate_data.migration
        curdowntime = None
        migration_flags = self._get_migration_flags(
            migrate_data.block_migration)
        n = 0
        start = time.time()
        progress_time = start
        progress_watermark = None
        previous_data_remaining = -1
        is_post_copy_enabled = self._is_post_copy_enabled(migration_flags)
        while True:
            info = guest.get_job_info()
            if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
                # JOB_NONE is ambiguous: not started yet, or already
                # finished — disambiguate via the operation thread.
                if not finish_event.ready():
                    LOG.debug("Operation thread is still running",
                              instance=instance)
                else:
                    info.type = libvirt_migrate.find_job_type(guest, instance)
                    LOG.debug("Fixed incorrect job type to be %d",
                              info.type, instance=instance)
            if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
                # Migration is not yet started
                LOG.debug("Migration not running yet",
                          instance=instance)
            elif info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED:
                # Migration is still running: apply any queued tasks
                # (pause, force-complete, ...) for this migration.
                libvirt_migrate.run_tasks(guest, instance,
                                          self.active_migrations,
                                          on_migration_failure,
                                          migration,
                                          is_post_copy_enabled)
                now = time.time()
                elapsed = now - start
                # Track the low watermark of remaining data as the
                # progress indicator.
                if ((progress_watermark is None) or
                    (progress_watermark == 0) or
                    (progress_watermark > info.data_remaining)):
                    progress_watermark = info.data_remaining
                    progress_time = now
                progress_timeout = CONF.libvirt.live_migration_progress_timeout
                completion_timeout = int(
                    CONF.libvirt.live_migration_completion_timeout * data_gb)
                if libvirt_migrate.should_abort(instance, now, progress_time,
                                                progress_timeout, elapsed,
                                                completion_timeout,
                                                migration.status):
                    try:
                        guest.abort_job()
                    except libvirt.libvirtError as e:
                        LOG.warning(_LW("Failed to abort migration %s"),
                                    e, instance=instance)
                        self._clear_empty_migration(instance)
                        raise
                if (is_post_copy_enabled and
                        libvirt_migrate.should_switch_to_postcopy(
                        info.memory_iteration, info.data_remaining,
                        previous_data_remaining, migration.status)):
                    libvirt_migrate.trigger_postcopy_switch(guest,
                                                            instance,
                                                            migration)
                previous_data_remaining = info.data_remaining
                curdowntime = libvirt_migrate.update_downtime(
                    guest, instance, curdowntime,
                    downtime_steps, elapsed)
                # We loop every 500ms, so don't log on every
                # iteration to avoid spamming logs for long
                # running migrations. Just once every 5 secs
                # is sufficient for developers to debug problems.
                # We log once every 30 seconds at info to help
                # admins see slow running migration operations
                # when debug logs are off.
                if (n % 10) == 0:
                    # Ignoring memory_processed, as due to repeated
                    # dirtying of data, this can be way larger than
                    # memory_total. Best to just look at what's
                    # left to transfer as the indication of progress.
                    #
                    # TODO(berrange) perhaps we could include disk
                    # transfer stats in the progress too, but it
                    # might make memory info more obscure as large
                    # disk sizes might dwarf memory size
                    remaining = 100
                    if info.memory_total != 0:
                        remaining = round(info.memory_remaining *
                                          100 / info.memory_total)
                    libvirt_migrate.save_stats(instance, migration,
                                               info, remaining)
                    lg = LOG.debug
                    if (n % 60) == 0:
                        lg = LOG.info
                    lg(_LI("Migration running for %(secs)d secs, "
                           "memory %(remaining)d%% remaining; "
                           "(bytes processed=%(processed_memory)d, "
                           "remaining=%(remaining_memory)d, "
                           "total=%(total_memory)d)"),
                       {"secs": n / 2, "remaining": remaining,
                        "processed_memory": info.memory_processed,
                        "remaining_memory": info.memory_remaining,
                        "total_memory": info.memory_total}, instance=instance)
                    if info.data_remaining > progress_watermark:
                        lg(_LI("Data remaining %(remaining)d bytes, "
                               "low watermark %(watermark)d bytes "
                               "%(last)d seconds ago"),
                           {"remaining": info.data_remaining,
                            "watermark": progress_watermark,
                            "last": (now - progress_time)}, instance=instance)
                n = n + 1
            elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
                # Migration is all done
                LOG.info(_LI("Migration operation has completed"),
                         instance=instance)
                post_method(context, instance, dest, block_migration,
                            migrate_data)
                break
            elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED:
                # Migration did not succeed
                LOG.error(_LE("Migration operation has aborted"),
                          instance=instance)
                libvirt_migrate.run_recover_tasks(self._host, guest, instance,
                                                  on_migration_failure)
                recover_method(context, instance, dest, block_migration,
                               migrate_data)
                break
            elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED:
                # Migration was stopped by admin
                LOG.warning(_LW("Migration operation was cancelled"),
                            instance=instance)
                libvirt_migrate.run_recover_tasks(self._host, guest, instance,
                                                  on_migration_failure)
                recover_method(context, instance, dest, block_migration,
                               migrate_data, migration_status='cancelled')
                break
            else:
                LOG.warning(_LW("Unexpected migration job type: %d"),
                            info.type, instance=instance)
            time.sleep(0.5)
        self._clear_empty_migration(instance)
def _clear_empty_migration(self, instance):
try:
del self.active_migrations[instance.uuid]
except KeyError:
LOG.warning(_LW("There are no records in active migrations "
"for instance"), instance=instance)
    def _live_migration(self, context, instance, dest, post_method,
                        recover_method, block_migration,
                        migrate_data):
        """Spawn the migration operation in a green thread and monitor
        it until completion.

        :param post_method: callback invoked on successful completion
        :param recover_method: callback invoked on failure/cancellation
        """
        guest = self._host.get_guest(instance)
        disk_paths = []
        device_names = []
        if migrate_data.block_migration:
            disk_paths, device_names = self._live_migration_copy_disk_paths(
                context, instance, guest)
        opthread = utils.spawn(self._live_migration_operation,
                               context, instance, dest,
                               block_migration,
                               migrate_data, guest,
                               device_names)
        finish_event = eventlet.event.Event()
        self.active_migrations[instance.uuid] = deque()
        def thread_finished(thread, event):
            # Signal the monitor loop that the operation thread is done.
            LOG.debug("Migration operation thread notification",
                      instance=instance)
            event.send()
        opthread.link(thread_finished, finish_event)
        # Let eventlet schedule the new thread right away
        time.sleep(0)
        try:
            LOG.debug("Starting monitoring of live migration",
                      instance=instance)
            self._live_migration_monitor(context, instance, guest, dest,
                                         post_method, recover_method,
                                         block_migration, migrate_data,
                                         finish_event, disk_paths)
        except Exception as ex:
            LOG.warning(_LW("Error monitoring migration: %(ex)s"),
                        {"ex": ex}, instance=instance, exc_info=True)
            raise
        finally:
            LOG.debug("Live migration monitoring is all done",
                      instance=instance)
def _is_post_copy_enabled(self, migration_flags):
if self._is_post_copy_available():
if (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0:
return True
return False
    def live_migration_force_complete(self, instance):
        """Queue a 'force-complete' task for the instance's active
        migration; the monitor loop consumes it via run_tasks.

        :raises NoActiveMigrationForInstance: no active migration record
            exists for the instance
        """
        try:
            self.active_migrations[instance.uuid].append('force-complete')
        except KeyError:
            raise exception.NoActiveMigrationForInstance(
                instance_id=instance.uuid)
    def _try_fetch_image(self, context, path, image_id, instance,
                         fallback_from_host=None):
        """Fetch an image from the image service into ``path``, falling
        back to copying it from ``fallback_from_host`` when the image
        has been deleted from the image service.

        :raises ImageNotFound: image gone and no fallback host given
        """
        try:
            libvirt_utils.fetch_image(context, path, image_id)
        except exception.ImageNotFound:
            if not fallback_from_host:
                raise
            LOG.debug("Image %(image_id)s doesn't exist anymore on "
                      "image service, attempting to copy image "
                      "from %(host)s",
                      {'image_id': image_id, 'host': fallback_from_host})
            # Pull the file from the remote host into the same path.
            libvirt_utils.copy_image(src=path, dest=path,
                                     host=fallback_from_host,
                                     receive=True)
    def _fetch_instance_kernel_ramdisk(self, context, instance,
                                       fallback_from_host=None):
        """Download kernel and ramdisk for instance in instance directory,
        skipping files that are already present.

        :param fallback_from_host: host to copy the images from when the
            image service no longer has them
        """
        instance_dir = libvirt_utils.get_instance_path(instance)
        if instance.kernel_id:
            kernel_path = os.path.join(instance_dir, 'kernel')
            # NOTE(dsanders): only fetch image if it's not available at
            # kernel_path. This also avoids ImageNotFound exception if
            # the image has been deleted from glance
            if not os.path.exists(kernel_path):
                self._try_fetch_image(context,
                                      kernel_path,
                                      instance.kernel_id,
                                      instance, fallback_from_host)
            if instance.ramdisk_id:
                ramdisk_path = os.path.join(instance_dir, 'ramdisk')
                # NOTE(dsanders): only fetch image if it's not available at
                # ramdisk_path. This also avoids ImageNotFound exception if
                # the image has been deleted from glance
                if not os.path.exists(ramdisk_path):
                    self._try_fetch_image(context,
                                          ramdisk_path,
                                          instance.ramdisk_id,
                                          instance, fallback_from_host)
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up a failed live migration on this (destination) host:
        destroy the guest, then remove the instance directory if it was
        created locally (i.e. the instance path is not shared).
        """
        try:
            self.destroy(context, instance, network_info, block_device_info,
                         destroy_disks, migrate_data)
        finally:
            # Without migrate_data we conservatively assume a shared
            # path and leave the directory alone.
            is_shared_instance_path = True
            if migrate_data:
                is_shared_instance_path = migrate_data.is_shared_instance_path
            if not is_shared_instance_path:
                instance_dir = libvirt_utils.get_instance_path_at_destination(
                    instance, migrate_data)
                if os.path.exists(instance_dir):
                    shutil.rmtree(instance_dir)
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data):
        """Prepare this (destination) host for an incoming live
        migration: create the instance directory and disk.info when the
        path is not shared, pre-create images/backing files for block
        migration, connect volumes, plug VIFs (with retries), and fill
        in the destination-side fields of migrate_data.

        :param disk_info: JSON-encoded instance disk info, or None
        :returns: the augmented migrate_data
        :raises DestinationDiskExists: local instance dir already exists
        """
        if disk_info is not None:
            disk_info = jsonutils.loads(disk_info)
        LOG.debug('migrate_data in pre_live_migration: %s', migrate_data,
                  instance=instance)
        is_shared_block_storage = migrate_data.is_shared_block_storage
        is_shared_instance_path = migrate_data.is_shared_instance_path
        is_block_migration = migrate_data.block_migration
        if not is_shared_instance_path:
            instance_dir = libvirt_utils.get_instance_path_at_destination(
                instance, migrate_data)
            if os.path.exists(instance_dir):
                raise exception.DestinationDiskExists(path=instance_dir)
            LOG.debug('Creating instance directory: %s', instance_dir,
                      instance=instance)
            os.mkdir(instance_dir)
            # Write a disk.info describing each disk's format so the
            # image backend does not have to guess it later.
            if disk_info:
                image_disk_info = {}
                for info in disk_info:
                    image_file = os.path.basename(info['path'])
                    image_path = os.path.join(instance_dir, image_file)
                    image_disk_info[image_path] = info['type']
                LOG.debug('Creating disk.info with the contents: %s',
                          image_disk_info, instance=instance)
                image_disk_info_path = os.path.join(instance_dir,
                                                    'disk.info')
                libvirt_utils.write_to_file(image_disk_info_path,
                                            jsonutils.dumps(image_disk_info))
            if not is_shared_block_storage:
                # Ensure images and backing files are present.
                LOG.debug('Checking to make sure images and backing files are '
                          'present before live migration.', instance=instance)
                self._create_images_and_backing(
                    context, instance, instance_dir, disk_info,
                    fallback_from_host=instance.host)
            if (configdrive.required_by(instance) and
                    CONF.config_drive_format == 'iso9660'):
                # NOTE(pkoniszewski): Due to a libvirt bug, an iso config
                # drive must be copied over from the source host, because
                # live migration will fail on copying iso config drive to
                # destination and writing to read-only device.
                # Please see bug/1246201 for more details.
                src = "%s:%s/disk.config" % (instance.host, instance_dir)
                self._remotefs.copy_file(src, instance_dir)
        if not is_block_migration:
            # NOTE(angdraug): when block storage is shared between source
            # and destination and instance path isn't (e.g. volume backed
            # instance), the console log and kernel/ramdisk still need to
            # be made available locally.
            self._ensure_console_log_for_instance(instance)
            self._fetch_instance_kernel_ramdisk(context, instance)
        # Establishing connection to volume server.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        if len(block_device_mapping):
            LOG.debug('Connecting volumes before live migration.',
                      instance=instance)
        for bdm in block_device_mapping:
            connection_info = bdm['connection_info']
            disk_info = blockinfo.get_info_from_bdm(
                instance, CONF.libvirt.virt_type,
                instance.image_meta, bdm)
            self._connect_volume(connection_info, disk_info)
        # We call plug_vifs before the compute manager calls
        # ensure_filtering_rules_for_instance, to ensure bridge is set up
        LOG.debug('Plugging VIFs before live migration.', instance=instance)
        max_retry = CONF.live_migration_retry_count
        for cnt in range(max_retry):
            try:
                self.plug_vifs(instance, network_info)
                break
            except processutils.ProcessExecutionError:
                if cnt == max_retry - 1:
                    raise
                else:
                    LOG.warning(_LW('plug_vifs() failed %(cnt)d. Retry up to '
                                    '%(max_retry)d.'),
                                {'cnt': cnt,
                                 'max_retry': max_retry},
                                instance=instance)
                    greenthread.sleep(1)
        # Store destination-side connection details for the migration.
        if not migrate_data:
            migrate_data = objects.LibvirtLiveMigrateData(bdms=[])
        else:
            migrate_data.bdms = []
        migrate_data.graphics_listen_addr_vnc = CONF.vnc.vncserver_listen
        migrate_data.graphics_listen_addr_spice = CONF.spice.server_listen
        migrate_data.serial_listen_addr = \
            CONF.serial_console.proxyclient_address
        migrate_data.target_connect_addr = \
            CONF.libvirt.live_migration_inbound_addr
        migrate_data.supported_perf_events = self._supported_perf_events
        # Record the per-volume connection info resolved on this host so
        # the source can rewrite the guest XML accordingly.
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            if connection_info.get('serial'):
                disk_info = blockinfo.get_info_from_bdm(
                    instance, CONF.libvirt.virt_type,
                    instance.image_meta, vol)
                bdmi = objects.LibvirtLiveMigrateBDMInfo()
                bdmi.serial = connection_info['serial']
                bdmi.connection_info = connection_info
                bdmi.bus = disk_info['bus']
                bdmi.dev = disk_info['dev']
                bdmi.type = disk_info['type']
                bdmi.format = disk_info.get('format')
                bdmi.boot_index = disk_info.get('boot_index')
                migrate_data.bdms.append(bdmi)
        return migrate_data
    def _try_fetch_image_cache(self, image, fetch_func, context, filename,
                               image_id, instance, size,
                               fallback_from_host=None):
        """Populate an image-backend cache entry, falling back to copying
        the image file from ``fallback_from_host`` when the image has
        been deleted from the image service.

        :raises ImageNotFound: image gone and no fallback host given
        """
        try:
            image.cache(fetch_func=fetch_func,
                        context=context,
                        filename=filename,
                        image_id=image_id,
                        size=size)
        except exception.ImageNotFound:
            if not fallback_from_host:
                raise
            LOG.debug("Image %(image_id)s doesn't exist anymore "
                      "on image service, attempting to copy "
                      "image from %(host)s",
                      {'image_id': image_id, 'host': fallback_from_host},
                      instance=instance)
            # Replacement fetch_func: pull the file from the remote host
            # into the same target path.
            def copy_from_host(target):
                libvirt_utils.copy_image(src=target,
                                         dest=target,
                                         host=fallback_from_host,
                                         receive=True)
            image.cache(fetch_func=copy_from_host,
                        filename=filename)
    def _create_images_and_backing(self, context, instance, instance_dir,
                                   disk_info, fallback_from_host=None):
        """Create empty disk images and, for qcow2, their backing files
        on this host ahead of a block migration.

        :param disk_info: list of dicts describing the instance's disks
            (path, type, backing_file, virt_disk_size)
        :param fallback_from_host: host to copy images from when the
            image service no longer has them
        """
        if not disk_info:
            disk_info = []
        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image, and
            # create backing file in case of qcow2.
            instance_disk = os.path.join(instance_dir, base)
            if not info['backing_file'] and not os.path.exists(instance_disk):
                libvirt_utils.create_image(info['type'], instance_disk,
                                           info['virt_disk_size'])
            elif info['backing_file']:
                # Creating backing file follows same way as spawning instances.
                cache_name = os.path.basename(info['backing_file'])
                image = self.image_backend.image(instance,
                                                 instance_disk,
                                                 CONF.libvirt.images_type)
                if cache_name.startswith('ephemeral'):
                    # The argument 'size' is used by image.cache to
                    # validate disk size retrieved from cache against
                    # the instance disk size (should always return OK)
                    # and ephemeral_size is used by _create_ephemeral
                    # to build the image if the disk is not already
                    # cached.
                    image.cache(
                        fetch_func=self._create_ephemeral,
                        fs_label=cache_name,
                        os_type=instance.os_type,
                        filename=cache_name,
                        size=info['virt_disk_size'],
                        ephemeral_size=info['virt_disk_size'] / units.Gi)
                elif cache_name.startswith('swap'):
                    inst_type = instance.get_flavor()
                    swap_mb = inst_type.swap
                    image.cache(fetch_func=self._create_swap,
                                filename="swap_%s" % swap_mb,
                                size=swap_mb * units.Mi,
                                swap_mb=swap_mb)
                else:
                    self._try_fetch_image_cache(image,
                                                libvirt_utils.fetch_image,
                                                context, cache_name,
                                                instance.image_ref,
                                                instance,
                                                info['virt_disk_size'],
                                                fallback_from_host)
        # if image has kernel and ramdisk, just download
        # following normal way.
        self._fetch_instance_kernel_ramdisk(
            context, instance, fallback_from_host=fallback_from_host)
def post_live_migration(self, context, instance, block_device_info,
                        migrate_data=None):
    """Disconnect the instance's volumes on this (source) host after a
    live migration has completed."""
    # Disconnect from volume server
    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)
    connector = self.get_volume_connector(instance)
    volume_api = self._volume_api
    for vol in block_device_mapping:
        # Retrieve connection info from Cinder's initialize_connection API.
        volume_id = vol['connection_info']['serial']
        connection_info = volume_api.initialize_connection(context,
                                                           volume_id,
                                                           connector)
        # Carry the multipath id over so the correct multipath device
        # is torn down.
        if 'multipath_id' in vol['connection_info']['data']:
            multipath_id = vol['connection_info']['data']['multipath_id']
            connection_info['data']['multipath_id'] = multipath_id

        # e.g. '/dev/vdb' -> 'vdb'
        disk_dev = vol['mount_device'].rpartition("/")[2]
        self._disconnect_volume(connection_info, disk_dev)
def post_live_migration_at_source(self, context, instance, network_info):
    """Unplug the instance's VIFs on the source host after live migration.

    :param context: security context
    :param instance: the migrated instance
    :param network_info: instance network information
    """
    self.unplug_vifs(instance, network_info)
def post_live_migration_at_destination(self, context,
                                       instance,
                                       network_info,
                                       block_migration=False,
                                       block_device_info=None):
    """Regenerate the guest XML on the destination host and persist it,
    so the migrated domain is defined locally."""
    disk_info = blockinfo.get_disk_info(
        CONF.libvirt.virt_type, instance,
        instance.image_meta, block_device_info)
    xml = self._get_guest_xml(context, instance,
                              network_info, disk_info,
                              instance.image_meta,
                              block_device_info=block_device_info,
                              write_to_disk=True)
    self._host.write_instance_config(xml)
def _get_instance_disk_info(self, instance_name, xml,
                            block_device_info=None):
    """Parse a libvirt domain XML and return info about its local disks.

    Disks that correspond to Cinder volumes listed in
    *block_device_info* are skipped; only file- and block-backed local
    disks are reported.

    :param instance_name: instance name, used for log messages only
    :param xml: the domain XML to inspect
    :param block_device_info: optional block device info used to
        identify (and skip) attached volumes
    :returns: list of dicts with keys 'type', 'path', 'virt_disk_size',
        'backing_file', 'disk_size' and 'over_committed_disk_size'
    """
    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    # Device names ('vdb', ...) of attached volumes; these are not
    # local disks and must not be counted.
    volume_devices = set()
    for vol in block_device_mapping:
        disk_dev = vol['mount_device'].rpartition("/")[2]
        volume_devices.add(disk_dev)

    disk_info = []
    doc = etree.fromstring(xml)

    def find_nodes(doc, device_type):
        # Device, source, driver and target nodes for every device of
        # the given type, in document order (parallel lists).
        return (doc.findall('.//devices/%s' % device_type),
                doc.findall('.//devices/%s/source' % device_type),
                doc.findall('.//devices/%s/driver' % device_type),
                doc.findall('.//devices/%s/target' % device_type))

    # Parallels containers expose their root as a filesystem device
    # rather than a disk device.
    if (CONF.libvirt.virt_type == 'parallels' and
            doc.find('os/type').text == vm_mode.EXE):
        node_type = 'filesystem'
    else:
        node_type = 'disk'

    (disk_nodes, path_nodes,
     driver_nodes, target_nodes) = find_nodes(doc, node_type)

    for cnt, path_node in enumerate(path_nodes):
        disk_type = disk_nodes[cnt].get('type')
        path = path_node.get('file') or path_node.get('dev')
        if (node_type == 'filesystem'):
            target = target_nodes[cnt].attrib['dir']
        else:
            target = target_nodes[cnt].attrib['dev']

        if not path:
            LOG.debug('skipping disk for %s as it does not have a path',
                      instance_name)
            continue

        if disk_type not in ['file', 'block']:
            # BUG FIX: the format string previously had no placeholder,
            # so the path argument was silently dropped from the log.
            LOG.debug('skipping disk %s because it looks like a volume',
                      path)
            continue

        if target in volume_devices:
            LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
                      'volume', {'path': path, 'target': target})
            continue

        # Determine the real (allocated) disk size.
        if disk_type == 'file':
            if driver_nodes[cnt].get('type') == 'ploop':
                # A ploop disk is a directory; sum the files inside it.
                dk_size = 0
                for dirpath, dirnames, filenames in os.walk(path):
                    for f in filenames:
                        fp = os.path.join(dirpath, f)
                        dk_size += os.path.getsize(fp)
            else:
                dk_size = int(os.path.getsize(path))
        elif disk_type == 'block' and block_device_info:
            dk_size = lvm.get_volume_size(path)
        else:
            LOG.debug('skipping disk %(path)s (%(target)s) - unable to '
                      'determine if volume',
                      {'path': path, 'target': target})
            continue

        # From here on, disk_type is the image format (qcow2/raw/ploop),
        # taken from the driver node.
        disk_type = driver_nodes[cnt].get('type')
        if disk_type in ("qcow2", "ploop"):
            backing_file = libvirt_utils.get_disk_backing_file(path)
            virt_size = disk_api.get_disk_size(path)
            over_commit_size = int(virt_size) - dk_size
        else:
            backing_file = ""
            virt_size = dk_size
            over_commit_size = 0

        disk_info.append({'type': disk_type,
                          'path': path,
                          'virt_disk_size': virt_size,
                          'backing_file': backing_file,
                          'disk_size': dk_size,
                          'over_committed_disk_size': over_commit_size})
    return disk_info
def get_instance_disk_info(self, instance,
                           block_device_info=None):
    """Return JSON-serialized local disk info for *instance*'s domain.

    :raises exception.InstanceNotFound: if the domain XML cannot be
        retrieved from libvirt
    """
    try:
        guest = self._host.get_guest(instance)
        xml = guest.get_xml_desc()
    except libvirt.libvirtError as ex:
        error_code = ex.get_error_code()
        LOG.warning(_LW('Error from libvirt while getting description of '
                        '%(instance_name)s: [Error Code %(error_code)s] '
                        '%(ex)s'),
                    {'instance_name': instance.name,
                     'error_code': error_code,
                     'ex': ex},
                    instance=instance)
        raise exception.InstanceNotFound(instance_id=instance.uuid)

    return jsonutils.dumps(
        self._get_instance_disk_info(instance.name, xml,
                                     block_device_info))
def _get_disk_over_committed_size_total(self):
    """Return the total over-committed disk size (in bytes) summed over
    every domain on this host.

    Per-domain failures (vanished disks, permission problems,
    foreign VMs) are logged and skipped rather than aborting the whole
    periodic calculation.
    """
    disk_over_committed_size = 0
    instance_domains = self._host.list_instance_domains()
    if not instance_domains:
        return disk_over_committed_size

    # Fetch instances and BDMs for all domains in bulk, rather than one
    # DB round trip per domain.
    instance_uuids = [dom.UUIDString() for dom in instance_domains]
    ctx = nova_context.get_admin_context()
    filters = {'uuid': instance_uuids}
    local_instance_list = objects.InstanceList.get_by_filters(
        ctx, filters, use_slave=True)
    local_instances = {inst.uuid: inst for inst in local_instance_list}
    bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
        ctx, instance_uuids)

    for dom in instance_domains:
        try:
            guest = libvirt_guest.Guest(dom)
            xml = guest.get_xml_desc()

            block_device_info = None
            if guest.uuid in local_instances \
                    and (bdms and guest.uuid in bdms):
                block_device_info = driver.get_block_device_info(
                    local_instances[guest.uuid], bdms[guest.uuid])

            disk_infos = self._get_instance_disk_info(guest.name, xml,
                             block_device_info=block_device_info)
            if not disk_infos:
                continue

            for info in disk_infos:
                disk_over_committed_size += int(
                    info['over_committed_disk_size'])
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            LOG.warning(_LW(
                'Error from libvirt while getting description of '
                '%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
            ), {'instance_name': guest.name,
                'error_code': error_code,
                'ex': ex})
        except OSError as e:
            if e.errno in (errno.ENOENT, errno.ESTALE):
                LOG.warning(_LW('Periodic task is updating the host stat, '
                                'it is trying to get disk %(i_name)s, '
                                'but disk file was removed by concurrent '
                                'operations such as resize.'),
                            {'i_name': guest.name})
            elif e.errno == errno.EACCES:
                LOG.warning(_LW('Periodic task is updating the host stat, '
                                'it is trying to get disk %(i_name)s, '
                                'but access is denied. It is most likely '
                                'due to a VM that exists on the compute '
                                'node but is not managed by Nova.'),
                            {'i_name': guest.name})
            else:
                raise
        except exception.VolumeBDMPathNotFound as e:
            LOG.warning(_LW('Periodic task is updating the host stats, '
                            'it is trying to get disk info for %(i_name)s, '
                            'but the backing volume block device was removed '
                            'by concurrent operations such as resize. '
                            'Error: %(error)s'),
                        {'i_name': guest.name,
                         'error': e})
        # Yield to other greenthreads between domains.
        greenthread.sleep(0)
    return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
    """Remove the firewall filter rules for *instance*."""
    self.firewall_driver.unfilter_instance(instance,
                                           network_info=network_info)
def get_available_nodes(self, refresh=False):
    """Return the nodes this driver manages: just the local hostname.

    :param refresh: unused; kept for driver API compatibility
    """
    hostname = self._host.get_hostname()
    return [hostname]
def get_host_cpu_stats(self):
    """Return the host CPU statistics as reported by the host object."""
    stats = self._host.get_cpu_stats()
    return stats
def get_host_uptime(self):
    """Return the raw output of the host's ``uptime`` command."""
    # LANG=C forces a locale-independent output format.
    out, err = utils.execute('env', 'LANG=C', 'uptime')
    return out
def manage_image_cache(self, context, all_instances):
    """Trigger an update of the local image cache for *all_instances*."""
    self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
                              shared_storage=False):
    """Roll back a failed migrate_disk_and_power_off: restore the
    original instance directory from its '_resize' copy and, for
    non-shared storage, remove the directory created on *dest*."""
    try:
        if os.path.exists(inst_base_resize):
            utils.execute('rm', '-rf', inst_base)
            utils.execute('mv', inst_base_resize, inst_base)
            if not shared_storage:
                self._remotefs.remove_dir(dest, inst_base)
    except Exception:
        # Best effort: never let cleanup failure mask the original
        # migration error being propagated by the caller.
        pass
def _is_storage_shared_with(self, dest, inst_base):
    """Return True if *inst_base* lives on storage shared with *dest*.

    RBD-backed images are always shared. Otherwise, if *dest* is not
    this host, probe by creating a temp file on *dest* and checking
    whether it shows up locally.
    """
    if CONF.libvirt.images_type == 'rbd':
        return True

    shared_storage = (dest == self.get_host_ip_addr())
    if not shared_storage:
        tmp_file = uuid.uuid4().hex + '.tmp'
        tmp_path = os.path.join(inst_base, tmp_file)

        try:
            # Create the file remotely; if it is visible locally the
            # instance directory is on shared storage.
            self._remotefs.create_file(dest, tmp_path)
            if os.path.exists(tmp_path):
                shared_storage = True
                os.unlink(tmp_path)
            else:
                self._remotefs.remove_file(dest, tmp_path)
        except Exception:
            # Best-effort probe: any failure is treated as "not shared".
            pass
    return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
                               flavor, network_info,
                               block_device_info=None,
                               timeout=0, retry_interval=0):
    """Power off *instance* and copy its disks to *dest* for a
    resize/migration.

    :returns: the JSON disk-info string describing the instance disks,
        later consumed by finish_migration
    :raises exception.InstanceFaultRollback: when the disks would be
        resized down, when the backend is LVM, or when the destination
        directory cannot be created over ssh
    """
    LOG.debug("Starting migrate_disk_and_power_off",
              instance=instance)

    ephemerals = driver.block_device_info_get_ephemerals(block_device_info)

    # get_bdm_ephemeral_disk_size() returns 0 if there are no
    # ephemeral devices. However, we still want to check if
    # the original instance's ephemeral_gb property was set and
    # use that as a fallback size.
    eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
                instance.flavor.ephemeral_gb)

    # Resizing disks down is not supported (except root when booted
    # from volume, where the local root disk is irrelevant).
    root_down = flavor.root_gb < instance.flavor.root_gb
    ephemeral_down = flavor.ephemeral_gb < eph_size
    disk_info_text = self.get_instance_disk_info(
        instance, block_device_info=block_device_info)
    booted_from_volume = self._is_booted_from_volume(instance,
                                                     disk_info_text)

    if (root_down and not booted_from_volume) or ephemeral_down:
        reason = _("Unable to resize disk down.")
        raise exception.InstanceFaultRollback(
            exception.ResizeError(reason=reason))

    disk_info = jsonutils.loads(disk_info_text)

    if CONF.libvirt.images_type == 'lvm' and not booted_from_volume:
        reason = _("Migration is not supported for LVM backed instances")
        raise exception.InstanceFaultRollback(
            exception.MigrationPreCheckError(reason=reason))

    # Rename the instance dir to <name>_resize so the original data is
    # preserved until the migration is confirmed.
    inst_base = libvirt_utils.get_instance_path(instance)
    inst_base_resize = inst_base + "_resize"
    shared_storage = self._is_storage_shared_with(dest, inst_base)

    # Try to create the directory on the remote compute node up front so
    # ssh failures surface here rather than mid-copy.
    if not shared_storage:
        try:
            self._remotefs.create_dir(dest, inst_base)
        except processutils.ProcessExecutionError as e:
            reason = _("not able to execute ssh command: %s") % e
            raise exception.InstanceFaultRollback(
                exception.ResizeError(reason=reason))

    self.power_off(instance, timeout, retry_interval)

    # Detach all volumes before copying disks.
    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)
    for vol in block_device_mapping:
        connection_info = vol['connection_info']
        disk_dev = vol['mount_device'].rpartition("/")[2]
        self._disconnect_volume(connection_info, disk_dev)

    try:
        utils.execute('mv', inst_base, inst_base_resize)
        # With shared storage the "remote" dir is local: copy in place.
        if shared_storage:
            dest = None
            utils.execute('mkdir', '-p', inst_base)

        # Track copy subprocesses so they can be terminated if the
        # instance is deleted mid-migration.
        on_execute = lambda process: \
            self.job_tracker.add_job(instance, process.pid)
        on_completion = lambda process: \
            self.job_tracker.remove_job(instance, process.pid)

        active_flavor = instance.get_flavor()
        for info in disk_info:
            img_path = info['path']
            fname = os.path.basename(img_path)
            from_path = os.path.join(inst_base_resize, fname)

            # A resized swap disk's contents are garbage after shutdown,
            # so the OS doesn't bother about
            # what is in it.
            # We will not copy over the swap disk here, and rely on
            # finish_migration/_create_image to re-create it for us.
            if not (fname == 'disk.swap' and
                    active_flavor.get('swap', 0) != flavor.get('swap', 0)):

                compression = info['type'] not in NO_COMPRESSION_TYPES
                libvirt_utils.copy_image(from_path, img_path, host=dest,
                                         on_execute=on_execute,
                                         on_completion=on_completion,
                                         compression=compression)

        # Ensure disk.info is written to the new path to avoid disks being
        # reinspected and potentially changing format.
        src_disk_info_path = os.path.join(inst_base_resize, 'disk.info')
        if os.path.exists(src_disk_info_path):
            dst_disk_info_path = os.path.join(inst_base, 'disk.info')
            libvirt_utils.copy_image(src_disk_info_path,
                                     dst_disk_info_path,
                                     host=dest, on_execute=on_execute,
                                     on_completion=on_completion)
    except Exception:
        with excutils.save_and_reraise_exception():
            self._cleanup_remote_migration(dest, inst_base,
                                           inst_base_resize,
                                           shared_storage)

    return disk_info_text
def _wait_for_running(self, instance):
    """Looping-call target: raise LoopingCallDone once *instance*
    reaches the RUNNING power state."""
    state = self.get_info(instance).state

    if state == power_state.RUNNING:
        LOG.info(_LI("Instance running successfully."), instance=instance)
        raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_size_from_instance(instance, disk_name):
    """Return the flavor-defined size, in bytes, for the named disk.

    'disk' maps to the flavor's root_gb, 'disk.local' to ephemeral_gb;
    anything else yields 0.

    N.B. We don't handle ephemeral disks named disk.ephN here,
    so 0 is returned if an instance has multiple ephemeral disks.
    """
    if disk_name == 'disk':
        return instance.flavor.root_gb * units.Gi
    if disk_name == 'disk.local':
        return instance.flavor.ephemeral_gb * units.Gi
    return 0
@staticmethod
def _disk_raw_to_qcow2(path):
    """Convert a raw image at *path* to qcow2 format, in place."""
    tmp_path = path + '_qcow'
    utils.execute('qemu-img', 'convert', '-f', 'raw',
                  '-O', 'qcow2', path, tmp_path)
    utils.execute('mv', tmp_path, path)
@staticmethod
def _disk_qcow2_to_raw(path):
    """Convert a qcow2 image at *path* to raw format, in place."""
    tmp_path = path + '_raw'
    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                  '-O', 'raw', path, tmp_path)
    utils.execute('mv', tmp_path, path)
def _disk_resize(self, image, size):
    """Extend a local file-backed disk image to *size* bytes.

    Non-local images are skipped. Extendable qcow2 images are first
    converted to raw so the contained file system can be grown, then
    converted back.

    :param image: an imgmodel image object
    :param size: target size in bytes; falsy values skip the extend
    """
    if not isinstance(image, imgmodel.LocalFileImage):
        LOG.debug("Skipping resize of non-local image")
        return

    # If we have a non partitioned image that we can extend
    # then ensure we're in 'raw' format so we can extend file system.
    converted = False
    if (size and
            image.format == imgmodel.FORMAT_QCOW2 and
            disk_api.can_resize_image(image.path, size) and
            disk_api.is_image_extendable(image)):
        self._disk_qcow2_to_raw(image.path)
        converted = True
        image = imgmodel.LocalFileImage(image.path,
                                        imgmodel.FORMAT_RAW)

    if size:
        disk_api.extend(image, size)

    if converted:
        # Restore the original qcow2 format.
        self._disk_raw_to_qcow2(image.path)
def finish_migration(self, context, migration, instance, disk_info,
                     network_info, image_meta, resize_instance,
                     block_device_info=None, power_on=True):
    """Complete a resize/migration on the destination host: re-create
    missing images, resize disks if requested, regenerate the guest XML
    and start the domain.

    :param disk_info: JSON string produced by migrate_disk_and_power_off
    :param resize_instance: whether the disks should be resized to the
        new flavor's sizes
    """
    LOG.debug("Starting finish_migration", instance=instance)

    block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                              instance,
                                              image_meta,
                                              block_device_info)
    # Re-create any images (e.g. swap) that were not copied over;
    # existing disk files are left alone by _create_image.
    self._create_image(context, instance, block_disk_info['mapping'],
                       network_info=network_info,
                       block_device_info=None, inject_files=False,
                       fallback_from_host=migration.source_compute)
    self._ensure_console_log_for_instance(instance)

    gen_confdrive = functools.partial(self._create_configdrive,
                                      context, instance,
                                      network_info=network_info)

    disk_info = jsonutils.loads(disk_info)
    for info in disk_info:
        path = info['path']
        disk_name = os.path.basename(path)
        size = self._disk_size_from_instance(instance, disk_name)

        if resize_instance:
            image = imgmodel.LocalFileImage(path, info['type'])
            self._disk_resize(image, size)

        # Convert raw disks to qcow2 when use_cow_images is set.
        # Without this conversion, a raw disk on a cow host would
        # be incorrectly assumed to be qcow2, which is a severe security
        # flaw. The reverse is not true, because the atrociously-named-Raw
        # backend supports both qcow2 and raw disks, and will choose
        # appropriately between them as long as disk.info exists and is
        # correctly populated, which it is because Qcow2 writes to
        # disk.info.
        #
        # In general, we do not yet support format conversion during
        # migration. For example:
        # * Converting from use_cow_images=True to use_cow_images=False
        #   isn't handled. This isn't a security bug, but is almost
        #   certainly buggy in other cases, as the 'Raw' backend doesn't
        #   need to be converted.
        if (disk_name != 'disk.config' and
                info['type'] == 'raw' and CONF.use_cow_images):
            self._disk_raw_to_qcow2(info['path'])

    xml = self._get_guest_xml(context, instance, network_info,
                              block_disk_info, image_meta,
                              block_device_info=block_device_info,
                              write_to_disk=True)
    # NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
    # or not we've migrated to another host, because we unplug VIFs locally
    self._create_domain_and_network(context, xml, instance, network_info,
                                    block_disk_info,
                                    block_device_info=block_device_info,
                                    power_on=power_on,
                                    vifs_already_plugged=True,
                                    post_xml_callback=gen_confdrive)
    if power_on:
        timer = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_running,
            instance)
        timer.start(interval=0.5).wait()

    LOG.debug("finish_migration finished successfully.", instance=instance)
def _cleanup_failed_migration(self, inst_base):
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
                            block_device_info=None, power_on=True):
    """Revert a resize/migration on the source host: restore the
    original instance directory, roll back the root-disk snapshot and
    re-create the domain."""
    LOG.debug("Starting finish_revert_migration",
              instance=instance)

    inst_base = libvirt_utils.get_instance_path(instance)
    inst_base_resize = inst_base + "_resize"

    # If a '_resize' copy exists this was a same-host resize;
    # make sure we don't have a left-over same-host base directory
    # in the way, and restore the original. Don't fail if the
    # failure happened early.
    if os.path.exists(inst_base_resize):
        self._cleanup_failed_migration(inst_base)
        utils.execute('mv', inst_base_resize, inst_base)

    root_disk = self.image_backend.image(instance, 'disk')
    # Once we rollback, the snapshot is no longer needed, so remove it
    # TODO(nic): Remove the try/except/finally in a future release
    # To avoid any upgrade issues surrounding instances being in pending
    # resize state when the software is updated, this portion of the
    # method logs exceptions rather than failing on them. Once it can be
    # reasonably assumed that no such instances exist in the wild
    # anymore, the try/except/finally should be removed,
    # and ignore_errors should be set back to False (the default) so
    # that problems throw errors, like they should.
    if root_disk.exists():
        try:
            root_disk.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
        except exception.SnapshotNotFound:
            LOG.warning(_LW("Failed to rollback snapshot (%s)"),
                        libvirt_utils.RESIZE_SNAPSHOT_NAME)
        finally:
            root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
                                  ignore_errors=True)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        instance.image_meta,
                                        block_device_info)

    xml = self._get_guest_xml(context, instance, network_info, disk_info,
                              instance.image_meta,
                              block_device_info=block_device_info)
    self._create_domain_and_network(context, xml, instance, network_info,
                                    disk_info,
                                    block_device_info=block_device_info,
                                    power_on=power_on,
                                    vifs_already_plugged=True)

    if power_on:
        timer = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_running,
            instance)
        timer.start(interval=0.5).wait()

    LOG.debug("finish_revert_migration finished successfully.",
              instance=instance)
def confirm_migration(self, migration, instance, network_info):
    """Confirm a resize/migration, cleaning up the leftover resize
    artifacts on this host."""
    self._cleanup_resize(instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
def get_diagnostics(self, instance):
    """Return a flat dict of raw per-device diagnostic counters
    (cpu times, block/interface stats, memory) for *instance*."""
    guest = self._host.get_guest(instance)

    # TODO(sahid): We are converting all calls from a
    # virDomain object to use nova.virt.libvirt.Guest.
    # We should be able to remove domain at the end.
    domain = guest._domain
    output = {}
    # get cpu time, might launch an exception if the method
    # is not supported by the underlying hypervisor being
    # used by libvirt
    try:
        for vcpu in guest.get_vcpus_info():
            output["cpu" + str(vcpu.id) + "_time"] = vcpu.time
    except libvirt.libvirtError:
        pass
    # get io status
    xml = guest.get_xml_desc()
    dom_io = LibvirtDriver._get_io_devices(xml)
    for guest_disk in dom_io["volumes"]:
        try:
            # blockStats might launch an exception if the method
            # is not supported by the underlying hypervisor being
            # used by libvirt
            stats = domain.blockStats(guest_disk)
            output[guest_disk + "_read_req"] = stats[0]
            output[guest_disk + "_read"] = stats[1]
            output[guest_disk + "_write_req"] = stats[2]
            output[guest_disk + "_write"] = stats[3]
            output[guest_disk + "_errors"] = stats[4]
        except libvirt.libvirtError:
            pass
    for interface in dom_io["ifaces"]:
        try:
            # interfaceStats might launch an exception if the method
            # is not supported by the underlying hypervisor being
            # used by libvirt
            stats = domain.interfaceStats(interface)
            output[interface + "_rx"] = stats[0]
            output[interface + "_rx_packets"] = stats[1]
            output[interface + "_rx_errors"] = stats[2]
            output[interface + "_rx_drop"] = stats[3]
            output[interface + "_tx"] = stats[4]
            output[interface + "_tx_packets"] = stats[5]
            output[interface + "_tx_errors"] = stats[6]
            output[interface + "_tx_drop"] = stats[7]
        except libvirt.libvirtError:
            pass
    output["memory"] = domain.maxMemory()
    # memoryStats might launch an exception if the method
    # is not supported by the underlying hypervisor being
    # used by libvirt
    try:
        mem = domain.memoryStats()
        for key in mem.keys():
            output["memory-" + key] = mem[key]
    except (libvirt.libvirtError, AttributeError):
        pass
    return output
def get_instance_diagnostics(self, instance):
    """Return a structured Diagnostics object (state, uptime, cpu,
    memory, disk and NIC stats) for *instance*."""
    guest = self._host.get_guest(instance)

    # TODO(sahid): We are converting all calls from a
    # virDomain object to use nova.virt.libvirt.Guest.
    # We should be able to remove domain at the end.
    domain = guest._domain

    xml = guest.get_xml_desc()
    xml_doc = etree.fromstring(xml)

    # TODO(sahid): Needs to use get_info but more changes have to
    # be done since a mapping STATE_MAP LIBVIRT_POWER_STATE is
    # needed.
    (state, max_mem, mem, num_cpu, cpu_time) = \
        guest._get_domain_info(self._host)
    config_drive = configdrive.required_by(instance)
    launched_at = timeutils.normalize_time(instance.launched_at)
    uptime = timeutils.delta_seconds(launched_at,
                                     timeutils.utcnow())
    diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
                                    driver='libvirt',
                                    config_drive=config_drive,
                                    hypervisor_os='linux',
                                    uptime=uptime)
    diags.memory_details.maximum = max_mem / units.Mi
    diags.memory_details.used = mem / units.Mi

    # get cpu time, might launch an exception if the method
    # is not supported by the underlying hypervisor being
    # used by libvirt
    try:
        for vcpu in guest.get_vcpus_info():
            diags.add_cpu(time=vcpu.time)
    except libvirt.libvirtError:
        pass
    # get io status
    dom_io = LibvirtDriver._get_io_devices(xml)
    for guest_disk in dom_io["volumes"]:
        try:
            # blockStats might launch an exception if the method
            # is not supported by the underlying hypervisor being
            # used by libvirt
            stats = domain.blockStats(guest_disk)
            diags.add_disk(read_bytes=stats[1],
                           read_requests=stats[0],
                           write_bytes=stats[3],
                           write_requests=stats[2])
        except libvirt.libvirtError:
            pass
    for interface in dom_io["ifaces"]:
        try:
            # interfaceStats might launch an exception if the method
            # is not supported by the underlying hypervisor being
            # used by libvirt
            stats = domain.interfaceStats(interface)
            diags.add_nic(rx_octets=stats[0],
                          rx_errors=stats[2],
                          rx_drop=stats[3],
                          rx_packets=stats[1],
                          tx_octets=stats[4],
                          tx_errors=stats[6],
                          tx_drop=stats[7],
                          tx_packets=stats[5])
        except libvirt.libvirtError:
            pass

    # Update mac addresses of interface if stats have been reported
    if diags.nic_details:
        nodes = xml_doc.findall('./devices/interface/mac')
        for index, node in enumerate(nodes):
            diags.nic_details[index].mac_address = node.get('address')
    return diags
@staticmethod
def _prepare_device_bus(dev):
    """Build a device-bus object (PCI/SCSI/IDE/USB) for a guest device
    config, attaching the hypervisor-assigned address when available.

    Returns None when the device's bus type is not recognised.
    """
    bus = None
    address = (dev.device_addr.format_address() if
               dev.device_addr else None)
    if isinstance(dev.device_addr,
                  vconfig.LibvirtConfigGuestDeviceAddressPCI):
        bus = objects.PCIDeviceBus()
    elif isinstance(dev, vconfig.LibvirtConfigGuestDisk):
        if dev.target_bus == 'scsi':
            bus = objects.SCSIDeviceBus()
        elif dev.target_bus == 'ide':
            bus = objects.IDEDeviceBus()
        elif dev.target_bus == 'usb':
            bus = objects.USBDeviceBus()
    if address is not None and bus is not None:
        bus.address = address
    return bus
def _build_device_metadata(self, context, instance):
    """Build an InstanceDeviceMetadata object mapping the instance's
    tagged devices (VIFs and BDMs) to their guest device information.

    Returns None when the instance has no tagged devices.
    """
    def _get_device_name(bdm):
        # 'dev/vda' -> 'vda', matching the guest config's target_dev.
        return block_device.strip_dev(bdm.device_name)

    vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
                                                             instance.uuid)
    tagged_vifs = {vif.address: vif for vif in vifs if vif.tag}
    # TODO(mriedem): We should be able to avoid the DB query here by using
    # block_device_info['block_device_mapping'] which is passed into most
    # methods that call this function.
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)
    tagged_bdms = {_get_device_name(bdm): bdm for bdm in bdms if bdm.tag}

    devices = []
    guest = self._host.get_guest(instance)
    xml = guest.get_xml_desc()
    xml_dom = etree.fromstring(xml)
    guest_config = vconfig.LibvirtConfigGuest()
    guest_config.parse_dom(xml_dom)

    for dev in guest_config.devices:
        # Build network interface related metadata
        if isinstance(dev, vconfig.LibvirtConfigGuestInterface):
            vif = tagged_vifs.get(dev.mac_addr)
            if not vif:
                continue
            bus = self._prepare_device_bus(dev)
            device = objects.NetworkInterfaceMetadata(
                mac=vif.address,
                tags=[vif.tag]
            )
            if bus:
                device.bus = bus
            devices.append(device)

        # Build disk related metadata
        if isinstance(dev, vconfig.LibvirtConfigGuestDisk):
            bdm = tagged_bdms.get(dev.target_dev)
            if not bdm:
                continue
            bus = self._prepare_device_bus(dev)
            device = objects.DiskMetadata(tags=[bdm.tag])
            if bus:
                device.bus = bus
            devices.append(device)
    if devices:
        dev_meta = objects.InstanceDeviceMetadata(devices=devices)
        return dev_meta
def instance_on_disk(self, instance):
    """Return True if this host can reach the instance's files, either
    via a shared (writable) instance path or shared block storage."""
    # ensure directories exist and are writable
    instance_path = libvirt_utils.get_instance_path(instance)
    LOG.debug('Checking instance files accessibility %s', instance_path,
              instance=instance)
    shared_instance_path = os.access(instance_path, os.W_OK)
    # NOTE(flwang): For shared block storage scenario, the file system is
    # not really shared by the two hosts, but the volume of evacuated
    # instance is reachable.
    shared_block_storage = (self.image_backend.backend().
                            is_shared_block_storage())
    return shared_instance_path or shared_block_storage
def inject_network_info(self, instance, nw_info):
    """(Re-)apply basic firewall filtering for *instance*'s network."""
    self.firewall_driver.setup_basic_filtering(instance, nw_info)
def delete_instance_files(self, instance):
    """Delete the instance's on-disk files.

    :returns: True if the files are gone (or already gone), False if
        deletion failed and a path still remains
    """
    target = libvirt_utils.get_instance_path(instance)
    # A resize may be in progress
    target_resize = target + '_resize'
    # Other threads may attempt to rename the path, so renaming the path
    # to target + '_del' (because it is atomic) and iterating through
    # twice in the unlikely event that a concurrent rename occurs between
    # the two rename attempts in this method. In general this method
    # should be fairly thread-safe without these additional checks, since
    # other operations involving renames are not permitted when the task
    # state is not None and the task state should be set to something
    # other than None by the time this method is invoked.
    target_del = target + '_del'
    for i in six.moves.range(2):
        try:
            utils.execute('mv', target, target_del)
            break
        except Exception:
            pass
        try:
            utils.execute('mv', target_resize, target_del)
            break
        except Exception:
            pass
    # Either the target or target_resize path may still exist if all
    # rename attempts failed.
    remaining_path = None
    for p in (target, target_resize):
        if os.path.exists(p):
            remaining_path = p
            break
    # A previous delete attempt may have been interrupted, so target_del
    # may exist even if all rename attempts during the present method
    # invocation failed due to the absence of both target and
    # target_resize.
    if not remaining_path and os.path.exists(target_del):
        self.job_tracker.terminate_jobs(instance)
        LOG.info(_LI('Deleting instance files %s'), target_del,
                 instance=instance)
        remaining_path = target_del
        try:
            shutil.rmtree(target_del)
        except OSError as e:
            LOG.error(_LE('Failed to cleanup directory %(target)s: '
                          '%(e)s'), {'target': target_del, 'e': e},
                      instance=instance)
    # It is possible that the delete failed, if so don't mark the instance
    # files as cleaned up.
    if remaining_path and os.path.exists(remaining_path):
        LOG.info(_LI('Deletion of %s failed'), remaining_path,
                 instance=instance)
        return False

    LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
    return True
@property
def need_legacy_block_device_info(self):
    # This driver consumes the new-style block device info format.
    return False
def default_root_device_name(self, instance, image_meta, root_bdm):
    """Return the default root device name for *instance*, with its
    '/dev/' prefix, derived from the image metadata and root BDM."""
    disk_bus = blockinfo.get_disk_bus_for_device_type(
        instance, CONF.libvirt.virt_type, image_meta, "disk")
    cdrom_bus = blockinfo.get_disk_bus_for_device_type(
        instance, CONF.libvirt.virt_type, image_meta, "cdrom")
    root_info = blockinfo.get_root_info(
        instance, CONF.libvirt.virt_type, image_meta,
        root_bdm, disk_bus, cdrom_bus)
    return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
                                      *block_device_lists):
    """Assign default device names to every BDM, discarding (with a
    warning) any user-supplied names, which libvirt cannot honour."""
    block_device_mapping = list(itertools.chain(*block_device_lists))
    for bdm in block_device_mapping:
        if bdm.device_name is not None:
            LOG.warning(
                _LW("Ignoring supplied device name: %(device_name)s. "
                    "Libvirt can't honour user-supplied dev names"),
                {'device_name': bdm.device_name}, instance=instance)
            # Null the name so blockinfo generates a fresh one.
            bdm.device_name = None
    block_device_info = driver.get_block_device_info(instance,
                                                     block_device_mapping)

    blockinfo.default_device_names(CONF.libvirt.virt_type,
                                   nova_context.get_admin_context(),
                                   instance,
                                   block_device_info,
                                   instance.image_meta)
def get_device_name_for_instance(self, instance, bdms, block_device_obj):
    """Generate a device name (with '/dev/' prefix) for a new block
    device, ignoring any name suggested on *block_device_obj*."""
    block_device_info = driver.get_block_device_info(instance, bdms)
    instance_info = blockinfo.get_disk_info(
        CONF.libvirt.virt_type, instance,
        instance.image_meta, block_device_info=block_device_info)
    suggested_dev_name = block_device_obj.device_name
    if suggested_dev_name is not None:
        LOG.warning(
            _LW('Ignoring supplied device name: %(suggested_dev)s'),
            {'suggested_dev': suggested_dev_name}, instance=instance)

    # NOTE(ndipanov): get_info_from_bdm will generate the new device name
    # only when it's actually not set on the bd object
    block_device_obj.device_name = None
    disk_info = blockinfo.get_info_from_bdm(
        instance, CONF.libvirt.virt_type, instance.image_meta,
        block_device_obj, mapping=instance_info['mapping'])
    return block_device.prepend_dev(disk_info['dev'])
def is_supported_fs_format(self, fs_type):
    """Return True if *fs_type* is a filesystem format this driver
    supports (ext2/ext3/ext4/xfs)."""
    supported = (disk_api.FS_FORMAT_EXT2, disk_api.FS_FORMAT_EXT3,
                 disk_api.FS_FORMAT_EXT4, disk_api.FS_FORMAT_XFS)
    return fs_type in supported
| true | true |
f72cd4093b6c2735e85a9e0cc06731f3fd380a07 | 12,960 | py | Python | elastalert/alerts.py | talyian/elastalert | 8ff39d485c0babd098ad659b53ce0f8ad456c6c3 | [
"Apache-2.0"
] | null | null | null | elastalert/alerts.py | talyian/elastalert | 8ff39d485c0babd098ad659b53ce0f8ad456c6c3 | [
"Apache-2.0"
] | 1 | 2021-06-02T04:32:03.000Z | 2021-06-02T04:32:03.000Z | elastalert/alerts.py | talyian/elastalert | 8ff39d485c0babd098ad659b53ce0f8ad456c6c3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import json
import logging
import subprocess
from email.mime.text import MIMEText
from smtplib import SMTP
from smtplib import SMTPException
from socket import error
from jira.client import JIRA
from jira.exceptions import JIRAError
from staticconf.loader import yaml_loader
from util import EAException
from util import pretty_ts
def get_counts_string(match):
    """ Looks for keys matching top_events_X in matches, generated by get_top_counts, and
    returns a readable string about the various counts.

    BUG FIX: counts.items() returns a view on Python 3, which has no
    .sort() method; use sorted() instead (equivalent on Python 2).
    """
    message = ''
    for key, counts in match.items():
        if key.startswith('top_events_'):
            # Strip the 'top_events_' prefix (11 characters).
            message += '%s:\n' % (key[11:])
            # Highest counts first.
            top_events = sorted(counts.items(), key=lambda x: x[1], reverse=True)
            for term, count in top_events:
                message += '%s: %s\n' % (term, count)
            message += '\n'
    return message
def basic_match_string(rule, match):
    """ Returns a human readable alert body for the given rule and match.

    The body contains the rule name, any custom alert text (with
    alert_text_args substituted from the match), and — depending on the
    rule's alert_text_type — the rule type's match summary, top counts
    and the match fields themselves.
    """
    text = rule['name'] + '\n\n'

    # Add custom alert text; missing args become '<MISSING VALUE>'.
    alert_text = rule.get('alert_text', '')
    if 'alert_text_args' in rule:
        alert_text_args = rule.get('alert_text_args')
        alert_text_values = [match.get(arg, '<MISSING VALUE>') for arg in alert_text_args]
        alert_text = alert_text.format(*alert_text_values)
    text += alert_text
    while text[-2:] != '\n\n':
        text += '\n'

    if rule.get('alert_text_type') != 'alert_text_only':
        # Add rule text
        text += rule['type'].get_match_str(match)
        while text[-2:] != '\n\n':
            text += '\n'

        # Add top_counts
        if rule.get('top_count_keys'):
            text += get_counts_string(match)

        if rule.get('alert_text_type') != 'exclude_fields':
            # Add match items, sorted by key for stable output.
            # BUG FIX: the old code sorted a separate items list but then
            # iterated the unsorted match.items(), so the sort had no
            # effect (and list.sort on dict_items breaks on Python 3).
            match_items = sorted(match.items(), key=lambda x: x[0])
            text += '\n'.join(['%s: %s' % (key, val) for key, val in match_items if not key.startswith('top_events_')])
    return text
class Alerter(object):
    """ Base class for alert delivery backends.

    Subclasses implement :meth:`alert` to deliver matches somewhere
    (e-mail, JIRA, ...).

    :param rule: The rule configuration.
    """
    # Rule options that must be present before this alerter can be used.
    required_options = frozenset([])

    def __init__(self, rule):
        self.rule = rule
        self.pipeline = None

    def alert(self, match):
        """ Send an alert for *match*; must be overridden by subclasses.

        :param match: A dictionary of relevant information to the alert.
        """
        raise NotImplementedError()

    def get_info(self):
        """ Returns a dictionary of data related to this alert. At minimum, this should contain
        a field type corresponding to the type of Alerter. """
        return {'type': 'Unknown'}

    def create_title(self, matches):
        """ Creates the alert title, e.g. an e-mail subject or JIRA issue summary.

        Uses the rule's custom 'alert_subject' when present, otherwise
        falls back to the rule name.

        :param matches: A list of dictionaries of relevant information to the alert.
        """
        use_custom = 'alert_subject' in self.rule
        return (self.create_custom_title(matches) if use_custom
                else self.create_default_title(matches))

    def create_custom_title(self, matches):
        """ Formats the rule's 'alert_subject', substituting any
        'alert_subject_args' values taken from the first match. """
        subject = self.rule['alert_subject']
        if 'alert_subject_args' in self.rule:
            arg_names = self.rule['alert_subject_args']
            values = [matches[0].get(name, '<MISSING VALUE>')
                      for name in arg_names]
            subject = subject.format(*values)
        return subject

    def create_default_title(self, matches):
        """ Default title: the rule's name. """
        return self.rule['name']
class DebugAlerter(Alerter):
    """ The debug alerter uses a Python logger (by default, alerting to terminal). """
    def alert(self, matches):
        # Log a summary count, then one line per match plus the full match body.
        logging.info('%d match(es)' % (len(matches)))
        qk = self.rule.get('query_key', None)
        for match in matches:
            # If the rule has a query_key and the match carries it, include
            # its value in the log line for easier grepping.
            if qk in match:
                logging.info('%s matched %s at %s' % (match[qk], self.rule['name'], match[self.rule['timestamp_field']]))
            else:
                logging.info('%s at %s' % (self.rule['name'], match[self.rule['timestamp_field']]))
            # Full human-readable body, same as other alerters would send.
            logging.info(basic_match_string(self.rule, match))
    def get_info(self):
        # Minimal metadata identifying this alerter type.
        return {'type': 'debug'}
class EmailAlerter(Alerter):
    """ Sends an email alert """
    required_options = frozenset(['email'])
    def __init__(self, *args):
        super(EmailAlerter, self).__init__(*args)
        # SMTP settings with sensible local defaults.
        self.smtp_host = self.rule.get('smtp_host', 'localhost')
        self.from_addr = self.rule.get('from_addr', 'ElastAlert')
        # Convert email to a list if it isn't already
        if isinstance(self.rule['email'], str):
            self.rule['email'] = [self.rule['email']]
    def alert(self, matches):
        """ Send a single email covering all of ``matches``.
        :param matches: A list of match dictionaries (aggregated alerts share one email).
        """
        body = ''
        for match in matches:
            body += basic_match_string(self.rule, match)
            # Separate text of aggregated alerts with dashes
            if len(matches) > 1:
                body += '\n----------------------------------------\n'
        # Add JIRA ticket if it exists
        if self.pipeline is not None and 'jira_ticket' in self.pipeline:
            url = '%s/browse/%s' % (self.rule['jira_server'], self.pipeline['jira_ticket'])
            body += '\nJIRA ticket: %s' % (url)
        email_msg = MIMEText(body)
        email_msg['Subject'] = self.create_title(matches)
        email_msg['To'] = ', '.join(self.rule['email'])
        email_msg['From'] = self.from_addr
        email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
        # A fresh SMTP connection per alert; connection failures are fatal.
        try:
            self.smtp = SMTP(self.smtp_host)
        except (SMTPException, error) as e:
            raise EAException("Error connecting to SMTP host: %s" % (e))
        self.smtp.sendmail(self.from_addr, self.rule['email'], email_msg.as_string())
        self.smtp.close()
        logging.info("Sent email to %s" % (self.rule['email']))
    def create_default_title(self, matches):
        """ Build the email subject: rule name, plus query_key value when present. """
        subject = 'ElastAlert: %s' % (self.rule['name'])
        # If the rule has a query_key, add that value plus timestamp to subject
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                subject += ' - %s' % (qk)
        return subject
    def get_info(self):
        # Metadata describing this alerter and its recipients.
        return {'type': 'email',
                'recipients': self.rule['email']}
class JiraAlerter(Alerter):
    """ Creates a Jira ticket for each alert """
    required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
    def __init__(self, rule):
        super(JiraAlerter, self).__init__(rule)
        self.server = self.rule['jira_server']
        self.get_jira_account(self.rule['jira_account_file'])
        self.project = self.rule['jira_project']
        self.issue_type = self.rule['jira_issuetype']
        # Optional ticket attributes.
        self.component = self.rule.get('jira_component')
        self.label = self.rule.get('jira_label')
        self.assignee = self.rule.get('jira_assignee')
        # Only tickets newer than max_age days are considered for bumping.
        self.max_age = self.rule.get('jira_max_age', 30)
        self.bump_tickets = self.rule.get('jira_bump_tickets', False)
        # Base keyword arguments passed to JIRA.create_issue().
        self.jira_args = {'project': {'key': self.project},
                          'issuetype': {'name': self.issue_type}}
        if self.component:
            self.jira_args['components'] = [{'name': self.component}]
        if self.label:
            self.jira_args['labels'] = [self.label]
        if self.assignee:
            self.jira_args['assignee'] = {'name': self.assignee}
        try:
            self.client = JIRA(self.server, basic_auth=(self.user, self.password))
        except JIRAError as e:
            # JIRAError may contain HTML, pass along only first 1024 chars
            raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))
    def set_assignee(self, assignee):
        """ Set (or clear, when falsy) the assignee used for new tickets. """
        self.assignee = assignee
        if assignee:
            self.jira_args['assignee'] = {'name': assignee}
        elif 'assignee' in self.jira_args:
            self.jira_args.pop('assignee')
    def get_jira_account(self, account_file):
        """ Gets the username and password from a jira account file.
        :param account_file: Name of the file which contains user and password information.
        """
        account_conf = yaml_loader(account_file)
        if 'user' not in account_conf or 'password' not in account_conf:
            raise EAException('Jira account file must have user and password fields')
        self.user = account_conf['user']
        self.password = account_conf['password']
    def find_existing_ticket(self, matches):
        """ Search JIRA for a recent ticket with a matching summary.
        Returns the first matching issue, or None on no match / search error. """
        # Default title, get stripped search version
        if 'alert_subject' not in self.rule:
            title = self.create_default_title(matches, True)
        else:
            title = self.create_title(matches)
        # This is necessary for search for work. Other special characters and dashes
        # directly adjacent to words appear to be ok
        title = title.replace(' - ', ' ')
        date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y/%m/%d')
        jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
        try:
            issues = self.client.search_issues(jql)
        except JIRAError as e:
            # Search failures are non-fatal: fall through to ticket creation.
            logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
            return None
        if len(issues):
            return issues[0]
    def comment_on_ticket(self, ticket, match):
        """ Add a "triggered again" comment with the match body to a ticket. """
        text = basic_match_string(self.rule, match)
        timestamp = pretty_ts(match[self.rule['timestamp_field']])
        comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
        self.client.add_comment(ticket, comment)
    def alert(self, matches):
        """ Comment on an existing recent ticket when bumping is enabled,
        otherwise open a new JIRA issue describing all matches.
        :param matches: A list of match dictionaries. """
        title = self.create_title(matches)
        if self.bump_tickets:
            ticket = self.find_existing_ticket(matches)
            if ticket:
                logging.info('Commenting on existing ticket %s' % (ticket.key))
                for match in matches:
                    self.comment_on_ticket(ticket, match)
                return
        description = ''
        for match in matches:
            description += basic_match_string(self.rule, match)
            # Separate aggregated matches with a dashed divider.
            if len(matches) > 1:
                description += '\n----------------------------------------\n'
        self.jira_args['summary'] = title
        self.jira_args['description'] = description
        try:
            self.issue = self.client.create_issue(**self.jira_args)
        except JIRAError as e:
            raise EAException("Error creating JIRA ticket: %s" % (e))
        logging.info("Opened Jira ticket: %s" % (self.issue))
        # Expose the new ticket so downstream alerters (e.g. email) can link it.
        if self.pipeline is not None:
            self.pipeline['jira_ticket'] = self.issue
    def create_default_title(self, matches, for_search=False):
        """ Build the issue summary; ``for_search`` omits the timestamp/count
        suffix so the title is stable enough for JQL matching. """
        # If there is a query_key, use that in the title
        if 'query_key' in self.rule and self.rule['query_key'] in matches[0]:
            title = 'ElastAlert: %s matched %s' % (matches[0][self.rule['query_key']], self.rule['name'])
        else:
            title = 'ElastAlert: %s' % (self.rule['name'])
        if for_search:
            return title
        title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))
        # Add count for spikes
        count = matches[0].get('spike_count')
        if count:
            title += ' - %s+ events' % (count)
        return title
    def get_info(self):
        return {'type': 'jira'}
class CommandAlerter(Alerter):
    """ Runs a local command for each match, optionally piping the match to
    its stdin as JSON (rule option 'pipe_match_json'). """
    required_options = set(['command'])
    def __init__(self, *args):
        super(CommandAlerter, self).__init__(*args)
        # Initialized so get_info() is safe before the first alert()
        # (previously it raised AttributeError).
        self.last_command = []
        if isinstance(self.rule['command'], basestring):
            if '%' in self.rule['command']:
                logging.warning('Warning! You could be vulnerable to shell injection!')
            # Normalize a bare string command to the list form Popen expects.
            # BUG FIX: previously this only happened when the string contained
            # '%', so plain string commands were iterated character by character.
            self.rule['command'] = [self.rule['command']]
    def alert(self, matches):
        for match in matches:
            # Format the command and arguments with values from the match
            try:
                command = [command_arg % match for command_arg in self.rule['command']]
                self.last_command = command
            except KeyError as e:
                raise EAException("Error formatting command: %s" % (e))
            # Run command and pipe data
            try:
                if self.rule.get('pipe_match_json'):
                    subp = subprocess.Popen(command, stdin=subprocess.PIPE)
                    match_json = json.dumps(match)
                    subp.communicate(input=match_json)
                else:
                    # BUG FIX: the old code always opened a stdin pipe but only
                    # called communicate() when piping JSON, leaving the child
                    # with a dangling pipe and never reaping it (zombies).
                    subp = subprocess.Popen(command)
                    subp.wait()
            except OSError as e:
                raise EAException("Error while running command %s: %s" % (' '.join(command), e))
    def get_info(self):
        """ Returns a dictionary of data related to this alert. """
        return {'type': 'command',
                'command': ' '.join(self.last_command)}
| 37.028571 | 121 | 0.599691 |
import datetime
import json
import logging
import subprocess
from email.mime.text import MIMEText
from smtplib import SMTP
from smtplib import SMTPException
from socket import error
from jira.client import JIRA
from jira.exceptions import JIRAError
from staticconf.loader import yaml_loader
from util import EAException
from util import pretty_ts
def get_counts_string(match):
    """ Return a summary of all 'top_events_X' entries in ``match``.

    Each entry becomes a section headed by X, listing term: count pairs
    in descending count order.
    """
    message = ''
    for key, counts in match.items():
        if key.startswith('top_events_'):
            # Strip the 'top_events_' prefix (11 chars) for the heading.
            message += '%s:\n' % (key[11:])
            # sorted() works on dict views on both Python 2 and 3; the old
            # in-place .items().sort() is Python-2-only and less idiomatic.
            top_events = sorted(counts.items(), key=lambda x: x[1], reverse=True)
            for term, count in top_events:
                message += '%s: %s\n' % (term, count)
            message += '\n'
    return message
def basic_match_string(rule, match):
text = rule['name'] + '\n\n'
alert_text = rule.get('alert_text', '')
if 'alert_text_args' in rule:
alert_text_args = rule.get('alert_text_args')
alert_text_values = [match.get(arg, '<MISSING VALUE>') for arg in alert_text_args]
alert_text = alert_text.format(*alert_text_values)
text += alert_text
while text[-2:] != '\n\n':
text += '\n'
if rule.get('alert_text_type') != 'alert_text_only':
text += rule['type'].get_match_str(match)
while text[-2:] != '\n\n':
text += '\n'
if rule.get('top_count_keys'):
text += get_counts_string(match)
if rule.get('alert_text_type') != 'exclude_fields':
match_items = match.items()
match_items.sort(key=lambda x: x[0])
text += '\n'.join(['%s: %s' % (key, val) for key, val in match.items() if not key.startswith('top_events_')])
return text
class Alerter(object):
required_options = frozenset([])
def __init__(self, rule):
self.rule = rule
self.pipeline = None
def alert(self, match):
raise NotImplementedError()
def get_info(self):
return {'type': 'Unknown'}
def create_title(self, matches):
if 'alert_subject' in self.rule:
return self.create_custom_title(matches)
return self.create_default_title(matches)
def create_custom_title(self, matches):
alert_subject = self.rule['alert_subject']
if 'alert_subject_args' in self.rule:
alert_subject_args = self.rule['alert_subject_args']
alert_subject_values = [matches[0].get(arg, '<MISSING VALUE>') for arg in alert_subject_args]
return alert_subject.format(*alert_subject_values)
return alert_subject
def create_default_title(self, matches):
return self.rule['name']
class DebugAlerter(Alerter):
def alert(self, matches):
logging.info('%d match(es)' % (len(matches)))
qk = self.rule.get('query_key', None)
for match in matches:
if qk in match:
logging.info('%s matched %s at %s' % (match[qk], self.rule['name'], match[self.rule['timestamp_field']]))
else:
logging.info('%s at %s' % (self.rule['name'], match[self.rule['timestamp_field']]))
logging.info(basic_match_string(self.rule, match))
def get_info(self):
return {'type': 'debug'}
class EmailAlerter(Alerter):
required_options = frozenset(['email'])
def __init__(self, *args):
super(EmailAlerter, self).__init__(*args)
self.smtp_host = self.rule.get('smtp_host', 'localhost')
self.from_addr = self.rule.get('from_addr', 'ElastAlert')
if isinstance(self.rule['email'], str):
self.rule['email'] = [self.rule['email']]
def alert(self, matches):
body = ''
for match in matches:
body += basic_match_string(self.rule, match)
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
# Add JIRA ticket if it exists
if self.pipeline is not None and 'jira_ticket' in self.pipeline:
url = '%s/browse/%s' % (self.rule['jira_server'], self.pipeline['jira_ticket'])
body += '\nJIRA ticket: %s' % (url)
email_msg = MIMEText(body)
email_msg['Subject'] = self.create_title(matches)
email_msg['To'] = ', '.join(self.rule['email'])
email_msg['From'] = self.from_addr
email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
try:
self.smtp = SMTP(self.smtp_host)
except (SMTPException, error) as e:
raise EAException("Error connecting to SMTP host: %s" % (e))
self.smtp.sendmail(self.from_addr, self.rule['email'], email_msg.as_string())
self.smtp.close()
logging.info("Sent email to %s" % (self.rule['email']))
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
# If the rule has a query_key, add that value plus timestamp to subject
if 'query_key' in self.rule:
qk = matches[0].get(self.rule['query_key'])
if qk:
subject += ' - %s' % (qk)
return subject
def get_info(self):
return {'type': 'email',
'recipients': self.rule['email']}
class JiraAlerter(Alerter):
required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
def __init__(self, rule):
super(JiraAlerter, self).__init__(rule)
self.server = self.rule['jira_server']
self.get_jira_account(self.rule['jira_account_file'])
self.project = self.rule['jira_project']
self.issue_type = self.rule['jira_issuetype']
self.component = self.rule.get('jira_component')
self.label = self.rule.get('jira_label')
self.assignee = self.rule.get('jira_assignee')
self.max_age = self.rule.get('jira_max_age', 30)
self.bump_tickets = self.rule.get('jira_bump_tickets', False)
self.jira_args = {'project': {'key': self.project},
'issuetype': {'name': self.issue_type}}
if self.component:
self.jira_args['components'] = [{'name': self.component}]
if self.label:
self.jira_args['labels'] = [self.label]
if self.assignee:
self.jira_args['assignee'] = {'name': self.assignee}
try:
self.client = JIRA(self.server, basic_auth=(self.user, self.password))
except JIRAError as e:
# JIRAError may contain HTML, pass along only first 1024 chars
raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args['assignee'] = {'name': assignee}
elif 'assignee' in self.jira_args:
self.jira_args.pop('assignee')
def get_jira_account(self, account_file):
account_conf = yaml_loader(account_file)
if 'user' not in account_conf or 'password' not in account_conf:
raise EAException('Jira account file must have user and password fields')
self.user = account_conf['user']
self.password = account_conf['password']
def find_existing_ticket(self, matches):
# Default title, get stripped search version
if 'alert_subject' not in self.rule:
title = self.create_default_title(matches, True)
else:
title = self.create_title(matches)
# This is necessary for search for work. Other special characters and dashes
# directly adjacent to words appear to be ok
title = title.replace(' - ', ' ')
date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y/%m/%d')
jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
try:
issues = self.client.search_issues(jql)
except JIRAError as e:
logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
return None
if len(issues):
return issues[0]
def comment_on_ticket(self, ticket, match):
text = basic_match_string(self.rule, match)
timestamp = pretty_ts(match[self.rule['timestamp_field']])
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def alert(self, matches):
title = self.create_title(matches)
if self.bump_tickets:
ticket = self.find_existing_ticket(matches)
if ticket:
logging.info('Commenting on existing ticket %s' % (ticket.key))
for match in matches:
self.comment_on_ticket(ticket, match)
return
description = ''
for match in matches:
description += basic_match_string(self.rule, match)
if len(matches) > 1:
description += '\n----------------------------------------\n'
self.jira_args['summary'] = title
self.jira_args['description'] = description
try:
self.issue = self.client.create_issue(**self.jira_args)
except JIRAError as e:
raise EAException("Error creating JIRA ticket: %s" % (e))
logging.info("Opened Jira ticket: %s" % (self.issue))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = self.issue
def create_default_title(self, matches, for_search=False):
# If there is a query_key, use that in the title
if 'query_key' in self.rule and self.rule['query_key'] in matches[0]:
title = 'ElastAlert: %s matched %s' % (matches[0][self.rule['query_key']], self.rule['name'])
else:
title = 'ElastAlert: %s' % (self.rule['name'])
if for_search:
return title
title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))
# Add count for spikes
count = matches[0].get('spike_count')
if count:
title += ' - %s+ events' % (count)
return title
def get_info(self):
return {'type': 'jira'}
class CommandAlerter(Alerter):
required_options = set(['command'])
def __init__(self, *args):
super(CommandAlerter, self).__init__(*args)
if isinstance(self.rule['command'], basestring) and '%' in self.rule['command']:
logging.warning('Warning! You could be vulnerable to shell injection!')
self.rule['command'] = [self.rule['command']]
def alert(self, matches):
for match in matches:
# Format the command and arguments
try:
command = [command_arg % match for command_arg in self.rule['command']]
self.last_command = command
except KeyError as e:
raise EAException("Error formatting command: %s" % (e))
# Run command and pipe data
try:
subp = subprocess.Popen(command, stdin=subprocess.PIPE)
if self.rule.get('pipe_match_json'):
match_json = json.dumps(match)
stdout, stderr = subp.communicate(input=match_json)
except OSError as e:
raise EAException("Error while running command %s: %s" % (' '.join(command), e))
def get_info(self):
return {'type': 'command',
'command': ' '.join(self.last_command)}
| true | true |
f72cd428667980c9fdb4e230f0d8a948590c845d | 1,070 | py | Python | kubernetes/test/test_v1beta1_rolling_update_daemon_set.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_rolling_update_daemon_set.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_rolling_update_daemon_set.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | 1 | 2020-05-09T07:16:55.000Z | 2020-05-09T07:16:55.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_rolling_update_daemon_set import V1beta1RollingUpdateDaemonSet
class TestV1beta1RollingUpdateDaemonSet(unittest.TestCase):
    """Unit test stubs for the V1beta1RollingUpdateDaemonSet model."""
    def setUp(self):
        # Generated stub: no fixtures are required.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testV1beta1RollingUpdateDaemonSet(self):
        """Placeholder test for constructing V1beta1RollingUpdateDaemonSet."""
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1beta1_rolling_update_daemon_set.V1beta1RollingUpdateDaemonSet()
        pass
| 23.777778 | 107 | 0.738318 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_rolling_update_daemon_set import V1beta1RollingUpdateDaemonSet
class TestV1beta1RollingUpdateDaemonSet(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1RollingUpdateDaemonSet(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f72cd49b263b57b7fcb9084dfd4139d1f60dc573 | 22,741 | py | Python | django/contrib/contenttypes/fields.py | tomviner/django | 87fed9444033533ad7105c4b1e4ffc5d7854a2c6 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/contenttypes/fields.py | tomviner/django | 87fed9444033533ad7105c4b1e4ffc5d7854a2c6 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/contenttypes/fields.py | tomviner/django | 87fed9444033533ad7105c4b1e4ffc5d7854a2c6 | [
"BSD-3-Clause"
] | 1 | 2022-03-26T09:05:09.000Z | 2022-03-26T09:05:09.000Z | from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connection, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ForeignRelatedObjectsDescriptor,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
    """
    Provide a generic many-to-one relation through the ``content_type`` and
    ``object_id`` fields.
    This class also doubles as an accessor to the related object (similar to
    ReverseSingleRelatedObjectDescriptor) by adding itself as a model
    attribute.
    """
    # Field flags
    auto_created = False
    concrete = False
    editable = False
    hidden = False
    is_relation = True
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    related_model = None
    allow_unsaved_instance_assignment = False
    def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
        """
        :param ct_field: Name of the ForeignKey-to-ContentType field on the model.
        :param fk_field: Name of the field holding the related object's primary key.
        :param for_concrete_model: If False, proxy models keep their own content type.
        """
        self.ct_field = ct_field
        self.fk_field = fk_field
        self.for_concrete_model = for_concrete_model
        self.editable = False
        self.rel = None
        self.column = None
    def contribute_to_class(self, cls, name, **kwargs):
        """
        Register this field on model class ``cls`` under ``name`` as a
        virtual field and install this object as the accessor attribute.
        """
        self.name = name
        self.model = cls
        # Per-instance attribute used to memoize the resolved related object.
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_field(self, virtual=True)
        # Only run pre-initialization field assignment on non-abstract models
        if not cls._meta.abstract:
            signals.pre_init.connect(self.instance_pre_init, sender=cls)
        setattr(cls, name, self)
    def __str__(self):
        # Dotted path identifying this field: app.Model.field_name.
        model = self.model
        app = model._meta.app_label
        return '%s.%s.%s' % (app, model._meta.object_name, self.name)
    def check(self, **kwargs):
        """Run system checks for this field and return a list of check Errors."""
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_object_id_field())
        errors.extend(self._check_content_type_field())
        return errors
    def _check_field_name(self):
        """Reject field names ending with an underscore (fields.E001)."""
        if self.name.endswith("_"):
            return [
                checks.Error(
                    'Field names must not end with an underscore.',
                    hint=None,
                    obj=self,
                    id='fields.E001',
                )
            ]
        else:
            return []
    def _check_object_id_field(self):
        """Verify that the object-id field named by ``fk_field`` exists."""
        try:
            self.model._meta.get_field(self.fk_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
                    hint=None,
                    obj=self,
                    id='contenttypes.E001',
                )
            ]
        else:
            return []
    def _check_content_type_field(self):
        """
        Check if field named `field_name` in model `model` exists and is a
        valid content_type field (is a ForeignKey to ContentType).
        """
        try:
            field = self.model._meta.get_field(self.ct_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey content type references the non-existent field '%s.%s'." % (
                        self.model._meta.object_name, self.ct_field
                    ),
                    hint=None,
                    obj=self,
                    id='contenttypes.E002',
                )
            ]
        else:
            if not isinstance(field, models.ForeignKey):
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E003',
                    )
                ]
            elif field.rel.to != ContentType:
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
            else:
                return []
    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handle initializing an object with the generic FK instead of
        content_type and object_id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            if value is not None:
                # Translate the object into its (content_type, pk) pair.
                kwargs[self.ct_field] = self.get_content_type(obj=value)
                kwargs[self.fk_field] = value._get_pk_val()
            else:
                kwargs[self.ct_field] = None
                kwargs[self.fk_field] = None
    def get_content_type(self, obj=None, id=None, using=None):
        """
        Resolve a ContentType either from a model instance (``obj``) or from
        a content type primary key (``id`` on database ``using``).
        """
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id is not None:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")
    def get_prefetch_queryset(self, instances, queryset=None):
        """
        Build the data needed by prefetch_related for this GFK: the related
        objects plus the key functions used to join them back in Python.
        """
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")
        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance
        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)
        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)
    def is_cached(self, instance):
        """Return True if the related object is already memoized on ``instance``."""
        return hasattr(instance, self.cache_attr)
    def __get__(self, instance, instance_type=None):
        """
        Descriptor get: resolve (content_type, object_id) into the related
        object, caching the result on the instance. Returns None when the
        target does not exist.
        """
        if instance is None:
            return self
        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None
            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id is not None:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        """
        Descriptor set: store the value's content type and primary key into
        the underlying fields and memoize the object itself.
        """
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()
            if not self.allow_unsaved_instance_assignment and fk is None:
                raise ValueError(
                    'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                    (value, value._meta.object_name)
                )
        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
    """Relation descriptor used by GenericRelation to record relation metadata."""
    def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
        # Hide the reverse accessor ('+') unless an explicit query name is given;
        # generic relations never cascade (on_delete=DO_NOTHING).
        reverse_name = related_query_name or '+'
        super(GenericRel, self).__init__(
            field,
            to,
            related_name=reverse_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            on_delete=DO_NOTHING,
        )
class GenericRelation(ForeignObject):
    """
    Provide a reverse to a relation created by a GenericForeignKey.
    """
    # Field flags
    auto_created = False
    many_to_many = False
    many_to_one = False
    one_to_many = True
    one_to_one = False
    rel_class = GenericRel
    def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
                 for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
        """
        :param to: Model (or lazy reference) carrying the GenericForeignKey.
        :param object_id_field: Name of the object-id field on ``to``.
        :param content_type_field: Name of the content-type FK field on ``to``.
        """
        kwargs['rel'] = self.rel_class(
            self, to,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
        )
        # Generic relations are never edited or serialized directly.
        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False
        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
        super(GenericRelation, self).__init__(
            to, from_fields=[object_id_field], to_fields=[], **kwargs)
        self.object_id_field_name = object_id_field
        self.content_type_field_name = content_type_field
        self.for_concrete_model = for_concrete_model
    def check(self, **kwargs):
        """Run system checks, including GFK existence on the target model."""
        errors = super(GenericRelation, self).check(**kwargs)
        errors.extend(self._check_generic_foreign_key_existence())
        return errors
    def _check_generic_foreign_key_existence(self):
        """
        Verify the target model defines a GenericForeignKey whose ct/fk field
        names match this relation's configuration (contenttypes.E004).
        """
        target = self.rel.to
        if isinstance(target, ModelBase):
            fields = target._meta.virtual_fields
            if any(isinstance(field, GenericForeignKey) and
                    field.ct_field == self.content_type_field_name and
                    field.fk_field == self.object_id_field_name
                    for field in fields):
                return []
            else:
                return [
                    checks.Error(
                        ("The GenericRelation defines a relation with the model "
                         "'%s.%s', but that model does not have a GenericForeignKey.") % (
                            target._meta.app_label, target._meta.object_name
                        ),
                        hint=None,
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
        else:
            # Target is still a lazy string reference; nothing to check yet.
            return []
    def resolve_related_fields(self):
        """Map the target's object-id field to this model's primary key."""
        self.to_fields = [self.model._meta.pk.name]
        return [(self.rel.to._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
    def get_path_info(self):
        """Path used by the ORM to traverse from this model to the target."""
        opts = self.rel.to._meta
        target = opts.pk
        return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]
    def get_reverse_path_info(self):
        """Path used by the ORM to traverse from the target back to this model."""
        opts = self.model._meta
        from_opts = self.rel.to._meta
        return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
    def get_choices_default(self):
        return super(GenericRelation, self).get_choices(include_blank=False)
    def value_to_string(self, obj):
        """Serialize the related objects as a text list of their primary keys."""
        qs = getattr(obj, self.name).all()
        return smart_text([instance._get_pk_val() for instance in qs])
    def contribute_to_class(self, cls, name, **kwargs):
        """Register as a virtual field and install the related-objects descriptor."""
        kwargs['virtual_only'] = True
        super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
        self.model = cls
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self.rel))
    def set_attributes_from_rel(self):
        # Intentionally a no-op: unlike a real ForeignKey, this field does not
        # copy name/verbose_name from the related field.
        pass
    def get_internal_type(self):
        return "ManyToManyField"
    def get_content_type(self):
        """
        Return the content type associated with this field's model.
        """
        return ContentType.objects.get_for_model(self.model,
                                                 for_concrete_model=self.for_concrete_model)
    def get_extra_restriction(self, where_class, alias, remote_alias):
        """Add the content-type equality condition to the generated JOIN."""
        field = self.rel.to._meta.get_field(self.content_type_field_name)
        contenttype_pk = self.get_content_type().pk
        cond = where_class()
        lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
        cond.add(lookup, 'AND')
        return cond
    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.rel.to._base_manager.db_manager(using).filter(**{
            "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
                self.model, for_concrete_model=self.for_concrete_model).pk,
            "%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
        })
class ReverseGenericRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor):
    """
    Accessor to the related objects manager on the one-to-many relation created
    by GenericRelation.
    In the example::
        class Post(Model):
            comments = GenericRelation(Comment)
    ``post.comments`` is a ReverseGenericRelatedObjectsDescriptor instance.
    """
    @cached_property
    def related_manager_cls(self):
        # Build (and cache per descriptor) a manager class that layers
        # generic-relation filtering on top of the related model's default
        # manager class.
        return create_generic_related_manager(
            self.rel.to._default_manager.__class__,
            self.rel,
        )
def create_generic_related_manager(superclass, rel):
    """
    Factory function to create a manager that subclasses another manager
    (generally the default manager of a given model) and adds behaviors
    specific to generic relations.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, instance=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.instance = instance
            self.model = rel.to
            # Content type of the *source* instance; every related row must
            # carry this value in its content-type column.
            content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
                instance, for_concrete_model=rel.field.for_concrete_model)
            self.content_type = content_type
            qn = connection.ops.quote_name
            join_cols = rel.field.get_joining_columns(reverse_join=True)[0]
            self.source_col_name = qn(join_cols[0])
            self.target_col_name = qn(join_cols[1])
            self.content_type_field_name = rel.field.content_type_field_name
            self.object_id_field_name = rel.field.object_id_field_name
            self.prefetch_cache_name = rel.field.attname
            self.pk_val = instance._get_pk_val()
            # Every queryset produced by this manager is narrowed to rows
            # pointing at ``instance`` (content type + object id).
            self.core_filters = {
                '%s__pk' % self.content_type_field_name: content_type.id,
                self.object_id_field_name: self.pk_val,
            }
        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_generic_related_manager(manager.__class__, rel)
            return manager_class(instance=self.instance)
        do_not_call_in_templates = True
        def __str__(self):
            return repr(self)
        def get_queryset(self):
            # Prefer prefetched results when available; otherwise filter the
            # superclass queryset down to rows pointing at our instance.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
        def get_prefetch_queryset(self, instances, queryset=None):
            """Return the 5-tuple prefetch_related() needs for ``instances``."""
            if queryset is None:
                queryset = super(GenericRelatedObjectManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
            }
            # We (possibly) need to convert object IDs to the type of the
            # instances' PK in order to match up instances:
            object_id_converter = instances[0]._meta.pk.to_python
            return (queryset.filter(**query),
                    lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)
        def add(self, *objs):
            # Point each object at ``instance`` and save it, atomically.
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    setattr(obj, self.content_type_field_name, self.content_type)
                    setattr(obj, self.object_id_field_name, self.pk_val)
                    obj.save()
        add.alters_data = True
        def remove(self, *objs, **kwargs):
            if not objs:
                return
            bulk = kwargs.pop('bulk', True)
            self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
        remove.alters_data = True
        def clear(self, **kwargs):
            bulk = kwargs.pop('bulk', True)
            self._clear(self, bulk)
        clear.alters_data = True
        def _clear(self, queryset, bulk):
            db = router.db_for_write(self.model, instance=self.instance)
            queryset = queryset.using(db)
            if bulk:
                # `QuerySet.delete()` creates its own atomic block which
                # contains the `pre_delete` and `post_delete` signal handlers.
                queryset.delete()
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in queryset:
                        obj.delete()
        _clear.alters_data = True
        def set(self, objs, **kwargs):
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)
            clear = kwargs.pop('clear', False)
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                if clear:
                    self.clear()
                    self.add(*objs)
                else:
                    # Minimal-churn path: only remove/add the difference.
                    old_objs = set(self.using(db).all())
                    new_objs = []
                    for obj in objs:
                        if obj in old_objs:
                            old_objs.remove(obj)
                        else:
                            new_objs.append(obj)
                    self.remove(*old_objs)
                    self.add(*new_objs)
        set.alters_data = True
        def create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True
        def get_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
        get_or_create.alters_data = True
        def update_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
        update_or_create.alters_data = True
    return GenericRelatedObjectManager
| 38.873504 | 116 | 0.592146 | from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connection, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ForeignRelatedObjectsDescriptor,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
    """
    Provide a generic many-to-one relation through the ``content_type``
    and ``object_id`` fields, and act as the accessor/descriptor for the
    related object on model instances.
    """
    # Field flags mimicking the regular Field API so the model _meta
    # machinery can treat this virtual field uniformly.
    auto_created = False
    concrete = False
    editable = False
    hidden = False
    is_relation = True
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    related_model = None
    allow_unsaved_instance_assignment = False
    def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
        self.ct_field = ct_field
        self.fk_field = fk_field
        self.for_concrete_model = for_concrete_model
        self.editable = False
        self.rel = None
        self.column = None
    def contribute_to_class(self, cls, name, **kwargs):
        """Register this virtual field on ``cls`` and install the accessor."""
        self.name = name
        self.model = cls
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_field(self, virtual=True)
        # Only run pre-init field translation on non-abstract models.
        if not cls._meta.abstract:
            signals.pre_init.connect(self.instance_pre_init, sender=cls)
        setattr(cls, name, self)
    def __str__(self):
        model = self.model
        app = model._meta.app_label
        return '%s.%s.%s' % (app, model._meta.object_name, self.name)
    def check(self, **kwargs):
        """Run the system checks for this field."""
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_object_id_field())
        errors.extend(self._check_content_type_field())
        return errors
    def _check_field_name(self):
        if self.name.endswith("_"):
            return [
                checks.Error(
                    'Field names must not end with an underscore.',
                    hint=None,
                    obj=self,
                    id='fields.E001',
                )
            ]
        else:
            return []
    def _check_object_id_field(self):
        try:
            self.model._meta.get_field(self.fk_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
                    hint=None,
                    obj=self,
                    id='contenttypes.E001',
                )
            ]
        else:
            return []
    def _check_content_type_field(self):
        # The content-type field must exist and be a ForeignKey to
        # contenttypes.ContentType.
        try:
            field = self.model._meta.get_field(self.ct_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey content type references the non-existent field '%s.%s'." % (
                        self.model._meta.object_name, self.ct_field
                    ),
                    hint=None,
                    obj=self,
                    id='contenttypes.E002',
                )
            ]
        else:
            if not isinstance(field, models.ForeignKey):
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E003',
                    )
                ]
            elif field.rel.to != ContentType:
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
            else:
                return []
    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """Translate a generic-FK kwarg into content-type/object-id kwargs."""
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            if value is not None:
                kwargs[self.ct_field] = self.get_content_type(obj=value)
                kwargs[self.fk_field] = value._get_pk_val()
            else:
                kwargs[self.ct_field] = None
                kwargs[self.fk_field] = None
    def get_content_type(self, obj=None, id=None, using=None):
        # Convenience wrapper around the cached ContentType manager;
        # exactly one of ``obj`` or ``id`` must be given.
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id is not None:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            raise Exception("Impossible arguments to GFK.get_content_type!")
    def get_prefetch_queryset(self, instances, queryset=None):
        """Return the data needed by ``prefetch_related()`` for ``instances``.

        Related objects are grouped by content type and fetched with one
        query per model class; the returned key functions match related
        objects back to instances via (pk, model class) pairs.
        """
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")
        fk_dict = defaultdict(set)
        # One representative instance per group, to pick the right database.
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance
        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
        # The Python-side join must match on both the FK value and the model
        # class, so the key is a (prepared pk, model class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)
        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)
    def is_cached(self, instance):
        return hasattr(instance, self.cache_attr)
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        try:
            # Cached related object from a previous access or prefetch.
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id is not None:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    # A dangling generic FK resolves to None rather than raising.
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()
            if not self.allow_unsaved_instance_assignment and fk is None:
                raise ValueError(
                    'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                    (value, value._meta.object_name)
                )
        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
    """Relation descriptor used by GenericRelation; deletion is a no-op
    at the database level (``DO_NOTHING``) because the collector handles
    generic relations explicitly."""
    def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
        super(GenericRel, self).__init__(
            field, to,
            related_name=related_query_name or '+',
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            on_delete=DO_NOTHING,
        )
class GenericRelation(ForeignObject):
    """Provide a reverse relation to objects pointing at this model via a
    ``GenericForeignKey``."""
    # Field flags
    auto_created = False
    many_to_many = False
    many_to_one = False
    one_to_many = True
    one_to_one = False
    rel_class = GenericRel
    def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
                 for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
        kwargs['rel'] = self.rel_class(
            self, to,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
        )
        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False
        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the extra restriction on the content type is added in
        # get_extra_restriction() below.
        super(GenericRelation, self).__init__(
            to, from_fields=[object_id_field], to_fields=[], **kwargs)
        self.object_id_field_name = object_id_field
        self.content_type_field_name = content_type_field
        self.for_concrete_model = for_concrete_model
    def check(self, **kwargs):
        errors = super(GenericRelation, self).check(**kwargs)
        errors.extend(self._check_generic_foreign_key_existence())
        return errors
    def _check_generic_foreign_key_existence(self):
        # The target model must declare a GenericForeignKey whose field
        # names match the ones this relation was configured with.
        target = self.rel.to
        if isinstance(target, ModelBase):
            fields = target._meta.virtual_fields
            if any(isinstance(field, GenericForeignKey) and
                    field.ct_field == self.content_type_field_name and
                    field.fk_field == self.object_id_field_name
                    for field in fields):
                return []
            else:
                return [
                    checks.Error(
                        ("The GenericRelation defines a relation with the model "
                         "'%s.%s', but that model does not have a GenericForeignKey.") % (
                            target._meta.app_label, target._meta.object_name
                        ),
                        hint=None,
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
        else:
            return []
    def resolve_related_fields(self):
        self.to_fields = [self.model._meta.pk.name]
        return [(self.rel.to._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
    def get_path_info(self):
        opts = self.rel.to._meta
        target = opts.pk
        return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]
    def get_reverse_path_info(self):
        opts = self.model._meta
        from_opts = self.rel.to._meta
        return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
    def get_choices_default(self):
        return super(GenericRelation, self).get_choices(include_blank=False)
    def value_to_string(self, obj):
        """Serialize the relation as the list of related primary keys."""
        qs = getattr(obj, self.name).all()
        return smart_text([instance._get_pk_val() for instance in qs])
    def contribute_to_class(self, cls, name, **kwargs):
        # virtual_only keeps the field out of the concrete column list;
        # the descriptor installed below provides the manager accessor.
        kwargs['virtual_only'] = True
        super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
        self.model = cls
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self.rel))
    def set_attributes_from_rel(self):
        # Intentionally a no-op for this virtual relation.
        pass
    def get_internal_type(self):
        # Pose as a ManyToManyField so no database column is created.
        return "ManyToManyField"
    def get_content_type(self):
        """Return the content type associated with this field's model."""
        return ContentType.objects.get_for_model(self.model,
            for_concrete_model=self.for_concrete_model)
    def get_extra_restriction(self, where_class, alias, remote_alias):
        # Constrain the reverse join to rows with our model's content type.
        field = self.rel.to._meta.get_field(self.content_type_field_name)
        contenttype_pk = self.get_content_type().pk
        cond = where_class()
        lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
        cond.add(lookup, 'AND')
        return cond
    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """Return all objects related to ``objs`` via this ``GenericRelation``."""
        return self.rel.to._base_manager.db_manager(using).filter(**{
            "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
                self.model, for_concrete_model=self.for_concrete_model).pk,
            "%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
        })
class ReverseGenericRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor):
    """
    Accessor to the related objects manager on the one-to-many relation
    created by GenericRelation (e.g. ``post.comments`` when ``Post``
    declares ``comments = GenericRelation(Comment)``).
    """
    @cached_property
    def related_manager_cls(self):
        # Built lazily and cached once per descriptor: a manager class
        # specialized for this relation (see create_generic_related_manager).
        return create_generic_related_manager(
            self.rel.to._default_manager.__class__,
            self.rel,
        )
def create_generic_related_manager(superclass, rel):
    """
    Factory function to create a manager that subclasses another manager
    (generally the default manager of a given model) and adds behaviors
    specific to generic relations.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, instance=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.instance = instance
            self.model = rel.to
            # Content type of the *source* instance; every related row must
            # carry this value in its content-type column.
            content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
                instance, for_concrete_model=rel.field.for_concrete_model)
            self.content_type = content_type
            qn = connection.ops.quote_name
            join_cols = rel.field.get_joining_columns(reverse_join=True)[0]
            self.source_col_name = qn(join_cols[0])
            self.target_col_name = qn(join_cols[1])
            self.content_type_field_name = rel.field.content_type_field_name
            self.object_id_field_name = rel.field.object_id_field_name
            self.prefetch_cache_name = rel.field.attname
            self.pk_val = instance._get_pk_val()
            # Every queryset produced by this manager is narrowed to rows
            # pointing at ``instance`` (content type + object id).
            self.core_filters = {
                '%s__pk' % self.content_type_field_name: content_type.id,
                self.object_id_field_name: self.pk_val,
            }
        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_generic_related_manager(manager.__class__, rel)
            return manager_class(instance=self.instance)
        do_not_call_in_templates = True
        def __str__(self):
            return repr(self)
        def get_queryset(self):
            # Prefer prefetched results when available; otherwise filter the
            # superclass queryset down to rows pointing at our instance.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
        def get_prefetch_queryset(self, instances, queryset=None):
            """Return the 5-tuple prefetch_related() needs for ``instances``."""
            if queryset is None:
                queryset = super(GenericRelatedObjectManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
            }
            # Object IDs may need converting to the type of the instances'
            # PK in order to match up instances.
            object_id_converter = instances[0]._meta.pk.to_python
            return (queryset.filter(**query),
                    lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)
        def add(self, *objs):
            # Point each object at ``instance`` and save it, atomically.
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    setattr(obj, self.content_type_field_name, self.content_type)
                    setattr(obj, self.object_id_field_name, self.pk_val)
                    obj.save()
        add.alters_data = True
        def remove(self, *objs, **kwargs):
            if not objs:
                return
            bulk = kwargs.pop('bulk', True)
            self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
        remove.alters_data = True
        def clear(self, **kwargs):
            bulk = kwargs.pop('bulk', True)
            self._clear(self, bulk)
        clear.alters_data = True
        def _clear(self, queryset, bulk):
            db = router.db_for_write(self.model, instance=self.instance)
            queryset = queryset.using(db)
            if bulk:
                # `QuerySet.delete()` creates its own atomic block which
                # contains the `pre_delete` and `post_delete` signal handlers.
                queryset.delete()
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in queryset:
                        obj.delete()
        _clear.alters_data = True
        def set(self, objs, **kwargs):
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)
            clear = kwargs.pop('clear', False)
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                if clear:
                    self.clear()
                    self.add(*objs)
                else:
                    # Minimal-churn path: only remove/add the difference.
                    old_objs = set(self.using(db).all())
                    new_objs = []
                    for obj in objs:
                        if obj in old_objs:
                            old_objs.remove(obj)
                        else:
                            new_objs.append(obj)
                    self.remove(*old_objs)
                    self.add(*new_objs)
        set.alters_data = True
        def create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True
        def get_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
        get_or_create.alters_data = True
        def update_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
        update_or_create.alters_data = True
    return GenericRelatedObjectManager
| true | true |
f72cd646013ea6a170a5f979dcbd68b1bc930df2 | 8,783 | py | Python | nintendo/nnas.py | elsorino/DiscordAndU | 70e57bbd55daa2b0e24cff0831867daff05822f8 | [
"MIT"
] | 1 | 2021-04-26T19:23:56.000Z | 2021-04-26T19:23:56.000Z | nintendo/nnas.py | elsorino/DiscordAndU | 70e57bbd55daa2b0e24cff0831867daff05822f8 | [
"MIT"
] | null | null | null | nintendo/nnas.py | elsorino/DiscordAndU | 70e57bbd55daa2b0e24cff0831867daff05822f8 | [
"MIT"
] | null | null | null |
from nintendo.common.http import HTTPClient, HTTPRequest
from nintendo.common import xml, ssl, util
import pkg_resources
import collections
import hashlib
import struct
import base64
import urllib.parse
import logging
logger = logging.getLogger(__name__)
CERT = pkg_resources.resource_filename("nintendo", "files/cert/wiiu_common.crt")
KEY = pkg_resources.resource_filename("nintendo", "files/cert/wiiu_common.key")
def calc_password_hash(pid, password):
	"""Return the hex SHA-256 digest the account server expects for a password.

	The hash input is the little-endian principal id, a fixed 4-byte salt,
	and the ASCII-encoded password.
	"""
	salt = b"\x02\x65\x43\x46"
	material = struct.pack("<I", pid) + salt + password.encode("ascii")
	return hashlib.sha256(material).hexdigest()
# Types
#
# Lightweight records for responses from the account server.  Each type
# gets a ``parse`` callable attached below that builds an instance from a
# parsed XML node (child nodes expose their text through ``.value``).
NexToken = collections.namedtuple("NexToken", "host port username password token")
Email = collections.namedtuple("Email", "address id parent primary reachable type validated validation_date")
Mii = collections.namedtuple("Mii", "data id images name pid primary nnid")
# Fix: the typename was "Mii", shadowing the record above in reprs and
# breaking pickling (pickle looks the class up by its typename).
ProfileMii = collections.namedtuple("ProfileMii", "data id hash images name primary")
Account = collections.namedtuple("Account", "attributes domain type username")
Profile = collections.namedtuple(
	"Profile",
	"accounts active birthday country creation_date device_attributes gender language "
	"updated marketing off_device pid email mii region timezone nnid utc_offset"
)
# Parsers
NexToken.parse = lambda obj: NexToken(
	obj["host"].value,
	int(obj["port"].value),
	obj["pid"].value,
	obj["nex_password"].value,
	obj["token"].value
)
Email.parse = lambda obj: Email(
	obj["address"].value,
	int(obj["id"].value),
	obj["parent"].value == "Y",
	obj["primary"].value == "Y",
	obj["reachable"].value == "Y",
	obj["type"].value,
	obj["validated"].value == "Y",
	obj["validated_date"].value
)
Mii.parse = lambda obj: Mii(
	base64.b64decode(obj["data"].value),
	int(obj["id"].value),
	{image["type"].value: image["url"].value for image in obj["images"]},
	obj["name"].value,
	int(obj["pid"].value),
	obj["primary"].value == "Y",
	obj["user_id"].value
)
ProfileMii.parse = lambda obj: ProfileMii(
	base64.b64decode(obj["data"].value),
	int(obj["id"].value),
	obj["mii_hash"].value,
	{image["type"].value: image["url"].value for image in obj["mii_images"]},
	obj["name"].value,
	obj["primary"].value == "Y",
)
Account.parse = lambda obj: Account(
	obj["attributes"].value,
	obj["domain"].value,
	obj["type"].value,
	obj["username"].value
)
Profile.parse = lambda obj: Profile(
	[Account.parse(account) for account in obj["accounts"]],
	obj["active_flag"].value == "Y",
	obj["birth_date"].value,
	obj["country"].value,
	obj["create_date"].value,
	{attrib["name"].value: attrib["value"].value for attrib in obj["device_attributes"]},
	obj["gender"].value,
	obj["language"].value,
	obj["updated"].value,
	obj["marketing_flag"].value == "Y",
	obj["off_device_flag"].value == "Y",
	int(obj["pid"].value),
	Email.parse(obj["email"]),
	ProfileMii.parse(obj["mii"]),
	int(obj["region"].value),
	obj["tz_name"].value,
	obj["user_id"].value,
	int(obj["utc_offset"].value)
)
class NNASError(Exception):
	"""Raised when the account server replies with an error status."""
	def __init__(self, *, status_code, text):
		self.text = text
		self.status_code = status_code
	def __str__(self):
		return "Account request failed with status %i" % self.status_code
class NNASClient:
	"""Client for the Nintendo Network account server (account.nintendo.net)."""
	def __init__(self):
		self.client = HTTPClient()
		# Console TLS client certificate used to authenticate to the server.
		cert = ssl.SSLCertificate.load(CERT, ssl.TYPE_PEM)
		key = ssl.SSLPrivateKey.load(KEY, ssl.TYPE_PEM)
		self.cert = cert, key
		self.url = "account.nintendo.net"
		self.client_id = "a2efa818a34fa16b8afbc8a74eba3eda"
		self.client_secret = "c91cdb5658bd4954ade78533a339cf9a"
		self.platform_id = 1
		self.device_type = 2
		self.device_id = None
		self.serial_number = None
		self.system_version = 0x250
		self.device_cert = None
		self.region = 4
		self.country = "NL"
		self.language = "en"
		self.fpd_version = 0
		self.environment = "L1"
		self.title_id = None
		self.title_version = None
		self.auth_token = None
	# Simple configuration setters
	def set_certificate(self, cert, key): self.cert = cert, key
	def set_url(self, url): self.url = url
	def set_client_id(self, client_id): self.client_id = client_id
	def set_client_secret(self, client_secret): self.client_secret = client_secret
	def set_platform_id(self, platform_id): self.platform_id = platform_id
	def set_device_type(self, device_type): self.device_type = device_type
	def set_device(self, device_id, serial_number, system_version, cert=None):
		self.device_id = device_id
		self.serial_number = serial_number
		self.system_version = system_version
		self.device_cert = cert
	def set_locale(self, region, country, language):
		self.region = region
		self.country = country
		self.language = language
	def set_fpd_version(self, version): self.fpd_version = version
	def set_environment(self, environment): self.environment = environment
	def set_title(self, title_id, title_version):
		self.title_id = title_id
		self.title_version = title_version
	def prepare(self, req, auth=None, cert=None):
		"""Attach the console identification headers to ``req``.

		``auth`` is an Authorization header value (set after login);
		``cert`` is a device certificate header value, if any.
		"""
		req.certificate = self.cert
		req.headers["Host"] = self.url
		req.headers["X-Nintendo-Platform-ID"] = self.platform_id
		req.headers["X-Nintendo-Device-Type"] = self.device_type
		if self.device_id is not None:
			req.headers["X-Nintendo-Device-ID"] = self.device_id
		if self.serial_number is not None:
			req.headers["X-Nintendo-Serial-Number"] = self.serial_number
		req.headers["X-Nintendo-System-Version"] = "%04X" %self.system_version
		req.headers["X-Nintendo-Region"] = self.region
		req.headers["X-Nintendo-Country"] = self.country
		req.headers["Accept-Language"] = self.language
		if auth is None:
			# Unauthenticated requests identify the client application instead.
			req.headers["X-Nintendo-Client-ID"] = self.client_id
			req.headers["X-Nintendo-Client-Secret"] = self.client_secret
		req.headers["Accept"] = "*/*"
		req.headers["X-Nintendo-FPD-Version"] = "%04X" %self.fpd_version
		req.headers["X-Nintendo-Environment"] = self.environment
		if self.title_id is not None:
			req.headers["X-Nintendo-Title-ID"] = "%016X" %self.title_id
			req.headers["X-Nintendo-Unique-ID"] = "%05X" %((self.title_id >> 8) & 0xFFFFF)
		if self.title_version is not None:
			req.headers["X-Nintendo-Application-Version"] = "%04X" %self.title_version
		if cert is not None:
			req.headers["X-Nintendo-Device-Cert"] = cert
		if auth is not None:
			req.headers["Authorization"] = auth
	def request(self, req):
		"""Send ``req``; return the parsed XML body or raise NNASError."""
		response = self.client.request(req, True)
		if response.error():
			logger.error("Account request returned status code %i\n%s", response.status, response.text)
			raise NNASError(status_code=response.status, text=response.text)
		return response.xml
	def login(self, username, password, password_type=None):
		"""Obtain an OAuth access token and remember it for later requests."""
		req = HTTPRequest.post("/v1/api/oauth20/access_token/generate")
		self.prepare(req, cert=self.device_cert)
		req.form["grant_type"] = "password"
		req.form["user_id"] = urllib.parse.quote(username)
		req.form["password"] = urllib.parse.quote(password)
		if password_type is not None:
			req.form["password_type"] = password_type
		response = self.request(req)
		self.auth_token = "Bearer " + response["access_token"]["token"].value
	def get_emails(self):
		req = HTTPRequest.get("/v1/api/people/@me/emails")
		self.prepare(req, self.auth_token)
		return [Email.parse(email) for email in self.request(req)]
	def get_profile(self):
		req = HTTPRequest.get("/v1/api/people/@me/profile")
		self.prepare(req, self.auth_token)
		return Profile.parse(self.request(req))
	def get_nex_token(self, game_server_id):
		"""Request NEX game-server credentials for ``game_server_id``."""
		req = HTTPRequest.get("/v1/api/provider/nex_token/@me")
		req.params["game_server_id"] = "%08X" %game_server_id
		self.prepare(req, self.auth_token)
		return NexToken.parse(self.request(req))
	# The following functions can be used without logging in
	def get_miis(self, pids):
		req = HTTPRequest.get("/v1/api/miis")
		req.params["pids"] = urllib.parse.quote(",".join([str(pid) for pid in pids]))
		self.prepare(req)
		response = self.request(req)
		return [Mii.parse(mii) for mii in response]
	def get_pids(self, nnids):
		"""Map a list of NNIDs to their principal ids."""
		req = HTTPRequest.get("/v1/api/admin/mapped_ids")
		req.params["input_type"] = "user_id"
		req.params["output_type"] = "pid"
		req.params["input"] = urllib.parse.quote(",".join(nnids))
		self.prepare(req)
		response = self.request(req)
		return {id["in_id"].value: int(id["out_id"].value) for id in response}
	def get_nnids(self, pids):
		"""Map a list of principal ids to their NNIDs."""
		req = HTTPRequest.get("/v1/api/admin/mapped_ids")
		req.params["input_type"] = "pid"
		req.params["output_type"] = "user_id"
		req.params["input"] = urllib.parse.quote(",".join([str(pid) for pid in pids]))
		self.prepare(req)
		response = self.request(req)
		return {int(id["in_id"].value): id["out_id"].value for id in response}
	# Single-item convenience wrappers
	def get_mii(self, pid): return self.get_miis([pid])[0]
	def get_pid(self, nnid): return self.get_pids([nnid])[nnid]
	def get_nnid(self, pid): return self.get_nnids([pid])[pid]
| 31.822464 | 109 | 0.710919 |
from nintendo.common.http import HTTPClient, HTTPRequest
from nintendo.common import xml, ssl, util
import pkg_resources
import collections
import hashlib
import struct
import base64
import urllib.parse
import logging
logger = logging.getLogger(__name__)
CERT = pkg_resources.resource_filename("nintendo", "files/cert/wiiu_common.crt")
KEY = pkg_resources.resource_filename("nintendo", "files/cert/wiiu_common.key")
def calc_password_hash(pid, password):
	"""Return the hex SHA-256 digest the account server expects for a password.

	The hash input is the little-endian principal id, a fixed 4-byte salt,
	and the ASCII-encoded password.
	"""
	salt = b"\x02\x65\x43\x46"
	material = struct.pack("<I", pid) + salt + password.encode("ascii")
	return hashlib.sha256(material).hexdigest()
# Lightweight records for responses from the account server.  Each type
# gets a ``parse`` callable attached below that builds an instance from a
# parsed XML node (child nodes expose their text through ``.value``).
NexToken = collections.namedtuple("NexToken", "host port username password token")
Email = collections.namedtuple("Email", "address id parent primary reachable type validated validation_date")
Mii = collections.namedtuple("Mii", "data id images name pid primary nnid")
# Fix: the typename was "Mii", shadowing the record above in reprs and
# breaking pickling (pickle looks the class up by its typename).
ProfileMii = collections.namedtuple("ProfileMii", "data id hash images name primary")
Account = collections.namedtuple("Account", "attributes domain type username")
Profile = collections.namedtuple(
	"Profile",
	"accounts active birthday country creation_date device_attributes gender language "
	"updated marketing off_device pid email mii region timezone nnid utc_offset"
)
NexToken.parse = lambda obj: NexToken(
	obj["host"].value,
	int(obj["port"].value),
	obj["pid"].value,
	obj["nex_password"].value,
	obj["token"].value
)
Email.parse = lambda obj: Email(
	obj["address"].value,
	int(obj["id"].value),
	obj["parent"].value == "Y",
	obj["primary"].value == "Y",
	obj["reachable"].value == "Y",
	obj["type"].value,
	obj["validated"].value == "Y",
	obj["validated_date"].value
)
Mii.parse = lambda obj: Mii(
	base64.b64decode(obj["data"].value),
	int(obj["id"].value),
	{image["type"].value: image["url"].value for image in obj["images"]},
	obj["name"].value,
	int(obj["pid"].value),
	obj["primary"].value == "Y",
	obj["user_id"].value
)
ProfileMii.parse = lambda obj: ProfileMii(
	base64.b64decode(obj["data"].value),
	int(obj["id"].value),
	obj["mii_hash"].value,
	{image["type"].value: image["url"].value for image in obj["mii_images"]},
	obj["name"].value,
	obj["primary"].value == "Y",
)
Account.parse = lambda obj: Account(
	obj["attributes"].value,
	obj["domain"].value,
	obj["type"].value,
	obj["username"].value
)
Profile.parse = lambda obj: Profile(
	[Account.parse(account) for account in obj["accounts"]],
	obj["active_flag"].value == "Y",
	obj["birth_date"].value,
	obj["country"].value,
	obj["create_date"].value,
	{attrib["name"].value: attrib["value"].value for attrib in obj["device_attributes"]},
	obj["gender"].value,
	obj["language"].value,
	obj["updated"].value,
	obj["marketing_flag"].value == "Y",
	obj["off_device_flag"].value == "Y",
	int(obj["pid"].value),
	Email.parse(obj["email"]),
	ProfileMii.parse(obj["mii"]),
	int(obj["region"].value),
	obj["tz_name"].value,
	obj["user_id"].value,
	int(obj["utc_offset"].value)
)
class NNASError(Exception):
	"""Raised when the account server replies with an error status."""
	def __init__(self, *, status_code, text):
		self.text = text
		self.status_code = status_code
	def __str__(self):
		return "Account request failed with status %i" % self.status_code
class NNASClient:
	"""Client for the Nintendo Network Authentication Server (account.nintendo.net).

	Speaks the XML-over-HTTPS account API: OAuth password login, profile /
	e-mail / mii lookups, NEX game-server tokens, and pid <-> nnid mapping.
	Every request carries the console-identifying X-Nintendo-* headers
	assembled in prepare().
	"""

	def __init__(self):
		self.client = HTTPClient()
		# Client TLS certificate/key pair used for mutual authentication.
		cert = ssl.SSLCertificate.load(CERT, ssl.TYPE_PEM)
		key = ssl.SSLPrivateKey.load(KEY, ssl.TYPE_PEM)
		self.cert = cert, key
		self.url = "account.nintendo.net"
		# Default OAuth client credentials and console parameters; all of
		# these can be overridden through the set_* methods below.
		self.client_id = "a2efa818a34fa16b8afbc8a74eba3eda"
		self.client_secret = "c91cdb5658bd4954ade78533a339cf9a"
		self.platform_id = 1
		self.device_type = 2
		self.device_id = None
		self.serial_number = None
		self.system_version = 0x250
		self.device_cert = None
		self.region = 4
		self.country = "NL"
		self.language = "en"
		self.fpd_version = 0
		self.environment = "L1"
		self.title_id = None
		self.title_version = None
		# Bearer token set by login(); sent on authenticated requests.
		self.auth_token = None

	# --- simple configuration setters -----------------------------------
	def set_certificate(self, cert, key): self.cert = cert, key
	def set_url(self, url): self.url = url
	def set_client_id(self, client_id): self.client_id = client_id
	def set_client_secret(self, client_secret): self.client_secret = client_secret
	def set_platform_id(self, platform_id): self.platform_id = platform_id
	def set_device_type(self, device_type): self.device_type = device_type

	def set_device(self, device_id, serial_number, system_version, cert=None):
		# Identify a specific console; 'cert' is its device certificate.
		self.device_id = device_id
		self.serial_number = serial_number
		self.system_version = system_version
		self.device_cert = cert

	def set_locale(self, region, country, language):
		self.region = region
		self.country = country
		self.language = language

	def set_fpd_version(self, version): self.fpd_version = version
	def set_environment(self, environment): self.environment = environment

	def set_title(self, title_id, title_version):
		self.title_id = title_id
		self.title_version = title_version

	def prepare(self, req, auth=None, cert=None):
		"""Attach the TLS certificate and X-Nintendo-* headers to a request.

		'auth' is an Authorization header value (e.g. the bearer token from
		login()); when absent, the OAuth client id/secret headers are sent
		instead.  'cert' is an optional device certificate header value.
		"""
		req.certificate = self.cert
		req.headers["Host"] = self.url
		req.headers["X-Nintendo-Platform-ID"] = self.platform_id
		req.headers["X-Nintendo-Device-Type"] = self.device_type
		if self.device_id is not None:
			req.headers["X-Nintendo-Device-ID"] = self.device_id
		if self.serial_number is not None:
			req.headers["X-Nintendo-Serial-Number"] = self.serial_number
		req.headers["X-Nintendo-System-Version"] = "%04X" %self.system_version
		req.headers["X-Nintendo-Region"] = self.region
		req.headers["X-Nintendo-Country"] = self.country
		req.headers["Accept-Language"] = self.language
		if auth is None:
			req.headers["X-Nintendo-Client-ID"] = self.client_id
			req.headers["X-Nintendo-Client-Secret"] = self.client_secret
		req.headers["Accept"] = "*/*"
		req.headers["X-Nintendo-FPD-Version"] = "%04X" %self.fpd_version
		req.headers["X-Nintendo-Environment"] = self.environment
		if self.title_id is not None:
			req.headers["X-Nintendo-Title-ID"] = "%016X" %self.title_id
			# Unique id is a 20-bit slice of the title id.
			req.headers["X-Nintendo-Unique-ID"] = "%05X" %((self.title_id >> 8) & 0xFFFFF)
		if self.title_version is not None:
			req.headers["X-Nintendo-Application-Version"] = "%04X" %self.title_version
		if cert is not None:
			req.headers["X-Nintendo-Device-Cert"] = cert
		if auth is not None:
			req.headers["Authorization"] = auth

	def request(self, req):
		"""Send the request; raise NNASError on an HTTP error status.

		Returns the parsed XML body on success.
		"""
		response = self.client.request(req, True)
		if response.error():
			logger.error("Account request returned status code %i\n%s", response.status, response.text)
			raise NNASError(status_code=response.status, text=response.text)
		return response.xml

	def login(self, username, password, password_type=None):
		"""Perform OAuth password login and cache the bearer token."""
		req = HTTPRequest.post("/v1/api/oauth20/access_token/generate")
		self.prepare(req, cert=self.device_cert)
		req.form["grant_type"] = "password"
		req.form["user_id"] = urllib.parse.quote(username)
		req.form["password"] = urllib.parse.quote(password)
		if password_type is not None:
			req.form["password_type"] = password_type
		response = self.request(req)
		self.auth_token = "Bearer " + response["access_token"]["token"].value

	def get_emails(self):
		"""Return the Email entries of the logged-in account."""
		req = HTTPRequest.get("/v1/api/people/@me/emails")
		self.prepare(req, self.auth_token)
		return [Email.parse(email) for email in self.request(req)]

	def get_profile(self):
		"""Return the Profile of the logged-in account."""
		req = HTTPRequest.get("/v1/api/people/@me/profile")
		self.prepare(req, self.auth_token)
		return Profile.parse(self.request(req))

	def get_nex_token(self, game_server_id):
		"""Request a NEX login token for the given game server id."""
		req = HTTPRequest.get("/v1/api/provider/nex_token/@me")
		req.params["game_server_id"] = "%08X" %game_server_id
		self.prepare(req, self.auth_token)
		return NexToken.parse(self.request(req))

	def get_miis(self, pids):
		"""Look up Mii data for a list of pids (unauthenticated endpoint)."""
		req = HTTPRequest.get("/v1/api/miis")
		req.params["pids"] = urllib.parse.quote(",".join([str(pid) for pid in pids]))
		self.prepare(req)
		response = self.request(req)
		return [Mii.parse(mii) for mii in response]

	def get_pids(self, nnids):
		"""Map nnids to pids.  Returns {nnid: pid}."""
		req = HTTPRequest.get("/v1/api/admin/mapped_ids")
		req.params["input_type"] = "user_id"
		req.params["output_type"] = "pid"
		req.params["input"] = urllib.parse.quote(",".join(nnids))
		self.prepare(req)
		response = self.request(req)
		return {id["in_id"].value: int(id["out_id"].value) for id in response}

	def get_nnids(self, pids):
		"""Map pids to nnids.  Returns {pid: nnid}."""
		req = HTTPRequest.get("/v1/api/admin/mapped_ids")
		req.params["input_type"] = "pid"
		req.params["output_type"] = "user_id"
		req.params["input"] = urllib.parse.quote(",".join([str(pid) for pid in pids]))
		self.prepare(req)
		response = self.request(req)
		return {int(id["in_id"].value): id["out_id"].value for id in response}

	# Single-item convenience wrappers around the bulk endpoints above.
	def get_mii(self, pid): return self.get_miis([pid])[0]
	def get_pid(self, nnid): return self.get_pids([nnid])[nnid]
	def get_nnid(self, pid): return self.get_nnids([pid])[pid]
| true | true |
f72cd64df372a6dea962a0662deef14976f5aa45 | 507 | py | Python | accounts/urls.py | JhoLee/django-lecture_manager | d74ab1d48c954583ffd509346d7cb30b9214f1dc | [
"MIT"
] | null | null | null | accounts/urls.py | JhoLee/django-lecture_manager | d74ab1d48c954583ffd509346d7cb30b9214f1dc | [
"MIT"
] | 7 | 2020-06-05T20:02:50.000Z | 2021-09-22T18:05:02.000Z | accounts/urls.py | JhoLee/django-lecture_manager | d74ab1d48c954583ffd509346d7cb30b9214f1dc | [
"MIT"
] | null | null | null | from django.contrib.auth import views as auth_views
from django.urls import path
from . import views
app_name = 'accounts'
urlpatterns = [
path('signup/', views.signup, name='signup'),
path('login/', views.signin, name='login'),
path('logout/', views.signout, name='logout'),
path('profile/', views.view_profile, name='view_profile'),
path('profile/update/', views.update_profile, name='update_profile'),
path('change-password/', views.change_password, name='password_change'),
]
| 31.6875 | 76 | 0.704142 | from django.contrib.auth import views as auth_views
from django.urls import path
from . import views
app_name = 'accounts'
urlpatterns = [
path('signup/', views.signup, name='signup'),
path('login/', views.signin, name='login'),
path('logout/', views.signout, name='logout'),
path('profile/', views.view_profile, name='view_profile'),
path('profile/update/', views.update_profile, name='update_profile'),
path('change-password/', views.change_password, name='password_change'),
]
| true | true |
f72cd66faafd23457f38cf7a6f13260a3caed7b6 | 2,894 | py | Python | deps/turtlebot/follow_line_tc_pkg/scripts/follow_line_step_basicimage.py | CARMinesDouai/2021-robot_guide | 226e3279a710c34d6f7b31e4cba4f047ae8aabee | [
"MIT"
] | null | null | null | deps/turtlebot/follow_line_tc_pkg/scripts/follow_line_step_basicimage.py | CARMinesDouai/2021-robot_guide | 226e3279a710c34d6f7b31e4cba4f047ae8aabee | [
"MIT"
] | null | null | null | deps/turtlebot/follow_line_tc_pkg/scripts/follow_line_step_basicimage.py | CARMinesDouai/2021-robot_guide | 226e3279a710c34d6f7b31e4cba4f047ae8aabee | [
"MIT"
] | 1 | 2020-12-15T10:17:24.000Z | 2020-12-15T10:17:24.000Z | #!/usr/bin/env python
import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
class LineFollower(object):
    """Displays the camera feed and publishes /cmd_vel with delivery retry."""

    def __init__(self):
        self.bridge_object = CvBridge()
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
        # Subscribe to our own topic so we can confirm a command was echoed.
        self.cmd_vel_subs = rospy.Subscriber('/cmd_vel', Twist, self.cmdvel_callback)
        self.last_cmdvel_command = Twist()
        self._cmdvel_pub_rate = rospy.Rate(10)
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.camera_callback)

    def cmdvel_callback(self,msg):
        # Remember the most recent command observed on /cmd_vel.
        self.last_cmdvel_command = msg

    def compare_twist_commands(self,twist1,twist2):
        """Return True when both Twist messages match component-wise."""
        same = all(
            getattr(twist1.linear, axis) == getattr(twist2.linear, axis)
            and getattr(twist1.angular, axis) == getattr(twist2.angular, axis)
            for axis in ('x', 'y', 'z')
        )
        if not same:
            rospy.logwarn("The Current Twist is not the same as the one sent, Resending")
        return same

    def camera_callback(self,data):
        """Convert the incoming ROS image to OpenCV BGR and display it."""
        try:
            # bgr8 is OpenCV's default channel ordering.
            frame = self.bridge_object.imgmsg_to_cv2(data, desired_encoding="bgr8")
        except CvBridgeError as e:
            print(e)

        cv2.imshow("Image window", frame)
        cv2.waitKey(1)

    def move_robot(self, twist_object):
        """Publish twist_object until it is echoed back on /cmd_vel.

        Re-sending guards against topic message loss, especially at startup.
        """
        delivered = False
        while not delivered:
            self.cmd_vel_pub.publish(twist_object)
            self._cmdvel_pub_rate.sleep()
            delivered = self.compare_twist_commands(twist1=self.last_cmdvel_command,
                                                    twist2=twist_object)

    def clean_class(self):
        """Command a full stop (used on shutdown)."""
        stop = Twist()
        stop.angular.z = 0.0
        self.move_robot(stop)
def main():
    """Spin the robot at a fixed rate until ROS shutdown, then stop it.

    Registers a shutdown hook that stops the robot, closes OpenCV windows
    and flags the publish loop to exit.
    """
    rospy.init_node('line_following_node', anonymous=True)

    line_follower_object = LineFollower()

    twist_object = Twist()
    # Make it start turning
    twist_object.angular.z = 0.5

    rate = rospy.Rate(5)

    ctrl_c = False

    def shutdownhook():
        # works better than the rospy.is_shut_down()
        # BUG FIX: without 'nonlocal' this assignment created a new local
        # variable, so the outer 'while not ctrl_c' loop never terminated.
        nonlocal ctrl_c
        line_follower_object.clean_class()
        cv2.destroyAllWindows()
        rospy.loginfo("shutdown time!")
        ctrl_c = True

    rospy.on_shutdown(shutdownhook)

    while not ctrl_c:
        line_follower_object.move_robot(twist_object)
        rate.sleep()


if __name__ == '__main__':
    main()
import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
class LineFollower(object):
def __init__(self):
self.bridge_object = CvBridge()
self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
self.cmd_vel_subs = rospy.Subscriber('/cmd_vel', Twist, self.cmdvel_callback)
self.last_cmdvel_command = Twist()
self._cmdvel_pub_rate = rospy.Rate(10)
self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.camera_callback)
def cmdvel_callback(self,msg):
self.last_cmdvel_command = msg
def compare_twist_commands(self,twist1,twist2):
LX = twist1.linear.x == twist2.linear.x
LY = twist1.linear.y == twist2.linear.y
LZ = twist1.linear.z == twist2.linear.z
AX = twist1.angular.x == twist2.angular.x
AY = twist1.angular.y == twist2.angular.y
AZ = twist1.angular.z == twist2.angular.z
equal = LX and LY and LZ and AX and AY and AZ
if not equal:
rospy.logwarn("The Current Twist is not the same as the one sent, Resending")
return equal
def camera_callback(self,data):
try:
cv_image = self.bridge_object.imgmsg_to_cv2(data, desired_encoding="bgr8")
except CvBridgeError as e:
print(e)
cv2.imshow("Image window", cv_image)
cv2.waitKey(1)
def move_robot(self, twist_object):
current_equal_to_new = False
while (not (current_equal_to_new) ):
self.cmd_vel_pub.publish(twist_object)
self._cmdvel_pub_rate.sleep()
current_equal_to_new = self.compare_twist_commands(twist1=self.last_cmdvel_command,
twist2=twist_object)
def clean_class(self):
twist_object = Twist()
twist_object.angular.z = 0.0
self.move_robot(twist_object)
def main():
rospy.init_node('line_following_node', anonymous=True)
line_follower_object = LineFollower()
twist_object = Twist()
twist_object.angular.z = 0.5
rate = rospy.Rate(5)
ctrl_c = False
def shutdownhook():
line_follower_object.clean_class()
cv2.destroyAllWindows()
rospy.loginfo("shutdown time!")
ctrl_c = True
rospy.on_shutdown(shutdownhook)
while not ctrl_c:
line_follower_object.move_robot(twist_object)
rate.sleep()
if __name__ == '__main__':
main() | true | true |
f72cd747e6808eabbbc492f7c29daeb23eb42ad6 | 1,816 | py | Python | core/urls.py | saksham1991999/upscbasicfunda | b17e288081cb4ca9dd79d198cd0b22136c0794bb | [
"MIT"
] | null | null | null | core/urls.py | saksham1991999/upscbasicfunda | b17e288081cb4ca9dd79d198cd0b22136c0794bb | [
"MIT"
] | 7 | 2021-04-08T21:17:18.000Z | 2022-01-13T03:39:23.000Z | core/urls.py | saksham1991999/upscbasicfunda | b17e288081cb4ca9dd79d198cd0b22136c0794bb | [
"MIT"
] | null | null | null | from rest_framework.routers import DefaultRouter
from . import views
from django.urls import path, re_path
app_name = 'core'
urlpatterns = [
path("search/", views.SearchSubscriptionsView.as_view()),
path("general-notification/",views.Notification.as_view()),
path("personal-notification/",views.PersonalNotification.as_view()),
path("promo-code/",views.PromocodeAPI.as_view()),
path("promo-code-view/",views.PromoCodeViewAPI.as_view()),
path("testing-mail/",views.DemoAPI.as_view()),
]
router = DefaultRouter()
router.register('users', views.UserViewSet, basename='users')
router.register('team-members', views.TeamMemberViewSet, basename='team-members')
router.register('team-form', views.TeamFormViewSet, basename='team-form')
router.register('contact-us', views.ContactUsViewSet, basename='contactus')
router.register('feedbacks', views.FeedbackViewSet, basename='feedback')
router.register('faqs', views.FAQViewSet, basename='faqs')
router.register('articles', views.ArticleViewSet, basename='articles')
router.register('news', views.NewsViewSet, basename='news')
router.register('newsletter', views.NewsletterViewSet, basename='newsletter')
#router.register('General Notifications', views.Notification, basename='general-notification')
router.register('categories', views.CategoryViewSet, basename='category')
router.register('sub-categories', views.SubCategoryViewSet, basename='sub-category')
router.register('pdfs', views.PDFSerializer, basename='pdf')
router.register('mcqs', views.MCQSerializer, basename='mcq')
router.register('summaries', views.SummarySerializer, basename='summary')
router.register('sessions', views.SessionSerializer, basename='session')
router.register('user-subscriptions', views.UserSubscriptionsSerializer, basename='user-subscription')
urlpatterns += router.urls
| 42.232558 | 102 | 0.786344 | from rest_framework.routers import DefaultRouter
from . import views
from django.urls import path, re_path
app_name = 'core'
urlpatterns = [
path("search/", views.SearchSubscriptionsView.as_view()),
path("general-notification/",views.Notification.as_view()),
path("personal-notification/",views.PersonalNotification.as_view()),
path("promo-code/",views.PromocodeAPI.as_view()),
path("promo-code-view/",views.PromoCodeViewAPI.as_view()),
path("testing-mail/",views.DemoAPI.as_view()),
]
router = DefaultRouter()
router.register('users', views.UserViewSet, basename='users')
router.register('team-members', views.TeamMemberViewSet, basename='team-members')
router.register('team-form', views.TeamFormViewSet, basename='team-form')
router.register('contact-us', views.ContactUsViewSet, basename='contactus')
router.register('feedbacks', views.FeedbackViewSet, basename='feedback')
router.register('faqs', views.FAQViewSet, basename='faqs')
router.register('articles', views.ArticleViewSet, basename='articles')
router.register('news', views.NewsViewSet, basename='news')
router.register('newsletter', views.NewsletterViewSet, basename='newsletter')
router.register('categories', views.CategoryViewSet, basename='category')
router.register('sub-categories', views.SubCategoryViewSet, basename='sub-category')
router.register('pdfs', views.PDFSerializer, basename='pdf')
router.register('mcqs', views.MCQSerializer, basename='mcq')
router.register('summaries', views.SummarySerializer, basename='summary')
router.register('sessions', views.SessionSerializer, basename='session')
router.register('user-subscriptions', views.UserSubscriptionsSerializer, basename='user-subscription')
urlpatterns += router.urls
| true | true |
f72cd77e800a0e0719a55ec6af8ef51dcd7cbf46 | 3,591 | py | Python | src/classes/item.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 8 | 2021-12-08T05:33:58.000Z | 2022-03-07T00:40:48.000Z | src/classes/item.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 34 | 2021-08-11T03:59:33.000Z | 2022-03-10T05:39:26.000Z | src/classes/item.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 1 | 2022-01-08T07:34:55.000Z | 2022-01-08T07:34:55.000Z | #!/usr/bin/env python3
"""
Item is a terrible name. I whole-heartedly acknowledge this and apologise for any future maintainer.
In this context item represents an element under 'workflow' in workflow.yaml, 'tool' in tool.yaml and so on.
An item itself does not contain much information.
Just a name, a path (this will just be the name relative to the tool root anyway) and the categories associated
with any of the subsequent versions of this tool / workflow.
Categories are only relevant for tools and workflows.
"""
from utils.logging import get_logger
from ruamel.yaml.comments import CommentedMap as OrderedDict
from pathlib import Path
from classes.item_version import ItemVersion
from utils.errors import ItemCreationError, ItemDirectoryNotFoundError
logger = get_logger()
class Item:
    """
    Only subclasses are actually used. These comprise ItemTool, ItemWorkflow, ItemExpression...
    Item represents an element under workflow.yaml or tool.yaml etc.

    Attributes:
        name: item name (tool / workflow / expression name)
        path: path of the item relative to the tool root
        root_dir: repository root directory (Path or str), may be None
        versions: list of ItemVersion subclass objects
        categories: category names (relevant for tools and workflows only)
    """

    def __init__(self, name, path, root_dir=None, versions=None, categories=None):
        self.name = name
        self.path = path
        self.root_dir = root_dir

        # Normalise 'versions' into a list of ItemVersion subclass objects.
        if not versions:
            # None or an empty list
            self.versions = []
        elif isinstance(versions[0], ItemVersion):
            # Already parsed objects
            self.versions = versions
        elif isinstance(versions[0], dict):
            # Raw dicts (e.g. straight from yaml) -> delegate to subclass parser
            self.versions = self.get_versions(versions)
        else:
            # Unknown element type: fall back to an empty list
            self.versions = []

        self.categories = categories if categories is not None else []

    def check_dir(self):
        """
        Check that the directory <root_dir>/<name> exists for this 'item'.

        Raises:
            ItemDirectoryNotFoundError: when the directory is missing.
        """
        # BUG FIX: the previous check `not self.root_dir / Path(self.name) is not None`
        # parsed as `not (path is not None)`, which is always False for a Path
        # object, so the error branch could never trigger.  Test existence instead.
        item_dir = Path(self.root_dir) / self.name
        if not item_dir.is_dir():
            logger.error(f"Could not get directory \"{self.root_dir}/{self.name}\"")
            raise ItemDirectoryNotFoundError

    def to_dict(self):
        """
        Write an item to a dictionary - redefined in expression and schema class where categories are not defined
        :return: OrderedDict with name, path, serialized versions and categories
        """
        return OrderedDict({
            "name": self.name,
            "path": str(self.path),
            "versions": [
                # Versions may still be raw dicts if get_versions was bypassed
                version.to_dict() if isinstance(version, ItemVersion) else version
                for version in self.versions
            ],
            "categories": self.categories
        })

    def get_versions(self, versions):
        """
        Parse raw version dicts into ItemVersion subclass objects.
        Implemented in subclass.
        :return: list of ItemVersion subclass objects
        """
        raise NotImplementedError

    @classmethod
    def from_dict(cls, item_dict):
        """
        Returns an item object from a dictionary
        :param item_dict: dict with at least 'name' and 'path' keys
        :return: an instance of cls
        :raises ItemCreationError: when 'name' or 'path' is missing
        """
        if item_dict.get("name", None) is None:
            logger.error("\"name\" attribute not found, cannot create item")
            raise ItemCreationError

        if item_dict.get("path", None) is None:
            logger.error("\"path\" attribute not found, cannot create item")
            raise ItemCreationError

        return cls(name=item_dict.get("name"),
                   path=item_dict.get("path"),
                   versions=item_dict.get("versions", None),
                   categories=item_dict.get("categories", None))
| 32.645455 | 113 | 0.624617 |
from utils.logging import get_logger
from ruamel.yaml.comments import CommentedMap as OrderedDict
from pathlib import Path
from classes.item_version import ItemVersion
from utils.errors import ItemCreationError, ItemDirectoryNotFoundError
logger = get_logger()
class Item:
def __init__(self, name, path, root_dir=None, versions=None, categories=None):
self.name = name
self.path = path
self.root_dir = root_dir
if versions is None:
self.versions = []
elif len(versions) == 0:
self.versions = []
elif isinstance(versions[0], ItemVersion):
self.versions = versions
elif isinstance(versions[0], dict):
self.versions = self.get_versions(versions)
else:
self.versions = []
self.categories = categories if categories is not None else []
def check_dir(self):
if not self.root_dir / Path(self.name) is not None:
logger.error(f"Could not get directory \"{self.root_dir}/{self.name}\"")
raise ItemDirectoryNotFoundError
def to_dict(self):
return OrderedDict({
"name": self.name,
"path": str(self.path),
"versions": [
version.to_dict() if isinstance(version, ItemVersion) else version
for version in self.versions
],
"categories": self.categories
})
def get_versions(self, versions):
raise NotImplementedError
@classmethod
def from_dict(cls, item_dict):
if item_dict.get("name", None) is None:
logger.error("\"name\" attribute not found, cannot create item")
raise ItemCreationError
if item_dict.get("path", None) is None:
logger.error("\"path\" attribute not found, cannot create item")
raise ItemCreationError
return cls(name=item_dict.get("name"),
path=item_dict.get("path"),
versions=item_dict.get("versions", None),
categories=item_dict.get("categories", None))
| true | true |
f72cd844ec376f6444b3a3f87163523a385a0c9f | 23,894 | py | Python | lib/FamaProfiling/FamaProfilingServer.py | aekazakov/FamaProfiling | d9db15ea217e3be2aab65c356564a6d345b4f410 | [
"MIT"
] | null | null | null | lib/FamaProfiling/FamaProfilingServer.py | aekazakov/FamaProfiling | d9db15ea217e3be2aab65c356564a6d345b4f410 | [
"MIT"
] | null | null | null | lib/FamaProfiling/FamaProfilingServer.py | aekazakov/FamaProfiling | d9db15ea217e3be2aab65c356564a6d345b4f410 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
# Environment variables / config keys used by the KBase deployment machinery.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'

# Note that the error fields do not match the 2.0 JSONRPC spec


def get_config_file():
    """Path of the deployment config file, or None when not configured."""
    return environ.get(DEPLOY)


def get_service_name():
    """Service section name from the environment, or None."""
    return environ.get(SERVICE)


def get_config():
    """Parse the deployment config into a plain dict.

    Returns None when no config file is configured; otherwise reads the
    section named by get_service_name() (defaulting to 'FamaProfiling').
    """
    cfg_path = get_config_file()
    if not cfg_path:
        return None
    parser = ConfigParser()
    parser.read(cfg_path)
    section = get_service_name() or 'FamaProfiling'
    return {name: value for name, value in parser.items(section)}
config = get_config()
from FamaProfilingImpl import FamaProfiling # noqa @IgnorePep8
impl_FamaProfiling = FamaProfiling(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally understands sets and any object
    exposing a ``toJSONable()`` hook (used by KBase SDK return types)."""

    def default(self, obj):
        # Sets have no JSON representation; serialize them as arrays.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to serialization by providing toJSONable().
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Anything else: defer to the stock encoder (raises TypeError).
        return super().default(obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC dispatcher customized for KBase SDK services.

    Differences from the stock jsonrpcbase service: methods receive a
    MethodContext as an implicit first argument, results are serialized
    with JSONObjectEncoder, and unexpected exceptions are wrapped in a
    JSONServerError carrying the traceback.
    """

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the -1 accounts for the implicit ctx argument)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            # Protocol-level errors pass through unchanged.
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap it so the client gets a server error with the traceback.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #    rdata = json.loads(jsondata)
        # except ValueError:
        #    raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional parameter type validation, when declared at add() time.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context passed as the first argument to service methods.

    Behaves as a dict holding call metadata (client_ip, user_id, token,
    module, method, call_id, provenance, ...) and offers logging helpers
    that forward to the service logger with that metadata attached.
    """

    def __init__(self, logger):
        # Call metadata; populated by the server before dispatch.
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug (numeric 7-9 or names).
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log a debug message at user level 1-3 (or a raw debug level)."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            # User levels 1-3 map to syslog-style debug levels 7-9.
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Attach the call metadata so every log line is traceable.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance, preferring the callback server when available.

        When SDK_CALLBACK_URL is set, queries the callback server via
        JSON-RPC; otherwise falls back to the locally stored 'provenance'.
        Raises ServerError on a callback server error response.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A JSON body carries a structured JSON-RPC error.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    """The JSON-RPC call returned an error.

    Attributes:
        name: the name of the error.
        code: the numeric error code.
        message: a human readable error message ('' when absent).
        data: the server-side stacktrace ('data' in JSON-RPC 2.0,
            'error' in 1.1; '' when neither is supplied).
    """

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        # Normalize falsy values to empty strings so __str__ is always safe.
        self.message = message or ''
        self.data = data or error or ''

    def __str__(self):
        return (self.name + ': ' + str(self.code) + '. ' + self.message +
                '\n' + self.data)
def getIPAddress(environ):
    """Best-effort client IP address from a WSGI environ, for logging.

    Prefers the X-Forwarded-For / X-Real-IP proxy headers unless the
    module-level service config explicitly disables trusting them
    ('dont_trust_x_ip_headers' == 'true'); falls back to REMOTE_ADDR.
    """
    trust_proxy_headers = (config is None or
                           config.get('dont_trust_x_ip_headers') != 'true')

    if trust_proxy_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            # X-Forwarded-For may be a chain; the first entry is the client.
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'FamaProfiling'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_FamaProfiling.run_FamaReadProfiling,
name='FamaProfiling.run_FamaReadProfiling',
types=[dict])
self.method_authentication['FamaProfiling.run_FamaReadProfiling'] = 'required' # noqa
self.rpc_service.add(impl_FamaProfiling.run_FamaGenomeProfiling,
name='FamaProfiling.run_FamaGenomeProfiling',
types=[dict])
self.method_authentication['FamaProfiling.run_FamaGenomeProfiling'] = 'required' # noqa
self.rpc_service.add(impl_FamaProfiling.view_FamaFunctionalProfile,
name='FamaProfiling.view_FamaFunctionalProfile',
types=[dict])
self.method_authentication['FamaProfiling.view_FamaFunctionalProfile'] = 'required' # noqa
self.rpc_service.add(impl_FamaProfiling.status,
name='FamaProfiling.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'FamaProfiling ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
| 37.044961 | 151 | 0.545451 |
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'FamaProfiling'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from FamaProfilingImpl import FamaProfiling
impl_FamaProfiling = FamaProfiling(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else:
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
rdata = jsondata
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
return None
else:
raise InvalidRequestError
def _handle_request(self, ctx, request):
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'FamaProfiling'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_FamaProfiling.run_FamaReadProfiling,
name='FamaProfiling.run_FamaReadProfiling',
types=[dict])
self.method_authentication['FamaProfiling.run_FamaReadProfiling'] = 'required' # noqa
self.rpc_service.add(impl_FamaProfiling.run_FamaGenomeProfiling,
name='FamaProfiling.run_FamaGenomeProfiling',
types=[dict])
self.method_authentication['FamaProfiling.run_FamaGenomeProfiling'] = 'required' # noqa
self.rpc_service.add(impl_FamaProfiling.view_FamaFunctionalProfile,
name='FamaProfiling.view_FamaFunctionalProfile',
types=[dict])
self.method_authentication['FamaProfiling.view_FamaFunctionalProfile'] = 'required' # noqa
self.rpc_service.add(impl_FamaProfiling.status,
name='FamaProfiling.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'FamaProfiling ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
| true | true |
f72cd9912a6e17b5b29517176042c7b23c21a2e0 | 557 | py | Python | DiseaseIdentifier/DiseaseClassify/migrations/0001_initial.py | Rosan93/Disease-Identifier | 6bf311c833ecaa3769ebf09c6d752a9ec7ebfdb4 | [
"Apache-2.0"
] | null | null | null | DiseaseIdentifier/DiseaseClassify/migrations/0001_initial.py | Rosan93/Disease-Identifier | 6bf311c833ecaa3769ebf09c6d752a9ec7ebfdb4 | [
"Apache-2.0"
] | 18 | 2020-01-28T22:44:38.000Z | 2021-06-10T18:55:20.000Z | DiseaseIdentifier/DiseaseClassify/migrations/0001_initial.py | RoshanGurungSr/Disease-Identifier | 6bf311c833ecaa3769ebf09c6d752a9ec7ebfdb4 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-15 08:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UploadImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('predict_image', models.FileField(upload_to='')),
],
),
]
| 24.217391 | 114 | 0.574506 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UploadImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('predict_image', models.FileField(upload_to='')),
],
),
]
| true | true |
f72cdab5d2b024368267933b507634792f303004 | 472 | py | Python | venv/Scripts/easy_install-3.8-script.py | rushermonza/CoronavirusWebScraper | 4c7d31dbb51ae7d791c620673ca6f36d1ef43e3e | [
"MIT"
] | 1 | 2020-04-04T04:55:20.000Z | 2020-04-04T04:55:20.000Z | venv/Scripts/easy_install-3.8-script.py | AntoData/CoronavirusWebScraper | 4c7d31dbb51ae7d791c620673ca6f36d1ef43e3e | [
"MIT"
] | null | null | null | venv/Scripts/easy_install-3.8-script.py | AntoData/CoronavirusWebScraper | 4c7d31dbb51ae7d791c620673ca6f36d1ef43e3e | [
"MIT"
] | null | null | null | #!C:\Users\ingov\PycharmProjects\CoronavirusWebScraper\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| 36.307692 | 87 | 0.70339 |
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| true | true |
f72cdb329eab3ceed1d52538e15d5d30dc8b84c0 | 9,798 | py | Python | sphinx-doc/conf.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | sphinx-doc/conf.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | sphinx-doc/conf.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# HOOMD-blue documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 13 13:14:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax'
]
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
autodoc_docstring_signature = True;
autodoc_default_flags = ['inherited-members'];
autodoc_mock_imports = ['numpy'];
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
exclude_patterns = ['_build', '_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HOOMD-blue'
copyright = '2016, The Regents of the University of Michigan'
author = 'The Regents of the University of Michigan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.9'
# The full version, including alpha/beta/rc tags.
release = '2.1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'HOOMD-blue-doc'
# -- Options for LaTeX output ---------------------------------------------
# All keys are left at their Sphinx defaults (shown commented out below).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples:
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'HOOMD-blue.tex', 'HOOMD-blue Documentation',
     'The Regents of the University of Michigan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples:
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'hoomd-blue', 'HOOMD-blue Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples:
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' below is template
# boilerplate -- consider replacing it with a real one-line summary.
texinfo_documents = [
    (master_doc, 'HOOMD-blue', 'HOOMD-blue Documentation',
     author, 'HOOMD-blue', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Matplotlib backend used by the IPython directive; None lets it pick a
# default.  (Trailing semicolon removed -- legal but un-Pythonic.)
ipython_mplbackend = None
# ipython_execlines = ['import gsd.fl', 'import gsd.hoomd', 'import gsd.pygsd', 'import numpy']
| 32.66 | 95 | 0.716881 |
# -- Path setup and general configuration ----------------------------------
import sys
import os

# Make the package importable so sphinx.ext.autodoc can resolve it.
sys.path.insert(0, os.path.abspath('..'))

# Sphinx extension modules, as module-name strings.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
]
# Cross-reference targets in the Python standard-library documentation.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# autodoc behaviour.  (Trailing semicolons removed from the three lines
# below -- legal but un-Pythonic.)
autodoc_docstring_signature = True
autodoc_default_flags = ['inherited-members']
# Modules mocked during the doc build so importing the package does not
# require these dependencies to be installed.
autodoc_mock_imports = ['numpy']
templates_path = ['_templates']
exclude_patterns = ['_build', '_templates']
source_suffix = '.rst'
master_doc = 'index'

# -- Project information ---------------------------------------------------
project = 'HOOMD-blue'
copyright = '2016, The Regents of the University of Michigan'
author = 'The Regents of the University of Michigan'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.9'
# The full version, including alpha/beta/rc tags.
release = '2.1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# Language for content autogenerated by Sphinx (None = English default).
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# NOTE(review): this reassignment overrides the earlier
# `exclude_patterns = ['_build', '_templates']` and silently drops
# '_templates' -- confirm which value is intended.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# (Only takes effect when sphinx.ext.todo is enabled.)
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages ('sphinx_rtd_theme' is the
# Read the Docs theme; it must be installed separately).
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'HOOMD-blue-doc'
# -- Options for LaTeX output ---------------------------------------------
# All keys are left at their Sphinx defaults (shown commented out below).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples:
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'HOOMD-blue.tex', 'HOOMD-blue Documentation',
     'The Regents of the University of Michigan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples:
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'hoomd-blue', 'HOOMD-blue Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples:
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' below is template
# boilerplate -- consider replacing it with a real one-line summary.
texinfo_documents = [
    (master_doc, 'HOOMD-blue', 'HOOMD-blue Documentation',
     author, 'HOOMD-blue', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# Matplotlib backend used by the IPython directive; None lets it pick a
# default.  (Trailing semicolon removed -- legal but un-Pythonic.)
ipython_mplbackend = None
| true | true |
f72cdbc3ff47642e64c41dd8ddf1126531344739 | 1,481 | py | Python | trac/upgrades/db45.py | NetSpida/trac | 6ad75b926591e114ba504f6a72a38fd305d77fb1 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db45.py | NetSpida/trac | 6ad75b926591e114ba504f6a72a38fd305d77fb1 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db45.py | NetSpida/trac | 6ad75b926591e114ba504f6a72a38fd305d77fb1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import re
from trac.upgrades import backup_config_file
def do_upgrade(env, version, cursor):
    """Change [notification] ticket_subject_template and [notification]
    batch_subject_template to use syntax compatible with Jinja2.

    Rewrites ``$var`` placeholders as ``${var}``.  When at least one option
    is rewritten, the previous trac.ini is backed up with a ``.db45.bak``
    suffix before saving.

    :param env: the Trac environment (provides ``config`` and ``log``)
    :param version: schema version being upgraded (unused here)
    :param cursor: database cursor (unused here)
    """
    config = env.config
    section = 'notification'
    # Matches a `$` followed by a dotted identifier, e.g. `$ticket.id`.
    re_template_var = re.compile(r'\$([\w.]+)')

    def update_template(name):
        # Convert one option in place; returns True if its value changed.
        old_value = config.get(section, name)
        # Bug fix: the previous code used `.match()`, which anchors at the
        # start of the string, so templates with leading literal text
        # (e.g. "Ticket $ticket.id") were never converted.  `.search()`
        # finds a placeholder anywhere in the value.
        if old_value and re_template_var.search(old_value):
            new_value = re_template_var.sub(r'${\1}', old_value)
            env.log.info("Replaced value of [%s] %s: %s -> %s",
                         section, name, old_value, new_value)
            config.set(section, name, new_value)
            return True
        return False

    updated = update_template('ticket_subject_template')
    updated |= update_template('batch_subject_template')
    if updated:
        backup_config_file(env, '.db45.bak')
        config.save()
| 33.659091 | 72 | 0.665766 |
import re
from trac.upgrades import backup_config_file
def do_upgrade(env, version, cursor):
    """Migrate the [notification] subject templates to Jinja2-style syntax
    by rewriting ``$var`` placeholders as ``${var}``.

    A value is only rewritten when it *starts* with a placeholder; when
    anything changed, the previous trac.ini is backed up and saved.
    """
    config = env.config
    section = 'notification'
    # A `$` followed by a dotted identifier, e.g. `$ticket.id`.
    placeholder = re.compile(r'\$([\w.]+)')

    def _convert(option):
        # Rewrite one option in place; returns True when its value changed.
        current = config.get(section, option)
        if not current or not placeholder.match(current):
            return False
        converted = placeholder.sub(r'${\1}', current)
        env.log.info("Replaced value of [%s] %s: %s -> %s",
                     section, option, current, converted)
        config.set(section, option, converted)
        return True

    changed = _convert('ticket_subject_template')
    changed |= _convert('batch_subject_template')
    if changed:
        backup_config_file(env, '.db45.bak')
        config.save()
| true | true |
f72cdbf3665a2f5be56dde69f5e801552be52144 | 4,726 | py | Python | Code/PostProcessing/Cloud.py | ChimieleCode/OpenSees_Script | 58dcd187e5eda1bf92f8f2c4fc83b74d9108372d | [
"MIT"
] | null | null | null | Code/PostProcessing/Cloud.py | ChimieleCode/OpenSees_Script | 58dcd187e5eda1bf92f8f2c4fc83b74d9108372d | [
"MIT"
] | null | null | null | Code/PostProcessing/Cloud.py | ChimieleCode/OpenSees_Script | 58dcd187e5eda1bf92f8f2c4fc83b74d9108372d | [
"MIT"
] | null | null | null | import csv
import math
from ModelOptions import compute_local_fragility
from PostProcessing.SectionGaps import global_DCR_DS1, global_DCR_DS2, global_DCR_DST, demand_capacity_ratio_DS1_matrix, demand_capacity_ratio_DS2_matrix, demand_capacity_ratio_DST_matrix
from AnalysisDefinition.TimeHistory import spectral_response
# global_DCR_DS1 = [[1, 0.27896174747804386], [2, 0.28126931389396786], [3, 0.44095115696216836], [4, 0.33864425806026355], [5, 0.7645643659027233], [6, 0.8373640081441925], [7, 0.6888659383862444]]
# global_DCR_DS2 = [[1, 0.12933171227895135], [2, 0.13040154181101768], [3, 0.18752803478204755], [4, 0.13911329867854114], [5, 0.31770212049497765], [6, 0.38821710128044673], [7, 0.3193707099542446]]
# spectral_response = [ [1, 0.01], [2, 0.02], [3, 0.03], [4, 0.04], [5, 0.05], [6, 0.06], [7, 0.07], [8, 0.08], [9, 0.09], [10, 0.1], [11, 0.11], [12, 0.12], [13, 0.13], [14, 0.14], [15, 0.15], [16, 0.16], [17, 0.17], [18, 0.18], [19, 0.19], [20, 0.2], [21, 0.21], [22, 0.22], [23, 0.23], [24, 0.24], [25, 0.25], [26, 0.26], [27, 0.27], [28, 0.28], [29, 0.29], [30, 0.3], [31, 0.31], [32, 0.32], [33, 0.33], [34, 0.34], [35, 0.35], [36, 0.36], [37, 0.37], [38, 0.38], [39, 0.39], [40, 0.4], [41, 0.41], [42, 0.42], [43, 0.43], [44, 0.44], [45, 0.45], [46, 0.46], [47, 0.47], [48, 0.48], [49, 0.49], [50, 0.5], [51, 0.51], [52, 0.52], [53, 0.53], [54, 0.54], [55, 0.55], [56, 0.56], [57, 0.57], [58, 0.58], [59, 0.59], [60, 0.6], [61, 0.61], [62, 0.62], [63, 0.63], [64, 0.64], [65, 0.65], [66, 0.66], [67, 0.67], [68, 0.68], [69, 0.69], [70, 0.7], [71, 0.71], [72, 0.72], [73, 0.73], [74, 0.74], [75, 0.75], [76, 0.76], [77, 0.77], [78, 0.78], [79, 0.79], [80, 0.8], [81, 0.81], [82, 0.82], [83, 0.83], [84, 0.84], [85, 0.85], [86, 0.86], [87, 0.87], [88, 0.88], [89, 0.89], [90, 0.9], [91, 0.91], [92, 0.92], [93, 0.93], [94, 0.94], [95, 0.95], [96, 0.96], [97, 0.97], [98, 0.98], [99, 0.99], [100, 1], [101, 1.01], [102, 1.02], [103, 1.03], [104, 1.04], [105, 1.05], [106, 1.06], [107, 1.07], [108, 1.08], [109, 1.09], [110, 1.1], [111, 1.11], [112, 1.12], [113, 1.13], [114, 1.14], [115, 1.15], [116, 1.16], [117, 1.17], [118, 1.18], [119, 1.19], [120, 1.2], [121, 1.21], [122, 1.22], [123, 1.23], [124, 1.24], [125, 1.25], [126, 1.26], [127, 1.27], [128, 1.28], [129, 1.29], [130, 1.3], [131, 1.31], [132, 1.32], [133, 1.33], [134, 1.34], [135, 1.35], [136, 1.36], [137, 1.37], [138, 1.38], [139, 1.39], [140, 1.4], [141, 1.41] ]
# Assemble the (Time-History-ID, DCR, Sa) cloud points for each damage
# state.  Each row of `spectral_response` is [time_history_id, Sa] and the
# global DCR lists are index-aligned with it.
cloud_DST = []
cloud_DS1 = []
cloud_DS2 = []
# Global header
header = ['Time History ID', 'DCR', 'Sa']
# Prepare the rows to be written
for i, point in enumerate(spectral_response):
    cloud_DS1.append([point[0], global_DCR_DS1[i][1], point[1]])
    cloud_DS2.append([point[0], global_DCR_DS2[i][1], point[1]])
    cloud_DST.append([point[0], global_DCR_DST[i][1], point[1]])

# Write one CSV per global damage state.  Forward slashes are used so the
# paths work on every platform (the previous backslash literals were
# Windows-only and contained invalid escape sequences such as '\C'), and
# the three identical write blocks are collapsed into one loop.
for state_name, cloud in (('DS1', cloud_DS1), ('DS2', cloud_DS2), ('DST', cloud_DST)):
    with open(f'Output/Cloud/cloud_{state_name}.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(header)
        writer.writerows(cloud)

# Cloud points of individual connections
if compute_local_fragility:
    # Local header.
    # NOTE(review): 'DRCT' looks like a typo for 'DCRT' (total DCR) -- it is
    # kept as-is because downstream consumers may rely on the column name.
    header = ['Time History ID', 'Sa', 'DCR1', 'DCR2', 'DRCT']

    def floor(i):
        """Floor level of connection column i (two connections per floor)."""
        return math.floor(i / 2)

    def vertical(i):
        """'ext' for even connection columns, 'int' for odd ones."""
        return 'ext' if (i % 2) == 0 else 'int'

    # One CSV per (floor, exterior/interior) connection column.
    for i in range(len(demand_capacity_ratio_DS1_matrix[0])):
        path = f'Output/Connection_Fragility/Data/Cloud/Cloud_{floor(i)}_{vertical(i)}.csv'
        with open(path, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(header)
            for j in range(len(demand_capacity_ratio_DS1_matrix)):
                writer.writerow([
                    spectral_response[j][0],
                    spectral_response[j][1],
                    demand_capacity_ratio_DS1_matrix[j][i],
                    demand_capacity_ratio_DS2_matrix[j][i],
                    demand_capacity_ratio_DST_matrix[j][i],
                ])
| 45.883495 | 1,734 | 0.566018 | import csv
import math
from ModelOptions import compute_local_fragility
from PostProcessing.SectionGaps import global_DCR_DS1, global_DCR_DS2, global_DCR_DST, demand_capacity_ratio_DS1_matrix, demand_capacity_ratio_DS2_matrix, demand_capacity_ratio_DST_matrix
from AnalysisDefinition.TimeHistory import spectral_response
cloud_DST = []
cloud_DS1 = []
cloud_DS2 = []
header = ['Time History ID', 'DCR', 'Sa']
for i, point in enumerate(spectral_response):
cloud_DS1.append([point[0], global_DCR_DS1[i][1], point[1]])
cloud_DS2.append([point[0], global_DCR_DS2[i][1], point[1]])
cloud_DST.append([point[0], global_DCR_DST[i][1], point[1]])
with open('Output\Cloud\cloud_DS1.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DS1:
writer.writerow(point)
with open('Output\Cloud\cloud_DS2.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DS2:
writer.writerow(point)
with open('Output\Cloud\cloud_DST.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DST:
writer.writerow(point)
if compute_local_fragility:
header = ['Time History ID', 'Sa', 'DCR1', 'DCR2', 'DRCT']
def floor(i):
return math.floor(i/2)
def vertical(i):
if (i % 2) == 0:
return 'ext'
else:
return 'int'
for i in range(len(demand_capacity_ratio_DS1_matrix[0])):
with open(f'Output\Connection_Fragility\Data\Cloud\Cloud_{floor(i)}_{vertical(i)}.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for j in range(len(demand_capacity_ratio_DS1_matrix)):
row = []
row.append(spectral_response[j][0])
row.append(spectral_response[j][1])
row.append(demand_capacity_ratio_DS1_matrix[j][i])
row.append(demand_capacity_ratio_DS2_matrix[j][i])
row.append(demand_capacity_ratio_DST_matrix[j][i])
writer.writerow(row)
| true | true |
f72cdcf0abad5566c247daf49ce163498192f41a | 727 | py | Python | stats/data.py | 1in1/Python-Baseball | 4c76d65330ff7eb88c87057be02bbddb50dd325b | [
"MIT"
] | null | null | null | stats/data.py | 1in1/Python-Baseball | 4c76d65330ff7eb88c87057be02bbddb50dd325b | [
"MIT"
] | null | null | null | stats/data.py | 1in1/Python-Baseball | 4c76d65330ff7eb88c87057be02bbddb50dd325b | [
"MIT"
] | null | null | null | import os
import glob
import pandas as pd
# Fixed column names for the Retrosheet event files read below.
_EVENT_COLUMNS = ['type', 'multi2', 'multi3', 'multi4', 'multi5', 'multi6', 'event']

# Load every .EVE event file under ./games, in deterministic (sorted) order,
# and concatenate them into a single frame.
game_files = sorted(glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE')))
game_frames = [pd.read_csv(path, names=_EVENT_COLUMNS) for path in game_files]
games = pd.concat(game_frames)

# '??' marks an unknown value in column multi5; blank those cells out.
games.loc[games['multi5'] == '??', ['multi5']] = ''

# Pull the game identifier (e.g. 'NLS201707020') and its 4-digit year out of
# multi2, then forward-fill so every event row carries its game's id/year.
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns = ['game_id', 'year']

# Attach the identifier columns, replace remaining NaNs with a space, and
# store the event type as a categorical for cheaper grouping later.
games = pd.concat([games, identifiers], axis=1, sort=False)
games = games.fillna(' ')
games.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])
print(games.head())
| 30.291667 | 108 | 0.678129 | import os
import glob
import pandas as pd
game_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))
game_files.sort()
game_frames = []
for game_file in game_files:
game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])
game_frames.append(game_frame)
games = pd.concat(game_frames)
games.loc[games['multi5'] == '??', ['multi5']] = ''
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns = ['game_id', 'year']
games = pd.concat([games, identifiers], axis=1, sort=False)
games = games.fillna(' ')
games.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])
print(games.head())
| true | true |
f72cdd30f4d4087803dd4184985189860ea51326 | 2,545 | py | Python | examples/other_examples/PyFstat_example_twoF_cumulative.py | RobertRosca/PyFstat | 1c9568bb3dc87c3d33aeb41b3f572e9990665372 | [
"MIT"
] | 16 | 2020-01-28T08:40:02.000Z | 2022-03-02T05:26:50.000Z | examples/other_examples/PyFstat_example_twoF_cumulative.py | RobertRosca/PyFstat | 1c9568bb3dc87c3d33aeb41b3f572e9990665372 | [
"MIT"
] | 294 | 2020-02-04T17:15:26.000Z | 2022-03-30T13:53:48.000Z | examples/other_examples/PyFstat_example_twoF_cumulative.py | RobertRosca/PyFstat | 1c9568bb3dc87c3d33aeb41b3f572e9990665372 | [
"MIT"
] | 10 | 2020-02-04T16:57:55.000Z | 2022-02-03T00:12:25.000Z | """
Cumulative coherent 2F
======================
Compute the cumulative coherent F-statistic of a signal candidate.
"""
import os
import numpy as np
import pyfstat
from pyfstat.helper_functions import get_predict_fstat_parameters_from_dict
# Output naming: all artifacts go under PyFstat_example_data/<label>/.
label = "PyFstat_example_twoF_cumulative"
outdir = os.path.join("PyFstat_example_data", label)
# Properties of the GW data
gw_data = {
    "sqrtSX": 1e-23,
    "tstart": 1000000000,
    "duration": 100 * 86400,
    "detectors": "H1,L1",
    "Band": 4,
    "Tsft": 1800,
}
# Properties of the signal
# depth sets the signal strength relative to the noise: h0 = sqrtSX / depth
# (see amplitude_parameters below).
depth = 100
phase_parameters = {
    "F0": 30.0,
    "F1": -1e-10,
    "F2": 0,
    "Alpha": np.radians(83.6292),
    "Delta": np.radians(22.0144),
    "tref": gw_data["tstart"],
    # The remaining keys describe the binary orbit; tp is placed at the
    # midpoint of the observation span.
    "asini": 10,
    "period": 10 * 3600 * 24,
    "tp": gw_data["tstart"] + gw_data["duration"] / 2.0,
    "ecc": 0,
    "argp": 0,
}
amplitude_parameters = {
    "h0": gw_data["sqrtSX"] / depth,
    "cosi": 1,
    "phi": np.pi,
    "psi": np.pi / 8,
}
# Translate the combined signal parameters into the input format expected by
# the F-stat prediction helper.
PFS_input = get_predict_fstat_parameters_from_dict(
    {**phase_parameters, **amplitude_parameters}
)
# Pop tref here, since it is passed explicitly below and is not needed
# inside phase_parameters afterwards.
tref = phase_parameters.pop("tref")
# Generate SFT data containing the injected signal.
data = pyfstat.BinaryModulatedWriter(
    label=label,
    outdir=outdir,
    tref=tref,
    **gw_data,
    **phase_parameters,
    **amplitude_parameters,
)
data.make_data()
# The predicted twoF, given by lalapps_predictFstat, can be accessed by:
twoF = data.predict_fstat()
print("Predicted twoF value: {}\n".format(twoF))
# Create a search object for each of the possible SFT combinations
# (H1 only, L1 only, H1 + L1).
ifo_constraints = ["L1", "H1", None]
compute_fstat_per_ifo = [
    pyfstat.ComputeFstat(
        # Restrict the SFT pattern to one detector, or take all SFTs when
        # the constraint is None.
        sftfilepattern=os.path.join(
            data.outdir,
            (f"{ifo_constraint[0]}*.sft" if ifo_constraint is not None else "*.sft"),
        ),
        tref=data.tref,
        binary=phase_parameters.get("asini", 0),
        minCoverFreq=-0.5,
        maxCoverFreq=-0.5,
    )
    for ifo_constraint in ifo_constraints
]
# Plot the cumulative 2F for each detector combination; the last entry
# (index 2) is the combined H1 + L1 search.
for ind, compute_f_stat in enumerate(compute_fstat_per_ifo):
    compute_f_stat.plot_twoF_cumulative(
        label=label + (f"_{ifo_constraints[ind]}" if ind < 2 else "_H1L1"),
        outdir=outdir,
        savefig=True,
        CFS_input=phase_parameters,
        PFS_input=PFS_input,
        custom_ax_kwargs={
            "title": "How does 2F accumulate over time?",
            "label": "Cumulative 2F"
            + (f" {ifo_constraints[ind]}" if ind < 2 else " H1 + L1"),
        },
    )
| 25.45 | 85 | 0.633792 |
import os
import numpy as np
import pyfstat
from pyfstat.helper_functions import get_predict_fstat_parameters_from_dict
# Output naming: all artifacts go under PyFstat_example_data/<label>/.
label = "PyFstat_example_twoF_cumulative"
outdir = os.path.join("PyFstat_example_data", label)
# Properties of the GW data to be simulated.
gw_data = {
    "sqrtSX": 1e-23,
    "tstart": 1000000000,
    "duration": 100 * 86400,
    "detectors": "H1,L1",
    "Band": 4,
    "Tsft": 1800,
}
# depth sets the signal strength relative to the noise: h0 = sqrtSX / depth
# (see amplitude_parameters below).
depth = 100
# Phase-evolution parameters of the injected signal; the asini/period/tp/
# ecc/argp keys describe the binary orbit, with tp at the midpoint of the
# observation span.
phase_parameters = {
    "F0": 30.0,
    "F1": -1e-10,
    "F2": 0,
    "Alpha": np.radians(83.6292),
    "Delta": np.radians(22.0144),
    "tref": gw_data["tstart"],
    "asini": 10,
    "period": 10 * 3600 * 24,
    "tp": gw_data["tstart"] + gw_data["duration"] / 2.0,
    "ecc": 0,
    "argp": 0,
}
amplitude_parameters = {
    "h0": gw_data["sqrtSX"] / depth,
    "cosi": 1,
    "phi": np.pi,
    "psi": np.pi / 8,
}
# Translate the combined signal parameters into the input format expected by
# the F-stat prediction helper.
PFS_input = get_predict_fstat_parameters_from_dict(
    {**phase_parameters, **amplitude_parameters}
)
# Pop tref: it is passed explicitly to the writer below.
tref = phase_parameters.pop("tref")
# Generate SFT data containing the injected signal.
data = pyfstat.BinaryModulatedWriter(
    label=label,
    outdir=outdir,
    tref=tref,
    **gw_data,
    **phase_parameters,
    **amplitude_parameters,
)
data.make_data()
# The predicted twoF, given by lalapps_predictFstat, can be accessed by:
twoF = data.predict_fstat()
print("Predicted twoF value: {}\n".format(twoF))
# Create a search object for each of the possible SFT combinations
# (H1 only, L1 only, H1 + L1; None means "no detector constraint").
ifo_constraints = ["L1", "H1", None]
compute_fstat_per_ifo = [
    pyfstat.ComputeFstat(
        sftfilepattern=os.path.join(
            data.outdir,
            (f"{ifo_constraint[0]}*.sft" if ifo_constraint is not None else "*.sft"),
        ),
        tref=data.tref,
        binary=phase_parameters.get("asini", 0),
        minCoverFreq=-0.5,
        maxCoverFreq=-0.5,
    )
    for ifo_constraint in ifo_constraints
]
# Plot the cumulative 2F for each detector combination; the last entry
# (index 2) is the combined H1 + L1 search.
for ind, compute_f_stat in enumerate(compute_fstat_per_ifo):
    compute_f_stat.plot_twoF_cumulative(
        label=label + (f"_{ifo_constraints[ind]}" if ind < 2 else "_H1L1"),
        outdir=outdir,
        savefig=True,
        CFS_input=phase_parameters,
        PFS_input=PFS_input,
        custom_ax_kwargs={
            "title": "How does 2F accumulate over time?",
            "label": "Cumulative 2F"
            + (f" {ifo_constraints[ind]}" if ind < 2 else " H1 + L1"),
        },
    )
| true | true |
f72cdd942be71ea6f27d319f22d0edf089185019 | 69,689 | py | Python | src/transformers/models/unispeech/modeling_unispeech.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | 2 | 2022-01-12T13:10:05.000Z | 2022-01-12T13:10:28.000Z | src/transformers/models/unispeech/modeling_unispeech.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | 2 | 2022-03-08T04:58:59.000Z | 2022-03-19T03:45:14.000Z | src/transformers/models/unispeech/modeling_unispeech.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch UniSpeech model."""
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_unispeech import UniSpeechConfig
# Module-level logger, namespaced to this module.
logger = logging.get_logger(__name__)
# Position of the hidden-states entry in the base-model output tuple.
# NOTE(review): inferred from the constant name -- confirm against the
# downstream heads that index outputs with it.
_HIDDEN_STATES_START_POSITION = 2
# General docstring placeholders used by add_code_sample_docstrings.
_CONFIG_FOR_DOC = "UniSpeechConfig"
_PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
# Base docstring: checkpoint and expected hidden-state shape for the samples.
_CHECKPOINT_FOR_DOC = "patrickvonplaten/unispeech-large-1500h-cv-timit"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]
# CTC docstring: expected transcription and loss for the sample snippet.
_CTC_EXPECTED_OUTPUT = "'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'"
_CTC_EXPECTED_LOSS = 17.17
# Audio classification docstring placeholders.
_FEAT_EXTRACTOR_FOR_DOC = "Wav2Vec2FeatureExtractor"
_SEQ_CLASS_CHECKPOINT = "hf-internal-testing/tiny-random-unispeech"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"  # TODO(anton) - could you quickly fine-tune a KS WavLM Model
_SEQ_CLASS_EXPECTED_LOSS = 0.66  # TODO(anton) - could you quickly fine-tune a KS WavLM Model
# Checkpoints available on the Hugging Face Hub for this architecture.
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/unispeech-large-1500h-cv",
    "microsoft/unispeech-large-multi-lingual-1500h-cv",
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
]
@dataclass
class UniSpeechForPreTrainingOutput(ModelOutput):
    """
    Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.
    Args:
        loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
            paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss.
        projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
            Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
            projected quantized states.
        projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
            Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
            target vectors for contrastive loss.
        codevector_perplexity (`torch.FloatTensor`):
            Perplexity of the quantized codevector distribution (see
            `UniSpeechGumbelVectorQuantizer._compute_perplexity`).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    loss: Optional[torch.FloatTensor] = None
    projected_states: torch.FloatTensor = None
    projected_quantized_states: torch.FloatTensor = None
    codevector_perplexity: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
# this case can only happen if `input_length` is strictly smaller then
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechNoLayerNormConvLayer(nn.Module):
    """Feature-extractor stage: 1-D convolution followed by the activation, with no normalization."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes a single input channel; subsequent layers chain the configured conv dims.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # conv -> activation, nothing else.
        return self.activation(self.conv(hidden_states))
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechLayerNormConvLayer(nn.Module):
    """Feature-extractor stage: 1-D convolution, LayerNorm over the channel dim, then activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes a single input channel; subsequent layers chain the configured conv dims.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        convolved = self.conv(hidden_states)
        # LayerNorm normalizes the last dimension, so swap channels to the end and back again.
        normalized = self.layer_norm(convolved.transpose(-2, -1)).transpose(-2, -1)
        return self.activation(normalized)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechGroupNormConvLayer(nn.Module):
    """Feature-extractor stage: 1-D convolution, GroupNorm (one group per channel), then activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes a single input channel; subsequent layers chain the configured conv dims.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
        # num_groups == num_channels: each channel is normalized independently.
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        return self.activation(self.layer_norm(self.conv(hidden_states)))
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeech
class UniSpeechPositionalConvEmbedding(nn.Module):
    """Convolutional positional embedding computed over the time axis (conv -> trim -> activation)."""
    def __init__(self, config):
        super().__init__()
        # Grouped "same"-padded conv over time; hidden size is preserved.
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )
        if is_deepspeed_zero3_enabled():
            import deepspeed
            # NOTE(review): under ZeRO stage 3 the conv weight may be partitioned across ranks,
            # so it is gathered before weight norm is applied, and the resulting weight_v/weight_g
            # parameters are registered with deepspeed explicitly.
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
        # Removes the extra trailing frame produced by even-kernel "same" padding.
        self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        # Conv1d expects (batch, channels, time).
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)
        # Back to (batch, time, channels).
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeech
class UniSpeechSamePadLayer(nn.Module):
    """Trims the trailing timestep introduced by "same" padding with an even conv kernel."""

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # An even kernel with symmetric padding produces one surplus frame on the right.
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove == 0:
            return hidden_states
        return hidden_states[:, :, : -self.num_pad_remove]
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeech
class UniSpeechFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""
    def __init__(self, config):
        super().__init__()
        if config.feat_extract_norm == "group":
            # Group norm only on the very first conv layer; the remaining layers are un-normalized.
            conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [
                UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == "layer":
            # Layer norm on every conv layer.
            conv_layers = [
                UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True
    def _freeze_parameters(self):
        # Stop gradients from flowing into any of the feature encoder's parameters.
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False
    def forward(self, input_values):
        # Add a channel dimension: (batch, time) -> (batch, 1, time) for Conv1d.
        hidden_states = input_values[:, None]
        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True
        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:
                # Wrap each conv layer so torch checkpointing can re-run it in the backward pass.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(conv_layer),
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)
        return hidden_states
class UniSpeechFeatureExtractor(UniSpeechFeatureEncoder):
    """Deprecated alias of [`UniSpeechFeatureEncoder`], kept only for backward compatibility."""

    def __init__(self, config):
        super().__init__(config)
        warnings.warn(
            # Fixed typo in the user-facing message: "depreciated" -> "deprecated".
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeech
class UniSpeechFeatureProjection(nn.Module):
    """Projects extracted conv features to the transformer hidden size.

    Returns both the projected states and the layer-normalized (pre-projection)
    states; the latter are needed for quantization.
    """

    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # Normalize first — the un-projected, normalized features are used for quantization.
        norm_hidden_states = self.layer_norm(hidden_states)
        projected = self.dropout(self.projection(norm_hidden_states))
        return projected, norm_hidden_states
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeech
class UniSpeechAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim) — applied to queries before the dot product.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """
        Input shape: Batch x Time x Channel

        Args:
            hidden_states: query source, shape `(bsz, tgt_len, embed_dim)`.
            key_value_states: when given, keys/values come from here (cross-attention).
            past_key_value: cached `(key_states, value_states)` from previous steps.
            attention_mask: additive mask of shape `(bsz, 1, tgt_len, src_len)`.
            layer_head_mask: multiplicative per-head mask of shape `(num_heads,)`.
            output_attentions: also return the (reshaped) attention weights.
        """
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, _ = hidden_states.size()
        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)
        # Flatten batch and head dims so attention can be computed with a single bmm.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned aross GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeech
class UniSpeechFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayer(nn.Module):
    """Transformer encoder layer with post-layer-norm ordering (attention -> LN, FFN -> LN)."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        attn_out, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        # Residual around self-attention, then normalize.
        hidden_states = self.layer_norm(residual + self.dropout(attn_out))
        # Residual around the feed-forward block, then normalize.
        hidden_states = self.final_layer_norm(hidden_states + self.feed_forward(hidden_states))
        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayerStableLayerNorm(nn.Module):
    """Transformer encoder layer with pre-layer-norm ("stable") ordering (LN -> attention, LN -> FFN)."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        # Normalize *before* attention (pre-LN), then add the residual back.
        attn_out, attn_weights, _ = self.attention(
            self.layer_norm(hidden_states), attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = residual + self.dropout(attn_out)
        # Pre-LN feed-forward with residual; no trailing normalization in this variant.
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech
class UniSpeechEncoder(nn.Module):
    """Stack of post-layer-norm encoder layers with convolutional positional embeddings and LayerDrop."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if attention_mask is not None:
            # make sure padded tokens output 0
            # NOTE(review): `~attention_mask` implies the mask is boolean here — confirm upstream dtype.
            hidden_states[~attention_mask] = 0.0
            # extend attention_mask: 1 -> 0.0 (keep), 0 -> -10000.0 (additive masking before softmax)
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )
        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                # Dropped layer: emit placeholder attentions so output tuples stay aligned.
                layer_outputs = (None, None)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderStableLayerNorm(nn.Module):
    """Stack of pre-layer-norm encoder layers; the final LayerNorm is applied once after all layers."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if attention_mask is not None:
            # make sure padded tokens are not attended to
            # NOTE(review): `~attention_mask` implies the mask is boolean here — confirm upstream dtype.
            hidden_states[~attention_mask] = 0
            # extend attention_mask: 1 -> 0.0 (keep), 0 -> -10000.0 (additive masking before softmax)
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )
        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        # No LayerNorm here: in the stable (pre-LN) variant, normalization happens inside each layer
        # and once more after the loop below.
        hidden_states = self.dropout(hidden_states)
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                # Dropped layer: emit placeholder attentions so output tuples stay aligned.
                layer_outputs = (None, None)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        # Final normalization for the pre-LN architecture.
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class UniSpeechGumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.

    Training uses a straight-through gumbel-softmax sample; evaluation uses a hard argmax.
    Returns `(codevectors, perplexity)` where codevectors has shape
    `(batch_size, sequence_length, codevector_dim)`.
    """
    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group
        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`"
                f" {self.num_groups} for concatenation"
            )
        # storage for codebook variables (codewords)
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        # Maps conv features to one logit per (group, codevector) pair.
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
        # can be decayed for training
        self.temperature = 2
    @staticmethod
    def _compute_perplexity(probs):
        # exp(entropy) of the mean code distribution, summed over groups.
        marginal_probs = probs.mean(dim=0)
        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity
    def forward(self, hidden_states):
        batch_size, sequence_length, hidden_size = hidden_states.shape
        # project to codevector dim
        hidden_states = self.weight_proj(hidden_states)
        # One softmax/argmax per (frame, group): flatten to (B*T*G, num_vars).
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
        if self.training:
            # sample code vector probs via gumbel in differentiateable way
            codevector_probs = nn.functional.gumbel_softmax(
                hidden_states.float(), tau=self.temperature, hard=True
            ).type_as(hidden_states)
            # compute perplexity
            # Perplexity uses the *soft* distribution even though the sample is hard.
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist)
        else:
            # take argmax in non-differentiable way
            # comptute hard codevector distribution (one hot)
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
            perplexity = self._compute_perplexity(codevector_probs)
        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # use probs to retrieve codevectors
        # One-hot weights select exactly one codevector per group; sum over the vars axis
        # then concatenate the groups back into the full codevector dimension.
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
        return codevectors, perplexity
class UniSpeechPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = UniSpeechConfig
    base_model_prefix = "unispeech"
    main_input_name = "input_values"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        """Initialize the weights"""
        # gumbel softmax requires special init
        if isinstance(module, UniSpeechGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, UniSpeechPositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, UniSpeechFeatureProjection):
            # Uniform init scaled by fan-in of the projection layer.
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)
    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """
        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch_int_div(input_length - kernel_size, stride) + 1
        # Apply the formula once per conv layer of the feature encoder.
        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
        return input_lengths
    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        """Downsamples a (right-padded) sample-level attention mask to the conv feature frame rate."""
        # Effectively attention_mask.sum(-1), but not inplace to be able to run
        # on inference mode.
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
        batch_size = attention_mask.shape[0]
        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values before the output lengths idxs are attended to
        # (set a 1 at each sequence's last valid frame, then flip/cumsum/flip fills everything before it)
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
    def _set_gradient_checkpointing(self, module, value=False):
        # Feature encoder and both encoder variants support checkpointed forward passes.
        if isinstance(module, (UniSpeechEncoder, UniSpeechEncoderStableLayerNorm, UniSpeechFeatureEncoder)):
            module.gradient_checkpointing = value
UNISPEECH_START_DOCSTRING = r"""
UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled
Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,
Michael Zeng, Xuedong Huang.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving etc.).
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UNISPEECH_INPUTS_DOCSTRING = r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
soundfile*). To prepare the array into *input_values*, the [`UniSpeechProcessor`] should be used for
padding and conversion into a tensor of type *torch.FloatTensor*. See [`UniSpeechProcessor.__call__`] for
details.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
<Tip warning={true}>
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
**not** be passed to avoid degraded performance when doing batched inference. For such models
`input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
models also yield slightly different results depending on whether `input_values` is padded or not.
</Tip>
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.",
    UNISPEECH_START_DOCSTRING,
)
class UniSpeechModel(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = UniSpeechFeatureEncoder(config)
        self.feature_projection = UniSpeechFeatureProjection(config)
        # Learned embedding that replaces masked (SpecAugment) positions; only
        # needed when some form of masking is enabled in the config.
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
        if config.do_stable_layer_norm:
            self.encoder = UniSpeechEncoderStableLayerNorm(config)
        else:
            self.encoder = UniSpeechEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()
    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).

        NOTE: mutates `hidden_states` in place at the masked positions.
        """
        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states
        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()
        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            # Broadcast the per-feature mask across the whole time axis.
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0
        return hidden_states
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Wav2Vec2BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Conv feature encoder returns (batch, channels, frames); transpose to (batch, frames, channels).
        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)
        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
        hidden_states, extract_features = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(
            hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
        )
        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = encoder_outputs[0]
        if not return_dict:
            return (hidden_states, extract_features) + encoder_outputs[1:]
        return Wav2Vec2BaseModelOutput(
            last_hidden_state=hidden_states,
            extract_features=extract_features,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a vector-quantization module and ctc loss for pre-training.""", UNISPEECH_START_DOCSTRING
)
class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
        self.quantizer = UniSpeechGumbelVectorQuantizer(config)
        # Two-stage projection: codevector dim -> proj dim -> hidden size.
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
        self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)
        self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)
        self.dropout = nn.Dropout(config.final_dropout)
        # Initialize weights and apply final processing
        self.post_init()
    def set_gumbel_temperature(self, temperature: int):
        """
        Set the Gumbel softmax temperature to a given value. Only necessary for training
        """
        self.quantizer.temperature = temperature
    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.

        Deprecated alias of `freeze_feature_encoder`.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()
    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 1,
    ):
        """
        Compute logits for contrastive loss based using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
        """
        target_features = torch.cat([target_features, negative_features], dim=0)
        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
        logits = logits.type_as(target_features)
        # apply temperature
        logits = logits / temperature
        return logits
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, UniSpeechForPreTrainingOutput]:
        r"""
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
        sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):
            Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.
            Required input for pre-training.
        Returns:
        Example:
        ```python
        >>> import torch
        >>> from transformers import Wav2Vec2FeatureExtractor, UniSpeechForPreTraining
        >>> from transformers.models.unispeech.modeling_unispeech import _compute_mask_indices
        >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        ...     "hf-internal-testing/tiny-random-unispeech-sat"
        ... )
        >>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")
        >>> # TODO: Add full pretraining example
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        transformer_features = outputs[0]
        # quantize all (unmasked) extracted features and project to final vq dim
        extract_features = self.dropout_features(outputs[1])
        quantized_features, codevector_perplexity = self.quantizer(extract_features)
        # project quantized features twice
        quantized_features = self.project_q(quantized_features)
        quantized_features = self.project_hid(quantized_features)
        # Randomly replace each (batch, time) position's transformer feature with its
        # quantized counterpart with probability `config.replace_prob` (mixed-unit training).
        prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(
            self.config.replace_prob
        )
        prob_replace_matrix = prob_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)
        sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)
        logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (
            quantized_features.masked_fill(~sampled_replace_matrix, 0.0)
        )
        # project to ctc units
        logits = self.dropout(logits)
        logits = self.ctc_proj(logits)
        # TODO(PVP) - add negative sampling & loss computation
        loss = None
        if not return_dict:
            if loss is not None:
                return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
            return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
        return UniSpeechForPreTrainingOutput(
            loss=loss,
            projected_states=transformer_features,
            projected_quantized_states=quantized_features,
            codevector_perplexity=codevector_perplexity,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    UNISPEECH_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH
class UniSpeechForCTC(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout = nn.Dropout(config.final_dropout)
        # `vocab_size` is required for the CTC head; fail early with a clear message.
        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                "or define `vocab_size` of your model's configuration."
            )
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
        # Initialize weights and apply final processing
        self.post_init()
    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.

        Deprecated alias of `freeze_feature_encoder`.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            if labels.max() >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)
            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
            # cudnn's CTC kernel is non-deterministic / restrictive, so disable it here.
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@add_start_docstrings(
    """
    UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """,
    UNISPEECH_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH
class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)"
            )
        self.unispeech = UniSpeechModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            # Learnable convex-ish combination over all hidden layers (normalized by softmax in forward).
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.

        Deprecated alias of `freeze_feature_encoder`.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()
    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech.parameters():
            param.requires_grad = False
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_SEQ_CLASS_CHECKPOINT,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Weighted layer sum needs all hidden states regardless of the caller's request.
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]
        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            # Mean-pool only over real (non-padded) frames.
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 43.967823 | 164 | 0.676606 |
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_unispeech import UniSpeechConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
_CONFIG_FOR_DOC = "UniSpeechConfig"
_PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
_CHECKPOINT_FOR_DOC = "patrickvonplaten/unispeech-large-1500h-cv-timit"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]
_CTC_EXPECTED_OUTPUT = "'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'"
_CTC_EXPECTED_LOSS = 17.17
_FEAT_EXTRACTOR_FOR_DOC = "Wav2Vec2FeatureExtractor"
_SEQ_CLASS_CHECKPOINT = "hf-internal-testing/tiny-random-unispeech"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
_SEQ_CLASS_EXPECTED_LOSS = 0.66
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/unispeech-large-1500h-cv",
"microsoft/unispeech-large-multi-lingual-1500h-cv",
]
@dataclass
class UniSpeechForPreTrainingOutput(ModelOutput):
    """
    Output type of [`UniSpeechForPreTraining`]: the pre-training loss (when computed),
    the transformer and quantized feature projections, the quantizer's codevector
    perplexity, and the optional per-layer hidden states / attentions.
    """
    # Pre-training loss; currently always None in `UniSpeechForPreTraining.forward` (TODO upstream).
    loss: Optional[torch.FloatTensor] = None
    # Transformer hidden states, shape (batch, seq_len, hidden_size).
    projected_states: torch.FloatTensor = None
    # Quantized features after project_q/project_hid, same shape as `projected_states`.
    projected_quantized_states: torch.FloatTensor = None
    # Scalar diversity measure of the codebook usage.
    codevector_perplexity: torch.FloatTensor = None
    # Tuple of hidden states per layer, only when output_hidden_states=True.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Tuple of attention maps per layer, only when output_attentions=True.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
num_masked_span = compute_num_masked_span(input_length)
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
if len(spec_aug_mask_idx) == 0:
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
class UniSpeechNoLayerNormConvLayer(nn.Module):
    """Feature-encoder layer: a strided `Conv1d` followed by an activation, with no normalization."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes the raw single-channel waveform; deeper layers chain conv dims.
        self.in_conv_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        """Apply convolution then the configured activation to `(batch, channels, frames)` input."""
        return self.activation(self.conv(hidden_states))
class UniSpeechLayerNormConvLayer(nn.Module):
    """Feature-encoder layer: strided `Conv1d`, channel-wise `LayerNorm`, then activation."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes the raw single-channel waveform; deeper layers chain conv dims.
        self.in_conv_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        """Convolve, layer-normalize over the channel dimension, then activate."""
        convolved = self.conv(hidden_states)
        # LayerNorm normalizes the last dim, so move channels last and back again.
        normed = self.layer_norm(convolved.transpose(-2, -1)).transpose(-2, -1)
        return self.activation(normed)
class UniSpeechGroupNormConvLayer(nn.Module):
    """Feature-encoder layer: strided `Conv1d`, per-channel `GroupNorm`, then activation."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes the raw single-channel waveform; deeper layers chain conv dims.
        self.in_conv_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
        # num_groups == num_channels makes this an instance-norm-like per-channel norm.
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
    def forward(self, hidden_states):
        """Convolve, group-normalize, then activate `(batch, channels, frames)` input."""
        return self.activation(self.layer_norm(self.conv(hidden_states)))
class UniSpeechPositionalConvEmbedding(nn.Module):
    """Convolutional relative positional embedding: a grouped, weight-normed Conv1d over time."""
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )
        if is_deepspeed_zero3_enabled():
            import deepspeed
            # Under ZeRO-3 the weight is sharded; gather it on rank 0 before applying
            # weight_norm, then register the new weight_v/weight_g as external params.
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
        # Trims the extra frame produced by "same" padding with an even kernel width.
        self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        # Input is (batch, seq, hidden); Conv1d wants channels in dim 1.
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
class UniSpeechSamePadLayer(nn.Module):
    """Trims the trailing frame that "same" padding produces for even kernel widths."""
    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # An even kernel width yields one surplus output frame; odd widths need no trim.
        self.num_pad_remove = 0 if num_conv_pos_embeddings % 2 else 1
    def forward(self, hidden_states):
        """Drop the last `num_pad_remove` frames of a `(batch, channels, frames)` tensor."""
        if self.num_pad_remove:
            return hidden_states[:, :, : -self.num_pad_remove]
        return hidden_states
class UniSpeechFeatureEncoder(nn.Module):
    """Constructs latent features from the raw audio waveform via a stack of Conv1d layers."""
    def __init__(self, config):
        super().__init__()
        # "group": GroupNorm only on the first layer; "layer": LayerNorm on every layer.
        if config.feat_extract_norm == "group":
            conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [
                UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == "layer":
            conv_layers = [
                UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True
    def _freeze_parameters(self):
        # Permanently stop gradients for the whole encoder (used by freeze_feature_encoder).
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False
    def forward(self, input_values):
        # (batch, samples) -> (batch, 1, samples): the first conv expects a channel dim.
        hidden_states = input_values[:, None]
        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True
        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:
                def create_custom_forward(module):
                    # Bind `module` now to avoid the late-binding-closure pitfall in the loop.
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(conv_layer),
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)
        return hidden_states
class UniSpeechFeatureExtractor(UniSpeechFeatureEncoder):
    """Deprecated alias of `UniSpeechFeatureEncoder`, kept for backward compatibility.

    Emits a `FutureWarning` on instantiation; behavior is otherwise identical
    to the parent class.
    """

    def __init__(self, config):
        super().__init__(config)
        # Fix: the warning previously misspelled "deprecated" as "depreciated".
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
class UniSpeechFeatureProjection(nn.Module):
    """Project extracted conv features into the transformer hidden size.

    Returns both the projected states and the pre-projection, layer-normed
    features; the caller feeds the latter to the quantizer during pre-training.
    """

    def __init__(self, config):
        super().__init__()
        feature_dim = config.conv_dim[-1]
        self.layer_norm = nn.LayerNorm(feature_dim, eps=config.layer_norm_eps)
        self.projection = nn.Linear(feature_dim, config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # Normalize first so the un-projected, normed features can be reused.
        norm_hidden_states = self.layer_norm(hidden_states)
        projected = self.dropout(self.projection(norm_hidden_states))
        return projected, norm_hidden_states
class UniSpeechAttention(nn.Module):
    """Multi-headed attention supporting self-/cross-attention and key/value caching."""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Scale queries by 1/sqrt(head_dim) once, instead of scaling the scores.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Run attention over `hidden_states` (or `key_value_states` for cross-attention).

        Returns `(attn_output, attn_weights_or_None, past_key_value_or_None)`.
        """
        # If key_value_states are provided, this layer acts as cross-attention.
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, _ = hidden_states.size()
        query_states = self.q_proj(hidden_states) * self.scaling
        if is_cross_attention and past_key_value is not None:
            # Reuse cached cross-attention keys/values.
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # Self-attention with cache: append new keys/values to the cached ones.
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # Cache keys/values for the next decoding step.
            past_key_value = (key_states, value_states)
        # Fold the head dimension into the batch dimension for bmm.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # Additive mask (large negative values at masked positions).
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned aross GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeech
class UniSpeechFeedForward(nn.Module):
    """Position-wise feed-forward block: dense -> activation -> dense, with dropout."""

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)
        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` is either a string key into the ACT2FN mapping or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        # Expand to intermediate size, apply nonlinearity, then project back.
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)
        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayer(nn.Module):
    """Post-norm transformer encoder layer: attention + residual + LayerNorm, then FFN + residual + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        attn_residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states
        # Post-norm: LayerNorm applied AFTER the attention residual connection.
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)
        # Tuple output: (hidden_states,) plus attention weights if requested.
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayerStableLayerNorm(nn.Module):
    """Pre-norm transformer encoder layer: LayerNorm is applied BEFORE attention / FFN, residuals added after."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        attn_residual = hidden_states
        # Pre-norm: normalize before attention, add the raw residual after.
        hidden_states = self.layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states
        # FFN path is also pre-normed via `final_layer_norm`.
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech
class UniSpeechEncoder(nn.Module):
    """Stack of post-norm encoder layers with convolutional positional embeddings and LayerDrop."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if attention_mask is not None:
            # make sure padded tokens output 0
            hidden_states[~attention_mask] = 0.0
            # extend attention_mask: turn the (batch, seq) 0/1 mask into a large
            # negative additive bias of shape (batch, 1, seq, seq)
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )
        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        # Post-norm encoder: normalize once before the layer stack.
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                # Skipped layers contribute None attention weights.
                layer_outputs = (None, None)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderStableLayerNorm(nn.Module):
    """Stack of pre-norm encoder layers; the final LayerNorm is applied once after all layers."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if attention_mask is not None:
            # make sure padded tokens are not attended to
            hidden_states[~attention_mask] = 0
            # extend attention_mask: turn the (batch, seq) 0/1 mask into a large
            # negative additive bias of shape (batch, 1, seq, seq)
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )
        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.dropout(hidden_states)
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
                if self.gradient_checkpointing and self.training:
                    # Recompute layer activations during backward to save memory.
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                # Skipped layers contribute None attention weights.
                layer_outputs = (None, None)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        # Pre-norm stack: apply the final LayerNorm once after the last layer.
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class UniSpeechGumbelVectorQuantizer(nn.Module):
    """
    Vector quantizer: selects one codevector per group (Gumbel-softmax sample
    during training, hard argmax at inference), concatenates the per-group
    codevectors, and also returns the code perplexity as a codebook-usage
    diagnostic.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group
        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`"
                f" {self.num_groups} for concatenation"
            )
        # Codebook: (num_groups * num_vars) learnable vectors of size
        # codevector_dim // num_groups each.
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
        # Gumbel-softmax temperature; updated externally via
        # `UniSpeechForPreTraining.set_gumbel_temperature`.
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs):
        """exp(entropy) of the mean code distribution over the batch."""
        marginal_probs = probs.mean(dim=0)
        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states):
        batch_size, sequence_length, hidden_size = hidden_states.shape
        # Project features to per-group codevector logits, then flatten so each
        # row holds the logits of one group at one time step.
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
        if self.training:
            # Differentiable hard selection via straight-through Gumbel-softmax.
            codevector_probs = nn.functional.gumbel_softmax(
                hidden_states.float(), tau=self.temperature, hard=True
            ).type_as(hidden_states)
            # Perplexity is computed on the soft distribution, not the hard sample.
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist)
        else:
            # Inference: non-differentiable argmax, one-hot encoded via scatter_.
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
            perplexity = self._compute_perplexity(codevector_probs)
        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # One-hot weighted sum over the codebook selects one codevector per
        # group; groups are then concatenated along the last dimension.
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
        return codevectors, perplexity
class UniSpeechPreTrainedModel(PreTrainedModel):
    """
    An abstract base class handling weight initialization and shared helpers
    (conv output-length computation, feature-level attention masks) for all
    UniSpeech models.
    """

    config_class = UniSpeechConfig
    base_model_prefix = "unispeech"
    main_input_name = "input_values"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights of a single submodule (dispatched by type)."""
        if isinstance(module, UniSpeechGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, UniSpeechPositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, UniSpeechFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """Compute the sequence length produced by the conv feature extractor."""
        def _conv_out_length(input_length, kernel_size, stride):
            # Standard 1-D conv output-length formula (no padding/dilation).
            return torch_int_div(input_length - kernel_size, stride) + 1
        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        """Downsample a sample-level attention mask to the conv feature frame rate."""
        # Number of non-padded samples per batch element (last cumsum entry).
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
        batch_size = attention_mask.shape[0]
        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # Set a 1 at the last valid frame, then a reversed cumsum makes every
        # position up to (and including) that frame attended to.
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the transformer encoders and the conv feature encoder support
        # gradient checkpointing.
        if isinstance(module, (UniSpeechEncoder, UniSpeechEncoderStableLayerNorm, UniSpeechFeatureEncoder)):
            module.gradient_checkpointing = value
UNISPEECH_START_DOCSTRING = r"""
UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled
Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,
Michael Zeng, Xuedong Huang.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving etc.).
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UNISPEECH_INPUTS_DOCSTRING = r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
soundfile*). To prepare the array into *input_values*, the [`UniSpeechProcessor`] should be used for
padding and conversion into a tensor of type *torch.FloatTensor*. See [`UniSpeechProcessor.__call__`] for
details.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
<Tip warning={true}>
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
**not** be passed to avoid degraded performance when doing batched inference. For such models
`input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
models also yield slightly different results depending on whether `input_values` is padded or not.
</Tip>
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.",
    UNISPEECH_START_DOCSTRING,
)
class UniSpeechModel(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = UniSpeechFeatureEncoder(config)
        self.feature_projection = UniSpeechFeatureProjection(config)
        # Learned embedding used to fill masked (SpecAugment) time steps.
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
        if config.do_stable_layer_norm:
            self.encoder = UniSpeechEncoderStableLayerNorm(config)
        else:
            self.encoder = UniSpeechEncoder(config)
        # Initialize weights and apply final processing.
        self.post_init()

    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Mask extracted features along the time and/or feature axes
        (SpecAugment-style, controlled by `config.apply_spec_augment`).
        Operates in place on `hidden_states` and also returns it.
        """
        # `config.apply_spec_augment` can disable masking entirely.
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states
        batch_size, sequence_length, hidden_size = hidden_states.size()
        if mask_time_indices is not None:
            # Apply caller-provided time-step masks (used e.g. during pre-training).
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            # Sample time-step masks; masked steps are replaced by the learned embedding.
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        if self.config.mask_feature_prob > 0 and self.training:
            # Sample feature-channel masks; masked channels are zeroed across all time steps.
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0
        return hidden_states

    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Wav2Vec2BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Conv feature encoder works channel-first; transpose back to (batch, time, features).
        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)
        if attention_mask is not None:
            # Downsample the sample-level mask to the conv feature frame rate.
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
        hidden_states, extract_features = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(
            hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
        )
        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = encoder_outputs[0]
        if not return_dict:
            return (hidden_states, extract_features) + encoder_outputs[1:]
        return Wav2Vec2BaseModelOutput(
            last_hidden_state=hidden_states,
            extract_features=extract_features,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a vector-quantization module and ctc loss for pre-training.""", UNISPEECH_START_DOCSTRING
)
class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
        self.quantizer = UniSpeechGumbelVectorQuantizer(config)
        # Project quantized codevectors back into the transformer hidden size.
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
        self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)
        self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)
        self.dropout = nn.Dropout(config.final_dropout)
        # Initialize weights and apply final processing.
        self.post_init()

    def set_gumbel_temperature(self, temperature: int):
        """Set the Gumbel-softmax temperature used by the quantizer (training only)."""
        self.quantizer.temperature = temperature

    def freeze_feature_extractor(self):
        """Deprecated alias of `freeze_feature_encoder`."""
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """Disable gradients for the conv feature encoder so it is not updated during training."""
        self.unispeech.feature_extractor._freeze_parameters()

    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 1,
    ):
        """
        Compute temperature-scaled cosine-similarity logits between the
        predicted features and the (positive + negative) target features.
        """
        target_features = torch.cat([target_features, negative_features], dim=0)
        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
        logits = logits.type_as(target_features)
        logits = logits / temperature
        return logits

    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, UniSpeechForPreTrainingOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        transformer_features = outputs[0]
        # outputs[1] holds the layer-normed conv features; quantize them.
        extract_features = self.dropout_features(outputs[1])
        quantized_features, codevector_perplexity = self.quantizer(extract_features)
        quantized_features = self.project_q(quantized_features)
        quantized_features = self.project_hid(quantized_features)
        # Sample a boolean matrix: True with probability `replace_prob` per
        # (batch, time) position.
        prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(
            self.config.replace_prob
        )
        prob_replace_matrix = prob_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)
        sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)
        # Where sampled True, take the quantized features; elsewhere keep the
        # transformer features.
        logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (
            quantized_features.masked_fill(~sampled_replace_matrix, 0.0)
        )
        # project to ctc units
        logits = self.dropout(logits)
        logits = self.ctc_proj(logits)
        # NOTE(review): the pre-training loss is not computed in this method;
        # `loss` is always returned as None here.
        loss = None
        if not return_dict:
            if loss is not None:
                return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
            return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
        return UniSpeechForPreTrainingOutput(
            loss=loss,
            projected_states=transformer_features,
            projected_quantized_states=quantized_features,
            codevector_perplexity=codevector_perplexity,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    UNISPEECH_START_DOCSTRING,
)
class UniSpeechForCTC(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout = nn.Dropout(config.final_dropout)
        # `vocab_size` is required for the LM head; fail early with guidance.
        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                "or define `vocab_size` of your model's configuration."
            )
        # Adapter (if configured) changes the hidden size fed to the LM head.
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """Deprecated alias of `freeze_feature_encoder`."""
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """Disable gradients for the conv feature encoder so it is not updated during training."""
        self.unispeech.feature_extractor._freeze_parameters()

    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            if labels.max() >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)
            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
            # Disable cudnn here: `torch.backends.cudnn.flags(enabled=False)`
            # forces the deterministic native CTC path.
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@add_start_docstrings(
"""
UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
SUPERB Keyword Spotting.
""",
UNISPEECH_START_DOCSTRING,
)
class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
    """UniSpeech encoder with a pooled linear classification head (e.g. SUPERB keyword spotting)."""
    def __init__(self, config):
        """Build the encoder, optional learnable layer weights, projector and classifier."""
        super().__init__(config)
        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)"
            )
        self.unispeech = UniSpeechModel(config)
        # +1 presumably accounts for the embedding output in hidden_states — matches its length.
        num_layers = config.num_hidden_layers + 1
        if config.use_weighted_layer_sum:
            # Learnable combination weights over all hidden states, initialised uniform.
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
        # Initialize weights and apply final processing.
        self.post_init()
    def freeze_feature_extractor(self):
        """Deprecated alias kept for backward compatibility; delegates to `freeze_feature_encoder`."""
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """Disable gradient updates for the convolutional feature encoder."""
        self.unispeech.feature_extractor._freeze_parameters()
    def freeze_base_model(self):
        """Freeze the whole encoder so only the classification head is trained."""
        for param in self.unispeech.parameters():
            param.requires_grad = False
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_SEQ_CLASS_CHECKPOINT,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        """Encode audio, pool over time (mask-aware mean), classify, and optionally compute CE loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # The weighted-layer-sum path needs all hidden states regardless of the caller's flag.
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]
        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            # Mask-aware mean: zero out padded frames, then divide by the real frame count.
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| true | true |
f72cde285309d6d399c8446c716859a2af197049 | 294 | py | Python | product/serializers.py | RKatana/inventory-app-django | a31614237daa5a2d62e30e51b9e573968ef3f0c0 | [
"Apache-2.0"
] | null | null | null | product/serializers.py | RKatana/inventory-app-django | a31614237daa5a2d62e30e51b9e573968ef3f0c0 | [
"Apache-2.0"
] | null | null | null | product/serializers.py | RKatana/inventory-app-django | a31614237daa5a2d62e30e51b9e573968ef3f0c0 | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from product.models import Product
class ProductSerializer(serializers.ModelSerializer):
    """Full-detail serializer exposing every field of a Product."""
    class Meta:
        model = Product
        fields = '__all__'
class ProductListSerializer(serializers.ModelSerializer):
    """Lightweight list serializer exposing only the product's store."""
    class Meta:
        model = Product
        fields = ('store',)
| 21 | 57 | 0.785714 | from rest_framework import serializers
from product.models import Product
class ProductSerializer(serializers.ModelSerializer):
    """Full-detail serializer exposing every field of a Product."""
    class Meta:
        model = Product
        fields = '__all__'
class ProductListSerializer(serializers.ModelSerializer):
    """Lightweight list serializer exposing only the product's store."""
    class Meta:
        model = Product
        fields = ('store',)
| true | true |
f72cde9e2a18d5047df425b607f0e92be3a3846e | 1,685 | py | Python | example/app/odnoklassniki.py | NorthIsUp/django-social-auth | 9afedc8ea777b32611d43c1c367babe2e3b18a90 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 863 | 2015-01-01T00:42:07.000Z | 2022-03-30T02:47:18.000Z | example/app/odnoklassniki.py | JohnieLee/django-social-auth | de36265a4799c435751d9af42ddf6fe7e7a90e0a | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 101 | 2015-01-08T00:28:16.000Z | 2022-03-07T03:11:19.000Z | example/app/odnoklassniki.py | JohnieLee/django-social-auth | de36265a4799c435751d9af42ddf6fe7e7a90e0a | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 256 | 2015-01-02T16:55:36.000Z | 2022-03-04T11:10:47.000Z | # -*- coding:utf-8 -*-
from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, logout
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from social_auth.views import complete
SANDBOX_URL = 'http://api-sandbox.odnoklassniki.ru:8088/sandbox/protected/application/launch.do?appId={0:s}&userId=0'
class OdnoklassnikiInfo(TemplateView):
    """Info page: bounce to the Odnoklassniki sandbox when an app id is
    configured, otherwise fall back to rendering the static template."""
    template_name = 'odnoklassniki_info.html'
    def get(self, *args, **kwargs):
        # Without a configured app id there is nothing to launch — render the template.
        if not hasattr(settings, 'ODNOKLASSNIKI_APP_ID'):
            return super(OdnoklassnikiInfo, self).get(*args, **kwargs)
        return redirect(SANDBOX_URL.format(settings.ODNOKLASSNIKI_APP_ID))
ok_app_info = OdnoklassnikiInfo.as_view()
class OdnoklassnikiApp(TemplateView):
    """Iframe application page; completes social-auth login when Odnoklassniki
    calls back with an ``apiconnection`` query parameter."""
    template_name = 'odnoklassniki.html'
    def get(self, request, *args, **kwargs):
        result = None
        if request.GET.get('apiconnection', None):
            # A session authenticated through a different backend is logged out
            # before completing the Odnoklassniki app flow.
            if request.user.is_authenticated() and 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
                logout(request)
            result = complete(request, 'odnoklassnikiapp')
            # `complete` may produce a redirect/response — pass it straight through.
            if isinstance(result, HttpResponse):
                return result
        else:
            # No API connection info: sessions from other backends are treated as anonymous.
            if not request.user.is_authenticated() or 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
                request.user = AnonymousUser()
        context = self.get_context_data(params=kwargs)
        return self.render_to_response(context)
ok_app = OdnoklassnikiApp.as_view() | 42.125 | 132 | 0.706231 |
from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, logout
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from social_auth.views import complete
SANDBOX_URL = 'http://api-sandbox.odnoklassniki.ru:8088/sandbox/protected/application/launch.do?appId={0:s}&userId=0'
class OdnoklassnikiInfo(TemplateView):
    """Info page: bounce to the Odnoklassniki sandbox when an app id is
    configured, otherwise fall back to rendering the static template."""
    template_name = 'odnoklassniki_info.html'
    def get(self, *args, **kwargs):
        # Without a configured app id there is nothing to launch — render the template.
        if not hasattr(settings, 'ODNOKLASSNIKI_APP_ID'):
            return super(OdnoklassnikiInfo, self).get(*args, **kwargs)
        return redirect(SANDBOX_URL.format(settings.ODNOKLASSNIKI_APP_ID))
ok_app_info = OdnoklassnikiInfo.as_view()
class OdnoklassnikiApp(TemplateView):
template_name = 'odnoklassniki.html'
def get(self, request, *args, **kwargs):
result = None
if request.GET.get('apiconnection', None):
if request.user.is_authenticated() and 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
logout(request)
result = complete(request, 'odnoklassnikiapp')
if isinstance(result, HttpResponse):
return result
else:
if not request.user.is_authenticated() or 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
request.user = AnonymousUser()
context = self.get_context_data(params=kwargs)
return self.render_to_response(context)
ok_app = OdnoklassnikiApp.as_view() | true | true |
f72cdf8621755d98f90f575e4d4b84f0878e736f | 973 | py | Python | setup.py | tjsego/simservice | 1ca1df4c6644f22217645575719cfa72f5b9f895 | [
"MIT"
] | 1 | 2021-08-08T03:15:47.000Z | 2021-08-08T03:15:47.000Z | setup.py | tjsego/simservice | 1ca1df4c6644f22217645575719cfa72f5b9f895 | [
"MIT"
] | null | null | null | setup.py | tjsego/simservice | 1ca1df4c6644f22217645575719cfa72f5b9f895 | [
"MIT"
] | null | null | null | import os
from setuptools import setup
# Read the package version from VERSION.txt next to this script. The file is
# opened via a context manager so the handle is closed promptly (the previous
# one-liner leaked an open file object).
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'VERSION.txt')) as _version_file:
    __version__ = _version_file.readline().strip()
setup(
    name='simservice',
    version=__version__,
    description='A library for building simulation services in Python',
    url='https://github.com/tjsego/simservice',
    author='T.J. Sego',
    author_email='tjsego@iu.edu',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9'
    ],
    packages=['simservice'],
    package_dir={'simservice': '.'},
    python_requires='>=3.6',
    package_data={'simservice': ['LICENSE', 'VERSION.txt']}
)
| 29.484848 | 110 | 0.626927 | import os
from setuptools import setup
# Read the package version from VERSION.txt next to this script. The file is
# opened via a context manager so the handle is closed promptly (the previous
# one-liner leaked an open file object).
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'VERSION.txt')) as _version_file:
    __version__ = _version_file.readline().strip()
setup(
    name='simservice',
    version=__version__,
    description='A library for building simulation services in Python',
    url='https://github.com/tjsego/simservice',
    author='T.J. Sego',
    author_email='tjsego@iu.edu',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9'
    ],
    packages=['simservice'],
    package_dir={'simservice': '.'},
    python_requires='>=3.6',
    package_data={'simservice': ['LICENSE', 'VERSION.txt']}
)
| true | true |
f72cdfc48dda33940d0a57929d4878e17ec7d72c | 1,102 | py | Python | exercicios-com-listas/exercicio12.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | exercicios-com-listas/exercicio12.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | exercicios-com-listas/exercicio12.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | #Foram anotadas as idades e infos de 30 alunos. Faça um Programa que determine quantos alunos com mais de 13 anos
# possuem altura inferior à média de altura desses alunos.
from random import random,randint
# 30 students are generated as [height, age] pairs. The exercise statement:
# count how many students older than 13 have a height below the group average.
alunos = []
for _ in range(30):
    altura = 0 #reset so the while loop below is entered again for each student
    idade = randint(1,90)
    #do not allow heights below 1.40 m
    while altura < 1.40:
        altura = random() * 2
    aluno = []
    aluno.append(round(altura,2))
    aluno.append(idade)
    alunos.append(aluno)
media_alturas = 0
for altura,idade in alunos:
    media_alturas += altura
media_alturas = round((media_alturas / len(alunos)),2)
print(media_alturas)
# NOTE(review): the statement asks for students OLDER than 13, but the check
# below counts age <= 13 (and the final message says "13 ou menos") — confirm intent.
alunos_13_abaixo_da_media = 0
for altura,idade in alunos:
    if idade <= 13 and altura < media_alturas:
        alunos_13_abaixo_da_media +=1
        print(f"Altura: {altura} - idade {idade} *") #marks with * which student meets the condition
    else:
        print(f"Altura: {altura} - idade {idade}")
print(f"Alunos com 13 ou menos e altura abaixo da média: {alunos_13_abaixo_da_media}") | 25.627907 | 114 | 0.686025 |
from random import random,randint
# Build 30 [height, age] pairs; heights below 1.40 m are re-drawn until valid.
alunos = []
for _student in range(30):
    idade = randint(1, 90)
    altura = 0
    while altura < 1.40:
        altura = random() * 2
    alunos.append([round(altura, 2), idade])
# Average height, rounded to two decimals.
soma_alturas = 0
for altura, idade in alunos:
    soma_alturas += altura
media_alturas = round(soma_alturas / len(alunos), 2)
print(media_alturas)
# Count students aged 13 or younger whose height is below the average;
# matching students are flagged with a trailing " *".
alunos_13_abaixo_da_media = 0
for altura, idade in alunos:
    abaixo = idade <= 13 and altura < media_alturas
    if abaixo:
        alunos_13_abaixo_da_media += 1
    marcador = " *" if abaixo else ""
    print(f"Altura: {altura} - idade {idade}{marcador}")
print(f"Alunos com 13 ou menos e altura abaixo da média: {alunos_13_abaixo_da_media}") | true | true |
f72ce0eb629e9199e4a7dbe67896f97abd42dbec | 4,636 | py | Python | python/src/di_bq.py | cedadev/ceda-di | 5d7e21f28ead02d226c19f2831bc261897300b0f | [
"BSD-3-Clause-Clear"
] | 5 | 2015-04-17T08:52:34.000Z | 2020-07-02T13:32:41.000Z | python/src/di_bq.py | cedadev/ceda-di | 5d7e21f28ead02d226c19f2831bc261897300b0f | [
"BSD-3-Clause-Clear"
] | 14 | 2015-01-07T10:30:34.000Z | 2020-08-13T11:04:00.000Z | python/src/di_bq.py | cedadev/ceda-di | 5d7e21f28ead02d226c19f2831bc261897300b0f | [
"BSD-3-Clause-Clear"
] | 2 | 2016-01-27T11:31:34.000Z | 2017-05-18T13:37:18.000Z | #!/usr/bin/env python
"""
`di_bq.py` is a wrapper around the standard ceda-di tools to help parallelise
the processing of files using a batch queue.
This tool has two main functions:
* Generate lists of files to be processed in individual jobs by the queue
* Dispatch archive processing jobs to the batch queue
Usage:
di_bq.py (--help | --version)
di_bq.py gen-list <input-dir> <file-list-output-dir> [--num=<num>]
di_bq.py submit-jobs <dir-containing-file-lists> [--delete-after]
di_bq.py process <individual-file-list> [--delete-after]
Options:
--help Show this screen.
--version Show version.
--num=<num> Number of paths to store in each file [default: 5000].
--delete-after Delete input files after job submission.
"""
import json
import os
from docopt import docopt
from ceda_di import __version__ # Grab version from package __init__.py
from ceda_di.extract import Extract
import ceda_di.util.cmd as cmd
def dump_to_json(output_directory, seq, file_list):
    """Serialise ``file_list`` to ``<output_directory>/<seq>.json``.

    :param str output_directory: directory receiving the JSON file.
    :param int seq: sequence number used as the file name stem.
    :param list file_list: file paths to serialise.
    """
    destination = os.path.join(output_directory, "{seq}.json".format(seq=seq))
    with open(destination, "w") as handle:
        json.dump(file_list, handle)
def construct_bsub_command(path, params=None):
    """Build the "bsub" shell command that re-invokes this script on ``path``.

    :param str path: file list handed to the ``process`` sub-command.
    :param dict params: optional mapping of option names (see ``bsub_param``)
        to values; unrecognised keys are ignored. Defaults to no options.
    :returns: the complete command string.
    """
    # Mapping of "bsub" command parameters to what they mean
    bsub_param = {
        "stdout": "-o",
        "stderr": "-e",
        "num-cores": "-n",
        "queue": "-q",
        "walltime": "-W",
        "jobname": "-J"
    }
    # Fix: the old signature used a mutable default (params={}), which is
    # evaluated once per process and shared between calls; use None instead.
    if params is None:
        params = {}
    command = "bsub"
    for k, v in params.items():
        if k in bsub_param:
            opt = " {option} {value}".format(option=bsub_param[k], value=v)
            command += opt
    # The job script is fed to bsub via a here-string.
    command += "<<<"
    srcdir = os.getcwd()
    cedadir = "/".join(srcdir.split("/")[:-1])  # Get dir one level up
    command += (
        "\'" +
        "cd {cedadir}\n".format(cedadir=cedadir) +
        "source bin/activate\n" +
        "cd {srcdir}\n".format(srcdir=srcdir) +
        "python {script} process {path}".format(script=__file__, path=path) +
        "\'"
    )
    return command
def bsub(path, config):
    """Submit a batch-queue job that processes the file list at ``path``.

    :param str path: JSON file list handed to the ``process`` sub-command.
    :param dict config: settings providing "output-path", "num-cores",
        "batch-queue" and "es-index".
    """
    out = config["output-path"]
    # Job options derived from the application config; construct_bsub_command
    # maps these keys onto bsub flags. %J is expanded by LSF to the job id.
    defaults = {
        "stdout": os.path.join(out, "%J.o"),
        "stderr": os.path.join(out, "%J.e"),
        "num-cores": int(config["num-cores"]) + 1,  # 1 extra core for main thread
        "queue": config["batch-queue"],
        "jobname": "ceda-di-{index}".format(index=config["es-index"])
    }
    bsub_script = construct_bsub_command(path, defaults)
    # Fire-and-forget: the exit status of bsub is not checked.
    os.system(bsub_script)
def main():
    """Entry point: parse the docopt CLI and dispatch to one of the three
    modes — gen-list, submit-jobs, or process."""
    # Get arguments from command line
    args = cmd.sanitise_args(docopt(__doc__, version=__version__))
    # Fall back to the bundled default configuration when none was supplied.
    if 'config' not in args or not args["config"]:
        direc = os.path.dirname(__file__)
        conf_path = os.path.join(direc, "../config/ceda_di.json")
        args["config"] = conf_path
    config = cmd.get_settings(args["config"], args)
    if args["gen-list"]:
        # Set up variables from command-line parameters
        path = args["input-dir"]
        output_directory = args["file-list-output-dir"]
        max_files = int(args["num"])
        seq = 0
        # Begin sweeping for files
        flist = []
        for root, dirs, files in os.walk(path, followlinks=True):
            for f in files:
                fp = os.path.join(root, f)
                flist.append(fp)
                # Dump file paths to JSON document
                if len(flist) >= max_files:
                    dump_to_json(output_directory, seq, flist)
                    seq += 1  # Increment file sequence number
                    flist = []
        # Dump anything left over to JSON
        dump_to_json(output_directory, seq, flist)
    elif args["submit-jobs"]:
        input_directory = args["dir-containing-file-lists"]
        for root, dirs, files in os.walk(input_directory):
            for f in files:
                fp = os.path.join(root, f)
                # Submit job to batch queue
                bsub(fp, config)
    elif args["process"]:
        file_list = args["individual-file-list"]
        with open(file_list, "r") as f:
            files = json.load(f)
        extract = Extract(config, files)
        extract.run()
if __name__ == "__main__":
main()
| 30.906667 | 82 | 0.596204 |
import json
import os
from docopt import docopt
from ceda_di import __version__
from ceda_di.extract import Extract
import ceda_di.util.cmd as cmd
def dump_to_json(output_directory, seq, file_list):
    """Write ``file_list`` as JSON to ``<output_directory>/<seq>.json``."""
    target = os.path.join(output_directory, "{seq}.json".format(seq=seq))
    with open(target, "w") as sink:
        json.dump(file_list, sink)
def construct_bsub_command(path, params=None):
    """Build the "bsub" shell command that re-invokes this script on ``path``.

    :param str path: file list handed to the ``process`` sub-command.
    :param dict params: optional mapping of option names (see ``bsub_param``)
        to values; unrecognised keys are ignored. Defaults to no options.
    :returns: the complete command string.
    """
    # bsub option-name -> flag mapping.
    bsub_param = {
        "stdout": "-o",
        "stderr": "-e",
        "num-cores": "-n",
        "queue": "-q",
        "walltime": "-W",
        "jobname": "-J"
    }
    # Fix: the old signature used a mutable default (params={}); use None instead.
    if params is None:
        params = {}
    command = "bsub"
    for k, v in params.items():
        if k in bsub_param:
            opt = " {option} {value}".format(option=bsub_param[k], value=v)
            command += opt
    # The job script is fed to bsub via a here-string.
    command += "<<<"
    srcdir = os.getcwd()
    cedadir = "/".join(srcdir.split("/")[:-1])
    command += (
        "\'" +
        "cd {cedadir}\n".format(cedadir=cedadir) +
        "source bin/activate\n" +
        "cd {srcdir}\n".format(srcdir=srcdir) +
        "python {script} process {path}".format(script=__file__, path=path) +
        "\'"
    )
    return command
def bsub(path, config):
    """Submit a batch-queue job that processes the file list at ``path``.

    :param str path: JSON file list handed to the ``process`` sub-command.
    :param dict config: settings providing "output-path", "num-cores",
        "batch-queue" and "es-index".
    """
    out = config["output-path"]
    # Job options passed to construct_bsub_command; %J is the batch job id.
    defaults = {
        "stdout": os.path.join(out, "%J.o"),
        "stderr": os.path.join(out, "%J.e"),
        "num-cores": int(config["num-cores"]) + 1,  # one extra core for the main thread
        "queue": config["batch-queue"],
        "jobname": "ceda-di-{index}".format(index=config["es-index"])
    }
    bsub_script = construct_bsub_command(path, defaults)
    # Fire-and-forget: the exit status of bsub is not checked.
    os.system(bsub_script)
def main():
    """Entry point: parse the docopt CLI and dispatch to one of the three
    modes — gen-list, submit-jobs, or process."""
    args = cmd.sanitise_args(docopt(__doc__, version=__version__))
    # Fall back to the bundled default configuration when none was supplied.
    if 'config' not in args or not args["config"]:
        direc = os.path.dirname(__file__)
        conf_path = os.path.join(direc, "../config/ceda_di.json")
        args["config"] = conf_path
    config = cmd.get_settings(args["config"], args)
    if args["gen-list"]:
        path = args["input-dir"]
        output_directory = args["file-list-output-dir"]
        max_files = int(args["num"])
        seq = 0
        flist = []
        # Walk the archive, flushing a numbered JSON list every max_files paths.
        for root, dirs, files in os.walk(path, followlinks=True):
            for f in files:
                fp = os.path.join(root, f)
                flist.append(fp)
                if len(flist) >= max_files:
                    dump_to_json(output_directory, seq, flist)
                    seq += 1
                    flist = []
        # Flush any remaining paths (possibly an empty list).
        dump_to_json(output_directory, seq, flist)
    elif args["submit-jobs"]:
        input_directory = args["dir-containing-file-lists"]
        # Submit one batch job per file list found under the directory.
        for root, dirs, files in os.walk(input_directory):
            for f in files:
                fp = os.path.join(root, f)
                bsub(fp, config)
    elif args["process"]:
        file_list = args["individual-file-list"]
        with open(file_list, "r") as f:
            files = json.load(f)
        extract = Extract(config, files)
        extract.run()
if __name__ == "__main__":
main()
| true | true |
f72ce0efe3d0f872e2ed83a0a3cbfa7d5c6e9f5e | 9,361 | py | Python | layers/box_utils.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | 1 | 2019-04-03T16:48:43.000Z | 2019-04-03T16:48:43.000Z | layers/box_utils.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | null | null | null | layers/box_utils.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | null | null | null | import torch
def point_form(boxes):
    """Convert center-size boxes (cx, cy, w, h) to corner form (xmin, ymin, xmax, ymax).

    Args:
        boxes: (tensor) center-size default boxes from priorbox layers.
    Return:
        boxes: (tensor) converted (xmin, ymin, xmax, ymax) boxes.
    """
    centers, sizes = boxes[:, :2], boxes[:, 2:]
    half = sizes / 2
    return torch.cat((centers - half, centers + half), 1)
def center_size(boxes):
    """Convert corner-form boxes (xmin, ymin, xmax, ymax) to center-size form (cx, cy, w, h).

    Args:
        boxes: (tensor) point_form boxes, Shape: [num_boxes, 4].
    Return:
        boxes: (tensor) converted (cx, cy, w, h) boxes, Shape: [num_boxes, 4].
    """
    # Bug fix: torch.cat takes a *sequence* of tensors plus a dim argument; the
    # previous code passed the two tensors as separate positional arguments.
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
def intersect(box_a, box_b):
    """Pairwise intersection areas between two sets of corner-form boxes.

    Both sets are broadcast to [A, B, 2] so every (a, b) pair is compared at
    once without extra allocation beyond the expanded views.

    Args:
        box_a: (tensor) bounding boxes, Shape: [A, 4].
        box_b: (tensor) bounding boxes, Shape: [B, 4].
    Return:
        (tensor) intersection areas, Shape: [A, B].
    """
    n_a, n_b = box_a.size(0), box_b.size(0)
    upper = torch.min(box_a[:, 2:].unsqueeze(1).expand(n_a, n_b, 2),
                      box_b[:, 2:].unsqueeze(0).expand(n_a, n_b, 2))
    lower = torch.max(box_a[:, :2].unsqueeze(1).expand(n_a, n_b, 2),
                      box_b[:, :2].unsqueeze(0).expand(n_a, n_b, 2))
    side = torch.clamp(upper - lower, min=0)
    return side[:, :, 0] * side[:, :, 1]
def jaccard(box_a, box_b):
    """Pairwise jaccard overlap (IoU) between two sets of corner-form boxes.

    IoU(A, B) = area(A ∩ B) / (area(A) + area(B) - area(A ∩ B))

    Args:
        box_a: (tensor) ground truth boxes, Shape: [num_objects, 4].
        box_b: (tensor) prior boxes from priorbox layers, Shape: [num_priors, 4].
    Return:
        (tensor) jaccard overlaps, Shape: [box_a.size(0), box_b.size(0)].
    """
    inter = intersect(box_a, box_b)
    area_a = ((box_a[:, 2]-box_a[:, 0]) *
              (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)
    area_b = ((box_b[:, 2]-box_b[:, 0]) *
              (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)
    return inter / (area_a + area_b - inter)
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
    """Match each prior box with the ground truth box of the highest jaccard
    overlap, encode the bounding boxes, then return the matched indices
    corresponding to both confidence and location preds.
    Args:
        threshold: (float) The overlap threshold used when matching boxes.
        truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].
        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
        variances: (list[float]) Variances corresponding to each prior coord,
            Shape: [num_priors, 4].
        labels: (tensor) All the class labels for the image, Shape: [num_obj].
        loc_t: (tensor) Tensor to be filled w/ encoded location targets.
        conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
        idx: (int) current batch index
    Return:
        The matched indices corresponding to 1)location and 2)confidence preds.
    """
    # jaccard index
    overlaps = jaccard(
        truths,
        point_form(priors)
    )
    # (Bipartite Matching)
    # [1,num_objects] best prior for each ground truth
    best_prior_overlap, best_prior_idx = overlaps.max(1)
    # [1,num_priors] best ground truth for each prior
    best_truth_overlap, best_truth_idx = overlaps.max(0)
    # NOTE(review): these squeeze_ calls assume max(dim) returned keepdim-style
    # results (older torch); on modern torch max(dim) is already 1-D — verify
    # against the targeted torch version.
    best_truth_idx.squeeze_(0)
    best_truth_overlap.squeeze_(0)
    best_prior_idx.squeeze_(1)
    best_prior_overlap.squeeze_(1)
    # 2 exceeds any real IoU (<= 1), so these priors always pass the threshold.
    best_truth_overlap.index_fill_(0, best_prior_idx, 2)  # ensure best prior
    # TODO refactor: index best_prior_idx with long tensor
    # ensure every gt matches with its prior of max overlap
    for j in range(best_prior_idx.size(0)):
        best_truth_idx[best_prior_idx[j]] = j
    matches = truths[best_truth_idx]          # Shape: [num_priors,4]
    conf = labels[best_truth_idx] + 1         # Shape: [num_priors]
    conf[best_truth_overlap < threshold] = 0  # label as background
    loc = encode(matches, priors, variances)
    loc_t[idx] = loc    # [num_priors,4] encoded offsets to learn
    conf_t[idx] = conf  # [num_priors] top class label for each prior
def encode(matched, priors, variances):
    """Encode matched ground-truth boxes as regression targets relative to priors.

    Args:
        matched: (tensor) matched ground truth, point-form, Shape: [num_priors, 4].
        priors: (tensor) prior boxes, center-size form, Shape: [num_priors, 4].
        variances: (list[float]) variances used to scale the offsets.
    Return:
        encoded targets for smooth_l1_loss (tensor), Shape: [num_priors, 4].
    """
    # Offset of the ground-truth center from the prior center, variance-scaled.
    gt_centers = (matched[:, :2] + matched[:, 2:]) / 2
    offset = (gt_centers - priors[:, :2]) / (variances[0] * priors[:, 2:])
    # Log-ratio of the ground-truth size to the prior size, variance-scaled.
    ratio = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    log_ratio = torch.log(ratio) / variances[1]
    return torch.cat([offset, log_ratio], 1)
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
    """Invert `encode`: combine predicted offsets with priors into corner boxes.

    Args:
        loc (tensor): location predictions, Shape: [num_priors, 4].
        priors (tensor): prior boxes in center-offset form, Shape: [num_priors, 4].
        variances: (list[float]) variances used at encode time.
    Return:
        decoded corner-form (xmin, ymin, xmax, ymax) boxes.
    """
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
    boxes = torch.cat((centers, sizes), 1)
    # Convert (cx, cy, w, h) to corners in place.
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
def log_sum_exp(x):
    """Numerically stable log(sum(exp(x), dim=1)), shifted by the global max.

    Used to compute the unaveraged confidence loss across all examples in a batch.

    Args:
        x (Variable(tensor)): conf_preds from conf layers.
    """
    shift = x.data.max()
    return torch.log(torch.sum(torch.exp(x - shift), 1)) + shift
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class predscores for the img, Shape:[num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The Maximum number of box preds to consider.
    Return:
        The indices of the kept boxes with respect to num_priors, and how many
        of the leading entries of that tensor are valid (count).
    """
    # NOTE(review): relies on legacy in-place tensor APIs (new(), resize_as_,
    # out= index_select) — behaviour tied to older torch; modern code would use
    # torchvision.ops.nms.
    keep = scores.new(scores.size(0)).zero_().long()
    if boxes.numel() == 0:
        return keep
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    # Scratch tensors reused by the index_select calls inside the loop.
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()
    # keep = torch.Tensor()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # store element-wise max with next highest score
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w*h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas)
        union = (rem_areas - inter) + area[i]
        IoU = inter/union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    return keep, count
| 39.167364 | 80 | 0.609443 | import torch
def point_form(boxes):
    """Convert center-size boxes (cx, cy, w, h) to corner form (xmin, ymin, xmax, ymax)."""
    centers, sizes = boxes[:, :2], boxes[:, 2:]
    half = sizes / 2
    return torch.cat((centers - half, centers + half), 1)
def center_size(boxes):
    """Convert corner-form boxes (xmin, ymin, xmax, ymax) to center-size (cx, cy, w, h).

    Bug fix: torch.cat takes a *sequence* of tensors plus a dim argument; the
    previous code passed the two tensors as separate positional arguments.
    """
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,
                      boxes[:, 2:] - boxes[:, :2]), 1)
def intersect(box_a, box_b):
    """Pairwise intersection areas between two corner-form box sets, Shape [A, B]."""
    n_a, n_b = box_a.size(0), box_b.size(0)
    # Broadcast both sets to [A, B, 2] so every pair is compared at once.
    upper = torch.min(box_a[:, 2:].unsqueeze(1).expand(n_a, n_b, 2),
                      box_b[:, 2:].unsqueeze(0).expand(n_a, n_b, 2))
    lower = torch.max(box_a[:, :2].unsqueeze(1).expand(n_a, n_b, 2),
                      box_b[:, :2].unsqueeze(0).expand(n_a, n_b, 2))
    side = torch.clamp(upper - lower, min=0)
    return side[:, :, 0] * side[:, :, 1]
def jaccard(box_a, box_b):
    """Pairwise jaccard overlap (IoU) between two corner-form box sets, Shape [A, B]."""
    inter = intersect(box_a, box_b)
    area_a = ((box_a[:, 2]-box_a[:, 0]) *
              (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)
    area_b = ((box_b[:, 2]-box_b[:, 0]) *
              (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)
    return inter / (area_a + area_b - inter)
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
    """Match each prior with the highest-IoU ground-truth box, encode the
    regression targets, and write results into loc_t/conf_t at batch slot idx."""
    overlaps = jaccard(
        truths,
        point_form(priors)
    )
    # Best prior per ground truth, and best ground truth per prior.
    best_prior_overlap, best_prior_idx = overlaps.max(1)
    best_truth_overlap, best_truth_idx = overlaps.max(0)
    # NOTE(review): these squeeze_ calls assume keepdim-style max(dim) results
    # (older torch) — verify against the targeted torch version.
    best_truth_idx.squeeze_(0)
    best_truth_overlap.squeeze_(0)
    best_prior_idx.squeeze_(1)
    best_prior_overlap.squeeze_(1)
    # 2 exceeds any real IoU (<= 1), so these priors always pass the threshold.
    best_truth_overlap.index_fill_(0, best_prior_idx, 2)
    # Ensure every ground truth keeps its own best prior.
    for j in range(best_prior_idx.size(0)):
        best_truth_idx[best_prior_idx[j]] = j
    matches = truths[best_truth_idx]
    conf = labels[best_truth_idx] + 1
    conf[best_truth_overlap < threshold] = 0
    loc = encode(matches, priors, variances)
    loc_t[idx] = loc
    conf_t[idx] = conf
def encode(matched, priors, variances):
    """Encode matched point-form ground truth as variance-scaled offsets from priors."""
    gt_centers = (matched[:, :2] + matched[:, 2:]) / 2
    # Center offset, variance-scaled by the prior size.
    offset = (gt_centers - priors[:, :2]) / (variances[0] * priors[:, 2:])
    # Log size ratio, variance-scaled; target for smooth_l1_loss.
    ratio = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    log_ratio = torch.log(ratio) / variances[1]
    return torch.cat([offset, log_ratio], 1)
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
    """Invert `encode`: combine predicted offsets with priors into corner boxes."""
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
    boxes = torch.cat((centers, sizes), 1)
    # Convert (cx, cy, w, h) to corners in place.
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
def log_sum_exp(x):
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
# I = I[v >= 0.01]
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
# keep = torch.Tensor()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
# keep.append(i)
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
# IoU = i / (area(a) + area(b) - i)
rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
union = (rem_areas - inter) + area[i]
IoU = inter/union # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
| true | true |
f72ce180f1675a5345ae733b6da480edb5ada453 | 1,749 | py | Python | test_package/conanfile.py | Twon/units | 7f64e55d044c8a8d9a5c6d4e4f55167409910749 | [
"MIT"
] | null | null | null | test_package/conanfile.py | Twon/units | 7f64e55d044c8a8d9a5c6d4e4f55167409910749 | [
"MIT"
] | null | null | null | test_package/conanfile.py | Twon/units | 7f64e55d044c8a8d9a5c6d4e4f55167409910749 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2018 Mateusz Pusz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from conans import ConanFile, tools, RunEnvironment
from conan.tools.cmake import CMakeToolchain, CMake, CMakeDeps
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
def generate(self):
tc = CMakeToolchain(self, generator=os.getenv("CONAN_CMAKE_GENERATOR"))
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
self.run("test_package", run_environment=True)
| 39.75 | 80 | 0.732419 |
from conans import ConanFile, tools, RunEnvironment
from conan.tools.cmake import CMakeToolchain, CMake, CMakeDeps
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
def generate(self):
tc = CMakeToolchain(self, generator=os.getenv("CONAN_CMAKE_GENERATOR"))
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
self.run("test_package", run_environment=True)
| true | true |
f72ce1a65a7b1d2ab102ec79f285ac7ea3eedaeb | 49,449 | py | Python | coremltools/converters/mil/frontend/torch/test/test_torch_ops.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | 1 | 2020-12-23T15:42:01.000Z | 2020-12-23T15:42:01.000Z | coremltools/converters/mil/frontend/torch/test/test_torch_ops.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | null | null | null | coremltools/converters/mil/frontend/torch/test/test_torch_ops.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import sys
from coremltools.models.utils import _python_version
from coremltools.models.utils import _macos_version
from coremltools.converters.mil import testing_reqs
from coremltools.converters.mil.testing_reqs import *
from .testing_utils import *
from coremltools import TensorType, ImageType, RangeDim
backends = testing_reqs.backends
torch = pytest.importorskip("torch")
pytestmark = pytest.mark.skipif(
sys.version_info >= (3, 8), reason="Segfault with Python 3.8+"
) # rdar://problem/65730375
class TestArgSort:
@pytest.mark.parametrize(
"rank, axis, descending, backend",
itertools.product(
[rank for rank in range(1, 6)],
[-1, 0],
[True, False],
backends
)
)
def test_argsort(self, rank, axis, descending, backend):
shape = tuple(np.random.randint(low=1, high=4, size=rank))
model = ModuleWrapper(
function=torch.argsort, kwargs={"dim": axis, "descending": descending}
)
run_compare_torch(shape, model, backend=backend)
class TestBatchNorm:
@pytest.mark.parametrize(
"num_features, eps, backend",
itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-9], backends),
)
def test_batchnorm(self, num_features, eps, backend):
model = nn.BatchNorm2d(num_features, eps)
run_compare_torch((6, num_features, 5, 5), model, backend=backend)
@pytest.mark.parametrize("backend", backends)
def test_batchnorm_1d(self, backend):
class CRNNBase(nn.Module):
def __init__(self, ch_in, ch_out, kernel_size=3, use_bn=True):
super(CRNNBase, self).__init__()
self.conv = nn.Conv1d(ch_in, ch_out, kernel_size=kernel_size)
self.norm = nn.BatchNorm1d(ch_out)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
model = CRNNBase(ch_in=6, ch_out=16)
run_compare_torch((1, 6, 15), model, backend=backend)
class TestInstanceNorm:
@pytest.mark.parametrize(
"num_features, eps, backend",
itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-09], backends),
)
def test_instancenorm(self, num_features, eps, backend):
if backend == "nn_proto" and eps == 1e-09:
return
model = nn.InstanceNorm2d(num_features, eps)
run_compare_torch((6, num_features, 5, 5), model, backend=backend)
class TestGroupNorm:
@pytest.mark.parametrize(
"group_features, eps,affine, backend",
itertools.product([(16,32), (32,64), (1,1)], [0.1, 1e-05, 1e-09],[True, False], backends),
)
def test_groupnorm(self, group_features, eps, affine, backend):
if backend == "nn_proto" and eps == 1e-09:
return
model = nn.GroupNorm(group_features[0],group_features[1], eps=eps, affine=affine)
run_compare_torch((6, group_features[1], 5, 5), model, backend=backend)
class TestLinear:
@pytest.mark.parametrize(
"in_features, out_features, backend",
itertools.product([10, 25, 100], [3, 6], backends),
)
def test_addmm(self, in_features, out_features, backend):
model = nn.Linear(in_features, out_features)
run_compare_torch((1, in_features), model, backend=backend)
@pytest.mark.parametrize(
"in_features, out_features, backend",
itertools.product([5], [10], backends),
)
def test_linear_rank1_input(self, in_features, out_features, backend):
model = nn.Linear(in_features, out_features)
run_compare_torch((in_features,), model, backend=backend)
class TestConv:
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], backends
),
)
def test_convolution2d(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
run_compare_torch((1, in_channels, height, width), model, backend=backend)
class TestConvTranspose:
@pytest.mark.parametrize(
"width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
),
)
def test_convolution_transpose1d(
self,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups
)
run_compare_torch((1, in_channels, width), model, backend=backend)
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
),
)
def test_convolution_transpose2d(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
run_compare_torch((1, in_channels, height, width), model, backend=backend)
# TODO: rdar://65588783 ([PyTorch] Define and error out on unsupported configuration for output_padding)
# TODO: rdar://65550420 (Add Image Resizing (crop, upsample, resize_bilinear) layers to the MIL backend)
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, output_padding, backend",
list(
itertools.product(
[10],
[10],
[1, 3],
[1, 3],
[1, 3],
[1, 2, 3],
[1, 3],
[1, 2],
[1, 2, (1, 2)],
["nn_proto"],
)
)
+ [
pytest.param(
5, 5, 1, 1, 3, 4, 1, 1, 2, "nn_proto", marks=pytest.mark.xfail
),
pytest.param(
5, 5, 1, 1, 3, 2, 1, 3, 2, "nn_proto", marks=pytest.mark.xfail
),
],
)
def test_convolution_transpose2d_output_padding(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
output_padding,
backend,
groups=1,
):
# Output padding must be less than either stride or dilation
# Skip testing invalid combinations
if isinstance(output_padding, int):
if output_padding >= stride and output_padding >= dilation:
return
elif isinstance(output_padding, tuple):
for _output_padding in output_padding:
if _output_padding >= stride and _output_padding >= dilation:
return
model = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=output_padding,
)
run_compare_torch((1, in_channels, height, width), model, backend=backend)
@pytest.mark.parametrize(
"depth, height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[3, 4], [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
),
)
@pytest.mark.skip(reason="rdar://65198011 (Re-enable Conv3dTranspose and DynamicTile unit tests)")
def test_convolution_transpose3d(
self,
depth,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
):
model = nn.ConvTranspose3d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
run_compare_torch((1, in_channels, depth, height, width), model, backend=backend)
class TestCond:
@pytest.mark.parametrize("backend", backends)
def test_cond(self, backend):
in_features = 1
out_features = 2
class TestNet(nn.Module):
def forward(self, x):
if torch.squeeze(x) < 10.:
return x*10.
else:
return x*2.
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(torch.tensor([1.]), torch_model,
input_as_shape=False, backend=backend)
run_compare_torch(torch.tensor([11.]), torch_model,
input_as_shape=False, backend=backend)
class TestLoop:
@pytest.mark.parametrize("backend", backends)
def test_for_loop(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = 2.0 * x
return x
class TestNet(nn.Module):
input_size = (64,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
for _ in range(7):
x = self.layer(x)
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(model.input_size, torch_model, backend=backend)
@pytest.mark.parametrize("backend", backends)
def test_while_loop(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = 0.5 * x
return x
class TestNet(nn.Module):
input_size = (1,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
while x > 0.01:
x = self.layer(x)
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(model.input_size, torch_model, backend=backend)
class TestUpsample:
@pytest.mark.parametrize(
"output_size, align_corners, backend",
[
x
for x in itertools.product(
[(10, 10), (1, 1), (20, 20), (2, 3), (190, 170)],
[True, False],
backends,
)
],
)
def test_upsample_bilinear2d_with_output_size(
self, output_size, align_corners, backend
):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{"size": output_size, "mode": "bilinear", "align_corners": align_corners,},
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"scales_h, scales_w, align_corners, backend",
[
x
for x in itertools.product(
[2, 3, 4.5], [4, 5, 5.5], [True, False], backends
)
],
)
def test_upsample_bilinear2d_with_scales(
self, scales_h, scales_w, align_corners, backend
):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{
"scale_factor": (scales_h, scales_w),
"mode": "bilinear",
"align_corners": align_corners,
},
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"output_size, backend",
[
x
for x in itertools.product(
[(10, 10), (30, 20), (20, 20), (20, 30), (190, 170)], backends
)
],
)
def test_upsample_nearest2d_with_output_size(self, output_size, backend):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate, {"size": output_size, "mode": "nearest"},
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"scales_h, scales_w, backend",
[x for x in itertools.product([2, 3, 5], [4, 5, 2], backends)],
)
def test_upsample_nearest2d_with_scales(self, scales_h, scales_w, backend):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{"scale_factor": (scales_h, scales_w), "mode": "nearest",},
)
run_compare_torch(input_shape, model, backend=backend)
class TestBranch:
@pytest.mark.parametrize("backend", backends)
def test_if(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = torch.mean(x)
return x
class TestNet(nn.Module):
input_size = (64,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
m = self.layer(x)
if m < 0:
scale = -2.0
else:
scale = 2.0
x = scale * x
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(model.input_size, torch_model, backend=backend)
class TestAvgPool:
# rdar://66066001 (PyTorch converter: enable ceil_mode=True tests for pooling ops)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
itertools.product(
[(1, 3, 15), (1, 1, 7), (1, 3, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
[True, False],
backends,
),
)
def test_avg_pool1d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
model = nn.AvgPool1d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
itertools.product(
[(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
[True, False],
backends,
),
)
def test_avg_pool2d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
model = nn.AvgPool2d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
itertools.product(
[(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
[True, False],
backends,
),
)
def test_avg_pool3d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
model = nn.AvgPool3d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
run_compare_torch(input_shape, model, backend=backend)
class TestAdaptiveMaxPool:
@pytest.mark.parametrize(
"output_size, magnification, delta, depth, backend",
itertools.product(
[(1,1), (3,2),(3,6),(32,32)],
[1,2,4,5,6,7],
[0,11],
[1,2,3],
backends,
),
)
def test_adaptive_max_pool2d(
self, output_size, magnification, delta, depth, backend
):
# input_size = output_size * magnification + delta
input_size = (delta + magnification * output_size[0], delta + magnification * output_size[1])
# since coremltools reproduces PyTorch's kernel sizes and
# offsets for adaptive pooling layers only when input_size is
# a multiple of output_size, we expect failures otherwise
if not (input_size[0] % output_size[0] == 0 and input_size[1] % output_size[1] == 0):
pytest.xfail("Test should fail because input_size is not a multiple of output_size")
n = 1
in_shape = (n,depth) + input_size
model = nn.AdaptiveMaxPool2d(
output_size
)
run_compare_torch(in_shape, model, backend=backend)
class TestMaxPool:
# rdar://66066001 (PyTorch converter: enable ceil_mode=True tests for pooling ops)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 15), (1, 1, 7), (1, 3, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
backends,
),
)
def test_max_pool1d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
model = nn.MaxPool1d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
backends,
),
)
def test_max_pool2d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
model = nn.MaxPool2d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
backends,
),
)
def test_max_pool3d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
model = nn.MaxPool3d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
run_compare_torch(input_shape, model, backend=backend)
class TestLSTM:
def _pytorch_hidden_to_coreml(self, x):
# Split of Direction axis
f, b = torch.split(x, [1] * x.shape[0], dim=0)
# Concat on Hidden Size axis
x = torch.cat((f, b), dim=2)
# NOTE:
# We are omitting a squeeze because the conversion
# function for the mil op lstm unsqueezes the num_layers
# dimension
return x
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
itertools.product(
[7], [5], [1], [True, False], [False], [0.3], [True, False], backends
),
)
def test_lstm(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
model = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional,
)
SEQUENCE_LENGTH = 3
BATCH_SIZE = 2
num_directions = int(bidirectional) + 1
# (seq_len, batch, input_size)
if batch_first:
_input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
inputs = (_input, (h0, c0))
expected_results = model(*inputs)
# Need to do some output reshaping if bidirectional
if bidirectional:
ex_hn = self._pytorch_hidden_to_coreml(expected_results[1][0])
ex_cn = self._pytorch_hidden_to_coreml(expected_results[1][1])
expected_results = (expected_results[0], (ex_hn, ex_cn))
run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend
)
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
[
(7, 3, 2, True, True, 0.3, True, list(backends)[-1]),
(7, 3, 2, False, False, 0.3, False, list(backends)[0]),
],
)
def test_lstm_xexception(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
with pytest.raises(ValueError):
self.test_lstm(
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend=backend,
)
# Workaround for GitHub Issue #824
# i.e. the return h_n/c_n for a converted BLSTM are mangled.
# Therefore, just look at output 'y' (for now) which is correct.
class StripCellAndHidden(nn.Module):
def __init__(self,flagReturnTuple_):
super(StripCellAndHidden, self).__init__()
self.flagReturnTuple = flagReturnTuple_
def forward(self,x):
# Pass tuple, not tensor, to avoid issue in coremltools/converters/mil/frontend/torch/test/testing_utils.py on "if not expected_results:"
# Pass tensor when we need input for LSTM #2 as part of nn.Sequential()
return tuple(x[0]) if self.flagReturnTuple else x[0]
# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True
class TestStackedBLSTM:
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
itertools.product([7], [5], [2], [True, False], [True, False], [0.3], [True], backends),
)
def test_lstm(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
model = nn.Sequential(
nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=True),
StripCellAndHidden(False),
nn.LSTM(
input_size=2*hidden_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=True),
StripCellAndHidden(True)
)
SEQUENCE_LENGTH = 3
BATCH_SIZE = 2
num_directions = int(bidirectional) + 1
# (seq_len, batch, input_size)
if batch_first:
_input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
# Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824
expected_results = model(_input)
run_compare_torch(_input, model, expected_results, input_as_shape=False, backend=backend)
class TestConcat:
# This tests an edge case where the list of tensors to concatenate only
# has one item. NN throws an error for this case, hence why we have to
# run through the full conversion process to test it.
@pytest.mark.parametrize("backend", backends)
def test_cat(self, backend):
class TestNet(nn.Module):
def __init__(self):
super(TestNet, self).__init__()
def forward(self, x):
x = torch.cat((x,), axis=1)
return x
model = TestNet()
run_compare_torch((1, 3, 16, 16), model, backend=backend)
class TestReduction:
@pytest.mark.parametrize(
"input_shape, dim, keepdim, backend",
itertools.product([(2, 2), (1, 1)], [0, 1], [True, False], backends),
)
def test_max(self, input_shape, dim, keepdim, backend):
class TestMax(nn.Module):
def __init__(self):
super(TestMax, self).__init__()
def forward(self, x):
return torch.max(x, dim=dim, keepdim=keepdim)
input_data = torch.rand(input_shape)
model = TestMax()
# TODO: Expected results are flipped due to naming issue:
# rdar://62681982 (Determine the output names of MLModels)
expected_results = model(input_data)[::-1]
run_compare_torch(
input_data,
model,
expected_results=expected_results,
input_as_shape=False,
backend=backend,
)
class TestLayerNorm:
@pytest.mark.parametrize(
"input_shape, eps, backend",
itertools.product([(1, 3, 15, 15), (1, 1, 1, 1)], [1e-5, 1e-9], backends),
)
def test_layer_norm(self, input_shape, eps, backend):
model = nn.LayerNorm(input_shape, eps=eps)
run_compare_torch(input_shape, model, backend=backend)
class TestPixelShuffle:
@pytest.mark.parametrize(
"batch_size, CHW, r, backend",
itertools.product([1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4], backends),
)
def test_pixel_shuffle(self, batch_size, CHW, r, backend):
C, H, W = CHW
input_shape = (batch_size, C * r * r, H, W)
model = nn.PixelShuffle(upscale_factor=r)
run_compare_torch(input_shape, model, backend=backend)
class TestExpand:
@pytest.mark.parametrize(
"backend, shapes",
itertools.product(
backends,
[[(2, 1), (2, 2)], [(3, 1), (-1, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
),
)
def test_expand(self, backend, shapes):
input_shape, output_shape = shapes
class TestModel(torch.nn.Module):
def forward(self, x):
return x.expand(*output_shape)
model = TestModel()
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"backend, input_shapes",
itertools.product(
backends,
[[(2, 1), (2, 2)], [(3, 1), (3, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
),
)
def test_expand_as(self, backend, input_shapes):
class TestModel(torch.nn.Module):
def forward(self, x, y):
return x.expand_as(y)
model = TestModel()
run_compare_torch(input_shapes, model, backend=backend)
class TestExpandDims:
@pytest.mark.parametrize(
"backend, rank_and_axis",
itertools.product(
backends,
[
(rank, axis)
for rank in range(1, 5)
for axis in range(-rank - 1, rank + 1)
],
),
)
def test_unsqueeze(self, backend, rank_and_axis):
rank, axis = rank_and_axis
input_shape = tuple(np.random.randint(low=2, high=10, size=rank))
model = ModuleWrapper(function=torch.unsqueeze, kwargs={"dim": axis})
run_compare_torch(input_shape, model, backend=backend)
class TestSqueeze:
@pytest.mark.parametrize(
"backend, rank_and_axis",
itertools.product(
backends,
[(2, 1), (2, 0), (3, 1), (3, None), (4, None), (4, 2), (5, None), (5, -1),],
),
)
def test_squeeze(self, backend, rank_and_axis):
rank, axis = rank_and_axis
input_shape = list(np.random.randint(low=2, high=10, size=rank))
if axis is not None:
input_shape[axis] = 1
else:
input_shape[0] = 1
input_shape = tuple(input_shape)
model = ModuleWrapper(
function=torch.squeeze, kwargs={"dim": axis} if axis else {}
)
run_compare_torch(input_shape, model, backend=backend)
class TestCumSum:
@pytest.mark.parametrize(
"backend, axis",
itertools.product(
backends,
[-1, 0, 1, 2, 3],
),
)
def test_cumsum(self, backend, axis):
input_shape = list(np.random.randint(low=2, high=10, size=4))
input_shape = tuple(input_shape)
model = ModuleWrapper(
function=torch.cumsum, kwargs={"dim": axis}
)
run_compare_torch(input_shape, model, backend=backend)
class TestReshape:
# TODO: <rdar://66239973> Add dynamic & rank preserving reshape tests for pytorch
@pytest.mark.parametrize(
"backend, output_shape",
itertools.product(backends, [(3, 2), (2, -1), (2, 1, 1, 3),],),
)
def test_reshape(self, backend, output_shape):
input_shape = (2, 3)
model = ModuleWrapper(function=torch.reshape, kwargs={"shape": output_shape})
run_compare_torch(input_shape, model, backend=backend)
class TestFlatten:
@pytest.mark.parametrize(
"backend, start_dim",
itertools.product(backends, [2,-2],),
)
def test_reshape(self, backend, start_dim):
input_shape = (2, 3, 4, 5)
model = ModuleWrapper(function=torch.flatten, kwargs={"start_dim": start_dim})
run_compare_torch(input_shape, model, backend=backend)
class TestGather:
@pytest.mark.xfail(
reason="Load constant not copied properly for integer valued constants. Enable after eng/PR-65551506 is merged",
run=False,
)
@pytest.mark.parametrize(
"rank_and_axis, backend",
itertools.product([(i, j) for i in range(1, 6) for j in range(0, i)], backends),
)
def test_gather_along_axis(self, rank_and_axis, backend):
rank, axis = rank_and_axis
params_shape = np.random.randint(low=2, high=5, size=rank)
indices_shape = np.copy(params_shape)
indices_shape[axis] = np.random.randint(low=1, high=8)
indices = np.random.randint(0, params_shape[axis], size=indices_shape)
params_shape, indices_shape = tuple(params_shape), tuple(indices_shape)
model = ModuleWrapper(
function=torch.gather,
kwargs={"dim": axis, "index": torch.from_numpy(indices)},
)
run_compare_torch([params_shape], model, backend=backend)
class TestActivation:
    """Conversion tests for torch activation layers and their functional forms."""
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        # Also cover the in-place functional variant (relu_).
        model = ModuleWrapper(nn.functional.relu_)
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu6(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU6().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, alpha", itertools.product(backends, [0.1, 0.25, 2.0]),
    )
    def test_prelu(self, backend, alpha):
        input_shape = tuple(np.random.randint(low=5, high=10, size=4))
        # PReLU is given one learnable slope per channel (dim 1 of NCHW).
        C = input_shape[1]
        model = nn.PReLU(C, alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_leaky_relu(self, backend, rank, alpha):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.LeakyReLU(negative_slope=alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        # Also cover the in-place functional variant (leaky_relu_).
        model = ModuleWrapper(nn.functional.leaky_relu_, {'negative_slope': alpha})
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_softmax(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softmax().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, range_val",
        itertools.product(
            backends, range(1, 6), [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)]
        ),
    )
    def test_hardtanh(self, backend, rank, range_val):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Hardtanh(range_val[0], range_val[1]).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        # Also cover the in-place functional variant (hardtanh_).
        model = ModuleWrapper(nn.functional.hardtanh_,
                              {'min_val': range_val[0], 'max_val': range_val[1]})
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_elu(self, backend, rank, alpha):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ELU(alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    # rdar://problem/66557565
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_gelu(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.GELU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_erf(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        # torch.erf has no nn.Module wrapper, so define a throwaway module.
        class ERFActivation(nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x):
                return torch.erf(x)
        model = ERFActivation().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Sigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid_hard(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Hardsigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, beta, threshold", itertools.product(backends, [1, 2, 5], [5, 10, 20]),
    )
    @pytest.mark.skipif(
        _macos_version() <= (11,),
        reason="Parametric SoftPlus segfaults on macOS 10.15 and below. (rdar://problem/66555235)",
    )
    def test_softplus(self, backend, beta, threshold):
        input_shape = (1, 10, 5, 15)
        model = nn.Softplus(beta, threshold).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    # rdar://problem/66557565
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_softsign(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softsign().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
class TestElementWiseUnary:
    """Conversion tests for elementwise unary ops (abs, trig, sqrt, clamp, ...)."""
    @pytest.mark.parametrize(
        "backend, rank, op_string",
        itertools.product(
            backends,
            [4],
            [
                "abs",
                "acos",
                "asin",
                "atan",
                "ceil",
                "cos",
                "cosh",
                "exp",
                "floor",
                "round",
                "sin",
                "sinh",
                "sqrt",
                "square",
                "tan",
                "tanh",
                "sign",
            ],
        ),
    )
    def test_elementwise_no_params(self, backend, rank, op_string):
        # Skip ops not present in the installed torch version.
        if not contains_op(torch, op_string):
            return
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        op_func = getattr(torch, op_string)
        model = ModuleWrapper(function=op_func)
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    ## TODO (rdar://66577921): Needs to move to test_elementwise_no_params after backend is added
    @pytest.mark.parametrize(
        "backend, rank",
        itertools.product(
            ['nn_proto'],
            [4],
        ),
    )
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
    def test_square(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = ModuleWrapper(function=torch.square)
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, clamp_range",
        itertools.product(
            backends,
            [4],
            [(0.0, 1.0), (-1.0, 0.5), (0.2, 0.7)],
        ),
    )
    def test_clamp(self, backend, rank, clamp_range):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = ModuleWrapper(torch.clamp, {'min': clamp_range[0], 'max': clamp_range[1]})
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, threshold",
        itertools.product(
            ['nn_proto'],  # rdar://66597974 Renable for all backends due to missing cast
            [4],
            [(0.0, 0.0), (0.5, 0.5), (0.5, 10), (0.9, 0.0)]
        ),
    )
    def test_threshold(self, backend, rank, threshold):
        # threshold is a (threshold, replacement_value) pair.
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = torch.nn.Threshold(threshold[0], threshold[1]).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, op_string",
        itertools.product(
            backends,
            [4],
            [
                "log",
                "rsqrt",
                "reciprocal",
            ],
        ),
    )
    def test_elementwise_numerically_stable(self, backend, rank, op_string):
        # Inputs are kept well away from zero (rand_range 20..100) so these
        # ops stay numerically well-conditioned.
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        op_func = getattr(torch, op_string)
        model = ModuleWrapper(function=op_func)
        run_compare_torch(
            input_shape, model, backend=backend, rand_range=(20, 100)
        )
class TestMatMul:
    """Conversion test for batched matrix multiplication (torch.bmm)."""

    @pytest.mark.parametrize("backend", backends)
    def test_bmm(self, backend):
        # Two batched operands sharing an inner dimension of 5.
        lhs_shape = (3, 4, 5)
        rhs_shape = (3, 5, 6)
        wrapped = ModuleWrapper(function=torch.bmm)
        run_compare_torch([lhs_shape, rhs_shape], wrapped, backend=backend)
class TestSplit:
    """Conversion tests for torch.split and torch.split_with_sizes."""
    @pytest.mark.parametrize(
        "backend, split_size_or_sections, dim",
        itertools.product(backends, [1, 2, [1, 4]], [0, -2]),
    )
    def test_split(self, backend, split_size_or_sections, dim):
        # Covers both the even-chunk-size form (int) and explicit sections (list).
        input_shape = (5, 2)
        model = ModuleWrapper(function=torch.split,
                              kwargs={"split_size_or_sections": split_size_or_sections, "dim": dim})
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "backend, split_sizes, dim",
        itertools.product(backends, [[1, 4], [3, 2]], [-1, -2]),
    )
    def test_split_with_sizes(self, backend, split_sizes, dim):
        input_shape = (5, 5)
        model = ModuleWrapper(function=torch.split_with_sizes,
                              kwargs={"split_sizes": split_sizes, "dim": dim})
        run_compare_torch(input_shape, model, backend=backend)
class TestUnbind:
    """Conversion tests for torch.unbind along each axis of a rank-3 input."""

    @pytest.mark.parametrize(
        "backend, dim",
        itertools.product(backends, [0, 1, 2]),
    )
    def test_unbind(self, backend, dim):
        shape = (3, 3, 4)
        wrapped = ModuleWrapper(function=torch.unbind, kwargs={"dim": dim})
        run_compare_torch(shape, wrapped, backend=backend)
class TestTranspose:
    """Conversion tests for torch.transpose over ranks 2-5 and several dim pairs."""

    @pytest.mark.parametrize(
        "backend, rank, dims",
        itertools.product(
            backends,
            list(range(2, 6)),
            [(0, 1), (-2, -1), (1, 0), (-1, -2)],
        ),
    )
    def test(self, backend, rank, dims):
        shape = tuple(np.random.randint(low=1, high=4, size=rank))
        first_dim, second_dim = dims
        wrapped = ModuleWrapper(
            function=torch.transpose,
            kwargs={"dim0": first_dim, "dim1": second_dim},
        )
        run_compare_torch(shape, wrapped, backend=backend)
class TestTo:
    """Regression test for dtype-cast handling (int input round-tripped through float)."""
    @pytest.mark.parametrize(
        "backend", backends,
    )
    def test_cast_bug(self, backend):
        class TestModel(torch.nn.Module):
            def forward(self, spans, embedding):
                # float -> relu -> int forces cast ops into the graph.
                spans = spans.float().relu().int()
                max1, _ = torch.max(spans, dim=1, keepdim=False)
                max1, _ = torch.max(max1, dim=1, keepdim=False)
                max2, _ = torch.max(embedding, dim=1, keepdim=False)
                max2, _ = torch.max(max2, dim=1, keepdim=False)
                sigmoided_scores = max1 + max2
                return sigmoided_scores
        model = TestModel()
        run_compare_torch([(1, 21, 2), (1, 6, 384)], model, backend=backend)# [spans.shape, embedding.shape]
class TestSlice:
    """Conversion test for slicing with a runtime (dynamic) start index."""
    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend", backends,
    )
    def test_dynamic_slice(self, backend):
        class DynamicSlicer(torch.nn.Module):
            def __init__(self):
                super(DynamicSlicer, self).__init__()
            def forward(self, x, context_length):
                # The slice start comes from a tensor input, not a constant.
                return x[context_length:, :, :]
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.tokens_embedding = torch.nn.Embedding(10, 10, 0)
                self.context_embedding = torch.nn.Embedding(10, 10, 0)
                self.dynamic_slicer = DynamicSlicer()
            def forward(self, tokens, context, context_length):
                tokens_embeddings = self.tokens_embedding(tokens)
                context_embeddings = self.context_embedding(context)
                embeddings = torch.cat((context_embeddings, tokens_embeddings), dim=0)
                embeddings = self.dynamic_slicer(embeddings, context_length)
                return embeddings
        model = Model()
        batch_size = 5
        inputs = [ TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64),
                   TensorType(name="context", shape=(3, batch_size), dtype=np.int64),
                   TensorType(name="context_length", shape=(), dtype=np.int32),
                   ]
        run_compare_torch(inputs, model, rand_range=(0, 8), backend=backend, use_scripting=False)
class TestRepeat:
    """Conversion tests for Tensor.repeat with random per-axis repeat counts."""

    @pytest.mark.parametrize(
        "backend, rank",
        itertools.product(backends, list(range(1, 6))),
    )
    def test_repeat(self, backend, rank):
        # Draw the shape first, then the repeat counts (order preserved for RNG).
        shape = tuple(np.random.randint(low=2, high=6, size=rank))
        reps = np.random.randint(low=2, high=4, size=rank)
        wrapped = ModuleWrapper(function=lambda t: t.repeat(*reps))
        run_compare_torch(shape, wrapped, backend=backend)
class TestStd:
    """Conversion tests for torch.std (global and per-dim forms)."""
    @pytest.mark.parametrize(
        "backend, unbiased",
        itertools.product(backends, [True, False]),
    )
    def test_std_2_inputs(self, backend, unbiased):
        model = ModuleWrapper(function=torch.std,
                              kwargs={"unbiased": unbiased})
        x = torch.randn(1, 5, 10) * 3
        # Global std is a scalar; unsqueeze so the comparison shapes line up.
        out = torch.std(x, unbiased=unbiased).unsqueeze(0)
        run_compare_torch(x, model, expected_results=out, input_as_shape=False, backend=backend)
    @pytest.mark.parametrize(
        "backend, unbiased, dim, keepdim",
        itertools.product(backends, [True, False], [[0,2], [1], [2]], [True, False]),
    )
    def test_std_4_inputs(self, backend, unbiased, dim, keepdim):
        model = ModuleWrapper(function=torch.std,
                              kwargs={"unbiased": unbiased, "dim" : dim, "keepdim": keepdim})
        input_shape = (2, 5, 10)
        run_compare_torch(input_shape, model, backend=backend)
class TestTopk:
    """Conversion tests for torch.topk over several shapes, dims and k values."""
    @pytest.mark.parametrize(
        "backend, largest, shape_dim_k",
        itertools.product(
            backends,
            [True, False],
            [
                ((4, 6, 7, 3), -1, 2),
                ((10, 3, 4), 2, 2),
                ((10, 5), -2, 3),
                ((5,), 0, 2)
            ],
        ),
    )
    def test_topk(self, backend, largest, shape_dim_k):
        # shape_dim_k packs (input_shape, dim, k) into a single parameter.
        input_shape = shape_dim_k[0]
        dim = shape_dim_k[1]
        k = shape_dim_k[2]
        class TopkModel(nn.Module):
            def __init__(self):
                super(TopkModel, self).__init__()
            def forward(self, x):
                return torch.topk(x, k, dim=dim, largest=largest)
        input_data = torch.rand(input_shape)
        model = TopkModel()
        expected_results = model(input_data)
        # Flatten the (values, indices) named tuple into a list for comparison.
        expected_results = [expected_results.values, expected_results.indices]
        run_compare_torch(
            input_data,
            model,
            expected_results=expected_results,
            input_as_shape=False,
            backend=backend,
        )
import sys
from coremltools.models.utils import _python_version
from coremltools.models.utils import _macos_version
from coremltools.converters.mil import testing_reqs
from coremltools.converters.mil.testing_reqs import *
from .testing_utils import *
from coremltools import TensorType, ImageType, RangeDim
# Backends to exercise in every parametrized test of this module.
backends = testing_reqs.backends
# Skip the whole module if torch is not installed.
torch = pytest.importorskip("torch")
# Module-wide skip: these conversion tests segfault under Python 3.8+.
pytestmark = pytest.mark.skipif(
    sys.version_info >= (3, 8), reason="Segfault with Python 3.8+"
)
class TestArgSort:
    """Conversion tests for torch.argsort across ranks, axes and sort order."""

    @pytest.mark.parametrize(
        "rank, axis, descending, backend",
        itertools.product(
            list(range(1, 6)),
            [-1, 0],
            [True, False],
            backends,
        ),
    )
    def test_argsort(self, rank, axis, descending, backend):
        tensor_shape = tuple(np.random.randint(low=1, high=4, size=rank))
        wrapped = ModuleWrapper(
            function=torch.argsort,
            kwargs={"dim": axis, "descending": descending},
        )
        run_compare_torch(tensor_shape, wrapped, backend=backend)
class TestBatchNorm:
    """Conversion tests for BatchNorm2d and a Conv1d+BatchNorm1d pipeline."""
    @pytest.mark.parametrize(
        "num_features, eps, backend",
        itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-9], backends),
    )
    def test_batchnorm(self, num_features, eps, backend):
        model = nn.BatchNorm2d(num_features, eps)
        run_compare_torch((6, num_features, 5, 5), model, backend=backend)
    @pytest.mark.parametrize("backend", backends)
    def test_batchnorm_1d(self, backend):
        class CRNNBase(nn.Module):
            # NOTE(review): use_bn is accepted but never read — presumably a
            # leftover toggle; confirm before relying on it.
            def __init__(self, ch_in, ch_out, kernel_size=3, use_bn=True):
                super(CRNNBase, self).__init__()
                self.conv = nn.Conv1d(ch_in, ch_out, kernel_size=kernel_size)
                self.norm = nn.BatchNorm1d(ch_out)
            def forward(self, x):
                x = self.conv(x)
                x = self.norm(x)
                return x
        model = CRNNBase(ch_in=6, ch_out=16)
        run_compare_torch((1, 6, 15), model, backend=backend)
class TestInstanceNorm:
    """Conversion tests for nn.InstanceNorm2d."""
    @pytest.mark.parametrize(
        "num_features, eps, backend",
        itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-09], backends),
    )
    def test_instancenorm(self, num_features, eps, backend):
        # NOTE(review): nn_proto is skipped for the smallest eps — presumably
        # a known backend limitation; confirm the tracking reason.
        if backend == "nn_proto" and eps == 1e-09:
            return
        model = nn.InstanceNorm2d(num_features, eps)
        run_compare_torch((6, num_features, 5, 5), model, backend=backend)
class TestGroupNorm:
    """Conversion tests for nn.GroupNorm with and without affine parameters."""
    @pytest.mark.parametrize(
        "group_features, eps,affine, backend",
        itertools.product([(16,32), (32,64), (1,1)], [0.1, 1e-05, 1e-09],[True, False], backends),
    )
    def test_groupnorm(self, group_features, eps, affine, backend):
        # NOTE(review): nn_proto skipped for the smallest eps — presumably a
        # known backend limitation; confirm the tracking reason.
        if backend == "nn_proto" and eps == 1e-09:
            return
        # group_features is (num_groups, num_channels).
        model = nn.GroupNorm(group_features[0],group_features[1], eps=eps, affine=affine)
        run_compare_torch((6, group_features[1], 5, 5), model, backend=backend)
class TestLinear:
    """Conversion tests for nn.Linear (addmm path and a rank-1 input)."""

    @pytest.mark.parametrize(
        "in_features, out_features, backend",
        itertools.product([10, 25, 100], [3, 6], backends),
    )
    def test_addmm(self, in_features, out_features, backend):
        # Batched (rank-2) input exercises torch's addmm lowering.
        linear = nn.Linear(in_features, out_features)
        run_compare_torch((1, in_features), linear, backend=backend)

    @pytest.mark.parametrize(
        "in_features, out_features, backend",
        itertools.product([5], [10], backends),
    )
    def test_linear_rank1_input(self, in_features, out_features, backend):
        # A bare vector input (no batch dim) takes a different code path.
        linear = nn.Linear(in_features, out_features)
        run_compare_torch((in_features,), linear, backend=backend)
class TestConv:
    """Conversion tests for nn.Conv2d over a grid of hyperparameters."""
    @pytest.mark.parametrize(
        "height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], backends
        ),
    )
    def test_convolution2d(
        self,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
        groups=1,  # NOTE(review): accepted but not forwarded to Conv2d — confirm intent
    ):
        model = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        run_compare_torch((1, in_channels, height, width), model, backend=backend)
class TestConvTranspose:
    """Conversion tests for 1d/2d/3d transposed convolutions."""
    @pytest.mark.parametrize(
        "width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
        ),
    )
    def test_convolution_transpose1d(
        self,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
        groups=1,
    ):
        model = nn.ConvTranspose1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups
        )
        run_compare_torch((1, in_channels, width), model, backend=backend)
    @pytest.mark.parametrize(
        "height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
        ),
    )
    def test_convolution_transpose2d(
        self,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
        groups=1,  # NOTE(review): accepted but not forwarded to ConvTranspose2d — confirm intent
    ):
        model = nn.ConvTranspose2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        run_compare_torch((1, in_channels, height, width), model, backend=backend)
    @pytest.mark.parametrize(
        "height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, output_padding, backend",
        list(
            itertools.product(
                [10],
                [10],
                [1, 3],
                [1, 3],
                [1, 3],
                [1, 2, 3],
                [1, 3],
                [1, 2],
                [1, 2, (1, 2)],
                ["nn_proto"],
            )
        )
        + [
            pytest.param(
                5, 5, 1, 1, 3, 4, 1, 1, 2, "nn_proto", marks=pytest.mark.xfail
            ),
            pytest.param(
                5, 5, 1, 1, 3, 2, 1, 3, 2, "nn_proto", marks=pytest.mark.xfail
            ),
        ],
    )
    def test_convolution_transpose2d_output_padding(
        self,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        output_padding,
        backend,
        groups=1,
    ):
        # Skip combinations PyTorch itself rejects: each output_padding entry
        # must be smaller than the corresponding stride or dilation.
        if isinstance(output_padding, int):
            if output_padding >= stride and output_padding >= dilation:
                return
        elif isinstance(output_padding, tuple):
            for _output_padding in output_padding:
                if _output_padding >= stride and _output_padding >= dilation:
                    return
        model = nn.ConvTranspose2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            output_padding=output_padding,
        )
        run_compare_torch((1, in_channels, height, width), model, backend=backend)
    @pytest.mark.parametrize(
        "depth, height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [3, 4], [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
        ),
    )
    @pytest.mark.skip(reason="rdar://65198011 (Re-enable Conv3dTranspose and DynamicTile unit tests)")
    def test_convolution_transpose3d(
        self,
        depth,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
    ):
        model = nn.ConvTranspose3d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        run_compare_torch((1, in_channels, depth, height, width), model, backend=backend)
class TestCond:
    """Conversion tests for scripted models with data-dependent branching."""

    @pytest.mark.parametrize("backend", backends)
    def test_cond(self, backend):
        # Fix: removed unused locals `in_features` / `out_features` that were
        # copy-pasted from a linear-layer test and never read.
        class TestNet(nn.Module):
            def forward(self, x):
                # Branch on a runtime value so scripting emits a real cond op.
                if torch.squeeze(x) < 10.:
                    return x*10.
                else:
                    return x*2.
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        # Exercise both sides of the branch.
        run_compare_torch(torch.tensor([1.]), torch_model,
            input_as_shape=False, backend=backend)
        run_compare_torch(torch.tensor([11.]), torch_model,
            input_as_shape=False, backend=backend)
class TestLoop:
    """Conversion tests for scripted for- and while-loops over a traced layer."""
    @pytest.mark.parametrize("backend", backends)
    def test_for_loop(self, backend):
        class TestLayer(nn.Module):
            def __init__(self):
                super(TestLayer, self).__init__()
            def forward(self, x):
                x = 2.0 * x
                return x
        class TestNet(nn.Module):
            input_size = (64,)
            def __init__(self):
                super(TestNet, self).__init__()
                layer = TestLayer()
                # Trace the inner layer so the scripted loop calls a traced module.
                self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
            def forward(self, x):
                for _ in range(7):
                    x = self.layer(x)
                return x
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        run_compare_torch(model.input_size, torch_model, backend=backend)
    @pytest.mark.parametrize("backend", backends)
    def test_while_loop(self, backend):
        class TestLayer(nn.Module):
            def __init__(self):
                super(TestLayer, self).__init__()
            def forward(self, x):
                x = 0.5 * x
                return x
        class TestNet(nn.Module):
            input_size = (1,)
            def __init__(self):
                super(TestNet, self).__init__()
                layer = TestLayer()
                self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
            def forward(self, x):
                # Halving converges to the 0.01 threshold, so the loop terminates.
                while x > 0.01:
                    x = self.layer(x)
                return x
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        run_compare_torch(model.input_size, torch_model, backend=backend)
class TestUpsample:
    """Conversion tests for F.interpolate (bilinear/nearest, size and scale forms)."""
    @pytest.mark.parametrize(
        "output_size, align_corners, backend",
        [
            x
            for x in itertools.product(
                [(10, 10), (1, 1), (20, 20), (2, 3), (190, 170)],
                [True, False],
                backends,
            )
        ],
    )
    def test_upsample_bilinear2d_with_output_size(
        self, output_size, align_corners, backend
    ):
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate,
            {"size": output_size, "mode": "bilinear", "align_corners": align_corners,},
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "scales_h, scales_w, align_corners, backend",
        [
            x
            for x in itertools.product(
                [2, 3, 4.5], [4, 5, 5.5], [True, False], backends
            )
        ],
    )
    def test_upsample_bilinear2d_with_scales(
        self, scales_h, scales_w, align_corners, backend
    ):
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate,
            {
                "scale_factor": (scales_h, scales_w),
                "mode": "bilinear",
                "align_corners": align_corners,
            },
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "output_size, backend",
        [
            x
            for x in itertools.product(
                [(10, 10), (30, 20), (20, 20), (20, 30), (190, 170)], backends
            )
        ],
    )
    def test_upsample_nearest2d_with_output_size(self, output_size, backend):
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate, {"size": output_size, "mode": "nearest"},
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "scales_h, scales_w, backend",
        [x for x in itertools.product([2, 3, 5], [4, 5, 2], backends)],
    )
    def test_upsample_nearest2d_with_scales(self, scales_h, scales_w, backend):
        # Nearest mode only supports integer scale factors.
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate,
            {"scale_factor": (scales_h, scales_w), "mode": "nearest",},
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestBranch:
    """Conversion test for a scripted if/else whose condition is data-dependent."""
    @pytest.mark.parametrize("backend", backends)
    def test_if(self, backend):
        class TestLayer(nn.Module):
            def __init__(self):
                super(TestLayer, self).__init__()
            def forward(self, x):
                x = torch.mean(x)
                return x
        class TestNet(nn.Module):
            input_size = (64,)
            def __init__(self):
                super(TestNet, self).__init__()
                layer = TestLayer()
                self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
            def forward(self, x):
                # Branch on the traced layer's scalar output.
                m = self.layer(x)
                if m < 0:
                    scale = -2.0
                else:
                    scale = 2.0
                x = scale * x
                return x
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        run_compare_torch(model.input_size, torch_model, backend=backend)
class TestAvgPool:
    """Conversion tests for AvgPool1d/2d/3d over kernel/stride/padding grids."""
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
        itertools.product(
            [(1, 3, 15), (1, 1, 7), (1, 3, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            [True, False],
            backends,
        ),
    )
    def test_avg_pool1d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.AvgPool1d(
            kernel_size,
            stride,
            padding,
            ceil_mode=ceil_mode,
            count_include_pad=include_pad,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
        itertools.product(
            [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            [True, False],
            backends,
        ),
    )
    def test_avg_pool2d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.AvgPool2d(
            kernel_size,
            stride,
            padding,
            ceil_mode=ceil_mode,
            count_include_pad=include_pad,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
        itertools.product(
            [(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            [True, False],
            backends,
        ),
    )
    def test_avg_pool3d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.AvgPool3d(
            kernel_size,
            stride,
            padding,
            ceil_mode=ceil_mode,
            count_include_pad=include_pad,
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestAdaptiveMaxPool:
    """Conversion tests for nn.AdaptiveMaxPool2d."""
    @pytest.mark.parametrize(
        "output_size, magnification, delta, depth, backend",
        itertools.product(
            [(1,1), (3,2),(3,6),(32,32)],
            [1,2,4,5,6,7],
            [0,11],
            [1,2,3],
            backends,
        ),
    )
    def test_adaptive_max_pool2d(
        self, output_size, magnification, delta, depth, backend
    ):
        # Input is output_size scaled by `magnification`, optionally offset by
        # `delta` so it is no longer an exact multiple of output_size.
        input_size = (delta + magnification * output_size[0], delta + magnification * output_size[1])
        # Adaptive pooling is supported only when input_size is an exact
        # multiple of output_size; any other combination is expected to fail.
        if not (input_size[0] % output_size[0] == 0 and input_size[1] % output_size[1] == 0):
            pytest.xfail("Test should fail because input_size is not a multiple of output_size")
        n = 1
        in_shape = (n,depth) + input_size
        model = nn.AdaptiveMaxPool2d(
            output_size
        )
        run_compare_torch(in_shape, model, backend=backend)
class TestMaxPool:
    """Conversion tests for MaxPool1d/2d/3d over kernel/stride/padding grids."""
    # rdar://66066001 (PyTorch converter: enable ceil_mode=True tests for pooling ops)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, backend",
        itertools.product(
            [(1, 3, 15), (1, 1, 7), (1, 3, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            backends,
        ),
    )
    def test_max_pool1d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.MaxPool1d(
            kernel_size,
            stride,
            padding,
            dilation=1,
            return_indices=False,
            ceil_mode=ceil_mode,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, backend",
        itertools.product(
            [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            backends,
        ),
    )
    def test_max_pool2d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.MaxPool2d(
            kernel_size,
            stride,
            padding,
            dilation=1,
            return_indices=False,
            ceil_mode=ceil_mode,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, backend",
        itertools.product(
            [(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            backends,
        ),
    )
    def test_max_pool3d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.MaxPool3d(
            kernel_size,
            stride,
            padding,
            dilation=1,
            return_indices=False,
            ceil_mode=ceil_mode,
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestLSTM:
    """Conversion tests for single-layer (optionally bidirectional) nn.LSTM."""
    def _pytorch_hidden_to_coreml(self, x):
        # Reshape torch's (num_directions, batch, hidden) state into the
        # layout the converted model emits.
        # Split of Direction axis
        f, b = torch.split(x, [1] * x.shape[0], dim=0)
        # Concat on Hidden Size axis
        x = torch.cat((f, b), dim=2)
        # NOTE:
        # We are omitting a squeeze because the conversion
        # function for the mil op lstm unsqueezes the num_layers
        # dimension
        return x
    @pytest.mark.parametrize(
        "input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
        itertools.product(
            [7], [5], [1], [True, False], [False], [0.3], [True, False], backends
        ),
    )
    def test_lstm(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias,
        batch_first,
        dropout,
        bidirectional,
        backend,
    ):
        model = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
        )
        SEQUENCE_LENGTH = 3
        BATCH_SIZE = 2
        num_directions = int(bidirectional) + 1
        # (seq_len, batch, input_size)
        if batch_first:
            _input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
        else:
            _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
        h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
        c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
        inputs = (_input, (h0, c0))
        expected_results = model(*inputs)
        # Need to do some output reshaping if bidirectional
        if bidirectional:
            ex_hn = self._pytorch_hidden_to_coreml(expected_results[1][0])
            ex_cn = self._pytorch_hidden_to_coreml(expected_results[1][1])
            expected_results = (expected_results[0], (ex_hn, ex_cn))
        run_compare_torch(
            inputs, model, expected_results, input_as_shape=False, backend=backend
        )
    @pytest.mark.parametrize(
        "input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
        [
            (7, 3, 2, True, True, 0.3, True, list(backends)[-1]),
            (7, 3, 2, False, False, 0.3, False, list(backends)[0]),
        ],
    )
    def test_lstm_xexception(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias,
        batch_first,
        dropout,
        bidirectional,
        backend,
    ):
        # num_layers == 2 is unsupported; conversion must raise ValueError.
        with pytest.raises(ValueError):
            self.test_lstm(
                input_size,
                hidden_size,
                num_layers,
                bias,
                batch_first,
                dropout,
                bidirectional,
                backend=backend,
            )
# Workaround for GitHub Issue #824: the h_n/c_n outputs of a converted BLSTM
# are mangled, so for now we only check the sequence output 'y', which is
# correct.
class StripCellAndHidden(nn.Module):
    """Keep only the LSTM sequence output 'y', discarding the (h_n, c_n) state."""

    def __init__(self, flagReturnTuple_):
        super(StripCellAndHidden, self).__init__()
        self.flagReturnTuple = flagReturnTuple_

    def forward(self, x):
        # Return a tuple to dodge the "if not expected_results:" check in
        # testing_utils; return a tensor when feeding LSTM #2 in nn.Sequential().
        sequence_output = x[0]
        if self.flagReturnTuple:
            return tuple(sequence_output)
        return sequence_output
# Regression test for GitHub Issue #810: a stacked (num_layers == 2) bidirectional LSTM.
class TestStackedBLSTM:
    """Conversion test for two stacked bidirectional LSTMs built as a Sequential."""
    @pytest.mark.parametrize(
        "input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
        itertools.product([7], [5], [2], [True, False], [True, False], [0.3], [True], backends),
    )
    def test_lstm(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias,
        batch_first,
        dropout,
        bidirectional,
        backend,
    ):
        # Two single-layer BLSTMs stacked manually; StripCellAndHidden drops
        # the per-layer (h_n, c_n) state between them (GitHub Issue #824).
        model = nn.Sequential(
            nn.LSTM(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=1,
                bias=bias,
                batch_first=batch_first,
                dropout=dropout,
                bidirectional=True),
            StripCellAndHidden(False),
            nn.LSTM(
                input_size=2*hidden_size,
                hidden_size=hidden_size,
                num_layers=1,
                bias=bias,
                batch_first=batch_first,
                dropout=dropout,
                bidirectional=True),
            StripCellAndHidden(True)
        )
        SEQUENCE_LENGTH = 3
        BATCH_SIZE = 2
        num_directions = int(bidirectional) + 1
        # (seq_len, batch, input_size)
        if batch_first:
            _input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
        else:
            _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
        # Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824
        expected_results = model(_input)
        run_compare_torch(_input, model, expected_results, input_as_shape=False, backend=backend)
class TestConcat:
    # Edge case: torch.cat with a single-item tensor list. NN errors out on
    # this, so the full conversion pipeline has to be exercised to cover it.
    @pytest.mark.parametrize("backend", backends)
    def test_cat(self, backend):
        class TestNet(nn.Module):
            def __init__(self):
                super(TestNet, self).__init__()

            def forward(self, x):
                return torch.cat((x,), axis=1)

        run_compare_torch((1, 3, 16, 16), TestNet(), backend=backend)
class TestReduction:
    """Conversion tests for reduction ops that return (values, indices)."""
    @pytest.mark.parametrize(
        "input_shape, dim, keepdim, backend",
        itertools.product([(2, 2), (1, 1)], [0, 1], [True, False], backends),
    )
    def test_max(self, input_shape, dim, keepdim, backend):
        class TestMax(nn.Module):
            def __init__(self):
                super(TestMax, self).__init__()
            def forward(self, x):
                return torch.max(x, dim=dim, keepdim=keepdim)
        input_data = torch.rand(input_shape)
        model = TestMax()
        # TODO: Expected results are flipped due to naming issue:
        # rdar://62681982 (Determine the output names of MLModels)
        expected_results = model(input_data)[::-1]
        run_compare_torch(
            input_data,
            model,
            expected_results=expected_results,
            input_as_shape=False,
            backend=backend,
        )
class TestLayerNorm:
    """Conversion tests for nn.LayerNorm at two shapes and two epsilons."""

    @pytest.mark.parametrize(
        "input_shape, eps, backend",
        itertools.product([(1, 3, 15, 15), (1, 1, 1, 1)], [1e-5, 1e-9], backends),
    )
    def test_layer_norm(self, input_shape, eps, backend):
        # Normalize over the entire input shape.
        layer = nn.LayerNorm(input_shape, eps=eps)
        run_compare_torch(input_shape, layer, backend=backend)
class TestPixelShuffle:
    """Conversion tests for nn.PixelShuffle with several upscale factors."""

    @pytest.mark.parametrize(
        "batch_size, CHW, r, backend",
        itertools.product([1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4], backends),
    )
    def test_pixel_shuffle(self, batch_size, CHW, r, backend):
        channels, height, width = CHW
        # PixelShuffle consumes r*r channel groups per output channel.
        shape = (batch_size, channels * r * r, height, width)
        run_compare_torch(shape, nn.PixelShuffle(upscale_factor=r), backend=backend)
class TestExpand:
    """Conversion tests for Tensor.expand and Tensor.expand_as."""
    @pytest.mark.parametrize(
        "backend, shapes",
        itertools.product(
            backends,
            [[(2, 1), (2, 2)], [(3, 1), (-1, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
        ),
    )
    def test_expand(self, backend, shapes):
        # shapes is (input_shape, target_shape); -1 keeps a dim unchanged.
        input_shape, output_shape = shapes
        class TestModel(torch.nn.Module):
            def forward(self, x):
                return x.expand(*output_shape)
        model = TestModel()
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "backend, input_shapes",
        itertools.product(
            backends,
            [[(2, 1), (2, 2)], [(3, 1), (3, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
        ),
    )
    def test_expand_as(self, backend, input_shapes):
        class TestModel(torch.nn.Module):
            def forward(self, x, y):
                return x.expand_as(y)
        model = TestModel()
        run_compare_torch(input_shapes, model, backend=backend)
class TestExpandDims:
    """Conversion tests for torch.unsqueeze at every valid axis per rank."""

    @pytest.mark.parametrize(
        "backend, rank_and_axis",
        itertools.product(
            backends,
            [
                (rank, axis)
                for rank in range(1, 5)
                for axis in range(-rank - 1, rank + 1)
            ],
        ),
    )
    def test_unsqueeze(self, backend, rank_and_axis):
        tensor_rank, axis = rank_and_axis
        shape = tuple(np.random.randint(low=2, high=10, size=tensor_rank))
        wrapped = ModuleWrapper(function=torch.unsqueeze, kwargs={"dim": axis})
        run_compare_torch(shape, wrapped, backend=backend)
class TestSqueeze:
@pytest.mark.parametrize(
"backend, rank_and_axis",
itertools.product(
backends,
[(2, 1), (2, 0), (3, 1), (3, None), (4, None), (4, 2), (5, None), (5, -1),],
),
)
def test_squeeze(self, backend, rank_and_axis):
rank, axis = rank_and_axis
input_shape = list(np.random.randint(low=2, high=10, size=rank))
if axis is not None:
input_shape[axis] = 1
else:
input_shape[0] = 1
input_shape = tuple(input_shape)
model = ModuleWrapper(
function=torch.squeeze, kwargs={"dim": axis} if axis else {}
)
run_compare_torch(input_shape, model, backend=backend)
class TestCumSum:
@pytest.mark.parametrize(
"backend, axis",
itertools.product(
backends,
[-1, 0, 1, 2, 3],
),
)
def test_cumsum(self, backend, axis):
input_shape = list(np.random.randint(low=2, high=10, size=4))
input_shape = tuple(input_shape)
model = ModuleWrapper(
function=torch.cumsum, kwargs={"dim": axis}
)
run_compare_torch(input_shape, model, backend=backend)
class TestReshape:
# TODO: <rdar://66239973> Add dynamic & rank preserving reshape tests for pytorch
@pytest.mark.parametrize(
"backend, output_shape",
itertools.product(backends, [(3, 2), (2, -1), (2, 1, 1, 3),],),
)
def test_reshape(self, backend, output_shape):
input_shape = (2, 3)
model = ModuleWrapper(function=torch.reshape, kwargs={"shape": output_shape})
run_compare_torch(input_shape, model, backend=backend)
class TestFlatten:
@pytest.mark.parametrize(
"backend, start_dim",
itertools.product(backends, [2,-2],),
)
def test_reshape(self, backend, start_dim):
input_shape = (2, 3, 4, 5)
model = ModuleWrapper(function=torch.flatten, kwargs={"start_dim": start_dim})
run_compare_torch(input_shape, model, backend=backend)
class TestGather:
@pytest.mark.xfail(
reason="Load constant not copied properly for integer valued constants. Enable after eng/PR-65551506 is merged",
run=False,
)
@pytest.mark.parametrize(
"rank_and_axis, backend",
itertools.product([(i, j) for i in range(1, 6) for j in range(0, i)], backends),
)
def test_gather_along_axis(self, rank_and_axis, backend):
rank, axis = rank_and_axis
params_shape = np.random.randint(low=2, high=5, size=rank)
indices_shape = np.copy(params_shape)
indices_shape[axis] = np.random.randint(low=1, high=8)
indices = np.random.randint(0, params_shape[axis], size=indices_shape)
params_shape, indices_shape = tuple(params_shape), tuple(indices_shape)
model = ModuleWrapper(
function=torch.gather,
kwargs={"dim": axis, "index": torch.from_numpy(indices)},
)
run_compare_torch([params_shape], model, backend=backend)
class TestActivation:
@pytest.mark.parametrize(
"backend, rank", itertools.product(backends, range(1, 6)),
)
def test_relu(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.ReLU().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
model = ModuleWrapper(nn.functional.relu_)
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank", itertools.product(backends, range(1, 6)),
)
def test_relu6(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.ReLU6().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, alpha", itertools.product(backends, [0.1, 0.25, 2.0]),
)
def test_prelu(self, backend, alpha):
input_shape = tuple(np.random.randint(low=5, high=10, size=4))
C = input_shape[1]
model = nn.PReLU(C, alpha).eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank, alpha",
itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
)
def test_leaky_relu(self, backend, rank, alpha):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.LeakyReLU(negative_slope=alpha).eval()
run_compare_torch(
input_shape, model, backend=backend,
)
model = ModuleWrapper(nn.functional.leaky_relu_, {'negative_slope': alpha})
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank", itertools.product(backends, range(1, 6)),
)
def test_softmax(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.Softmax().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank, range_val",
itertools.product(
backends, range(1, 6), [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)]
),
)
def test_hardtanh(self, backend, rank, range_val):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.Hardtanh(range_val[0], range_val[1]).eval()
run_compare_torch(
input_shape, model, backend=backend,
)
model = ModuleWrapper(nn.functional.hardtanh_,
{'min_val': range_val[0], 'max_val': range_val[1]})
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank, alpha",
itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
)
def test_elu(self, backend, rank, alpha):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.ELU(alpha).eval()
run_compare_torch(
input_shape, model, backend=backend,
)
# rdar://problem/66557565
@pytest.mark.parametrize(
"backend, rank", itertools.product(['nn_proto'], range(1, 6)),
)
def test_gelu(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.GELU().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
@pytest.mark.parametrize(
"backend, rank", itertools.product(backends, range(1, 6)),
)
def test_erf(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
class ERFActivation(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.erf(x)
model = ERFActivation().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank", itertools.product(backends, range(1, 6)),
)
def test_sigmoid(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.Sigmoid().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
@pytest.mark.parametrize(
"backend, rank", itertools.product(backends, range(1, 6)),
)
def test_sigmoid_hard(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.Hardsigmoid().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, beta, threshold", itertools.product(backends, [1, 2, 5], [5, 10, 20]),
)
@pytest.mark.skipif(
_macos_version() <= (11,),
reason="Parametric SoftPlus segfaults on macOS 10.15 and below. (rdar://problem/66555235)",
)
def test_softplus(self, backend, beta, threshold):
input_shape = (1, 10, 5, 15)
model = nn.Softplus(beta, threshold).eval()
run_compare_torch(
input_shape, model, backend=backend,
)
# rdar://problem/66557565
@pytest.mark.parametrize(
"backend, rank", itertools.product(['nn_proto'], range(1, 6)),
)
def test_softsign(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = nn.Softsign().eval()
run_compare_torch(
input_shape, model, backend=backend,
)
class TestElementWiseUnary:
@pytest.mark.parametrize(
"backend, rank, op_string",
itertools.product(
backends,
[4],
[
"abs",
"acos",
"asin",
"atan",
"ceil",
"cos",
"cosh",
"exp",
"floor",
"round",
"sin",
"sinh",
"sqrt",
"square",
"tan",
"tanh",
"sign",
],
),
)
def test_elementwise_no_params(self, backend, rank, op_string):
if not contains_op(torch, op_string):
return
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
op_func = getattr(torch, op_string)
model = ModuleWrapper(function=op_func)
run_compare_torch(
input_shape, model, backend=backend,
)
## TODO (rdar://66577921): Needs to move to test_elementwise_no_params after backend is added
@pytest.mark.parametrize(
"backend, rank",
itertools.product(
['nn_proto'],
[4],
),
)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_square(self, backend, rank):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = ModuleWrapper(function=torch.square)
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank, clamp_range",
itertools.product(
backends,
[4],
[(0.0, 1.0), (-1.0, 0.5), (0.2, 0.7)],
),
)
def test_clamp(self, backend, rank, clamp_range):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = ModuleWrapper(torch.clamp, {'min': clamp_range[0], 'max': clamp_range[1]})
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank, threshold",
itertools.product(
['nn_proto'], # rdar://66597974 Renable for all backends due to missing cast
[4],
[(0.0, 0.0), (0.5, 0.5), (0.5, 10), (0.9, 0.0)]
),
)
def test_threshold(self, backend, rank, threshold):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
model = torch.nn.Threshold(threshold[0], threshold[1]).eval()
run_compare_torch(
input_shape, model, backend=backend,
)
@pytest.mark.parametrize(
"backend, rank, op_string",
itertools.product(
backends,
[4],
[
"log",
"rsqrt",
"reciprocal",
],
),
)
def test_elementwise_numerically_stable(self, backend, rank, op_string):
input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
op_func = getattr(torch, op_string)
model = ModuleWrapper(function=op_func)
run_compare_torch(
input_shape, model, backend=backend, rand_range=(20, 100)
)
class TestMatMul:
@pytest.mark.parametrize("backend", backends)
def test_bmm(self, backend):
shape_x, shape_y = (3,4,5), (3,5,6)
model = ModuleWrapper(function=torch.bmm)
run_compare_torch(
[shape_x, shape_y], model, backend=backend,
)
class TestSplit:
@pytest.mark.parametrize(
"backend, split_size_or_sections, dim",
itertools.product(backends, [1, 2, [1, 4]], [0, -2]),
)
def test_split(self, backend, split_size_or_sections, dim):
input_shape = (5, 2)
model = ModuleWrapper(function=torch.split,
kwargs={"split_size_or_sections": split_size_or_sections, "dim": dim})
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"backend, split_sizes, dim",
itertools.product(backends, [[1, 4], [3, 2]], [-1, -2]),
)
def test_split_with_sizes(self, backend, split_sizes, dim):
input_shape = (5, 5)
model = ModuleWrapper(function=torch.split_with_sizes,
kwargs={"split_sizes": split_sizes, "dim": dim})
run_compare_torch(input_shape, model, backend=backend)
class TestUnbind:
@pytest.mark.parametrize(
"backend, dim",
itertools.product(backends,[0,1,2]),
)
def test_unbind(self, backend, dim):
input_shape = (3, 3, 4)
model = ModuleWrapper(function=torch.unbind,
kwargs={"dim": dim})
run_compare_torch(input_shape, model, backend=backend)
class TestTranspose:
@pytest.mark.parametrize(
"backend, rank, dims",
itertools.product(backends, list(range(2, 6)),
[(0, 1), (-2, -1), (1, 0), (-1, -2)]),
)
def test(self, backend, rank, dims):
input_shape = tuple(np.random.randint(low=1, high=4, size=rank))
model = ModuleWrapper(function=torch.transpose,
kwargs={"dim0": dims[0], "dim1": dims[1]})
run_compare_torch(input_shape, model, backend=backend)
class TestTo:
@pytest.mark.parametrize(
"backend", backends,
)
def test_cast_bug(self, backend):
class TestModel(torch.nn.Module):
def forward(self, spans, embedding):
spans = spans.float().relu().int()
max1, _ = torch.max(spans, dim=1, keepdim=False)
max1, _ = torch.max(max1, dim=1, keepdim=False)
max2, _ = torch.max(embedding, dim=1, keepdim=False)
max2, _ = torch.max(max2, dim=1, keepdim=False)
sigmoided_scores = max1 + max2
return sigmoided_scores
model = TestModel()
run_compare_torch([(1, 21, 2), (1, 6, 384)], model, backend=backend)# [spans.shape, embedding.shape]
class TestSlice:
@pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
@pytest.mark.parametrize(
"backend", backends,
)
def test_dynamic_slice(self, backend):
class DynamicSlicer(torch.nn.Module):
def __init__(self):
super(DynamicSlicer, self).__init__()
def forward(self, x, context_length):
return x[context_length:, :, :]
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.tokens_embedding = torch.nn.Embedding(10, 10, 0)
self.context_embedding = torch.nn.Embedding(10, 10, 0)
self.dynamic_slicer = DynamicSlicer()
def forward(self, tokens, context, context_length):
tokens_embeddings = self.tokens_embedding(tokens)
context_embeddings = self.context_embedding(context)
embeddings = torch.cat((context_embeddings, tokens_embeddings), dim=0)
embeddings = self.dynamic_slicer(embeddings, context_length)
return embeddings
model = Model()
batch_size = 5
inputs = [ TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64),
TensorType(name="context", shape=(3, batch_size), dtype=np.int64),
TensorType(name="context_length", shape=(), dtype=np.int32),
]
run_compare_torch(inputs, model, rand_range=(0, 8), backend=backend, use_scripting=False)
class TestRepeat:
@pytest.mark.parametrize(
"backend, rank",
itertools.product(backends, list(range(1, 6))),
)
def test_repeat(self, backend, rank):
input_shape = np.random.randint(low=2, high=6, size=rank)
repeats = np.random.randint(low=2, high=4, size=rank)
input_shape = tuple(input_shape)
model = ModuleWrapper(function=lambda x: x.repeat(*repeats))
run_compare_torch(input_shape, model, backend=backend)
class TestStd:
@pytest.mark.parametrize(
"backend, unbiased",
itertools.product(backends, [True, False]),
)
def test_std_2_inputs(self, backend, unbiased):
model = ModuleWrapper(function=torch.std,
kwargs={"unbiased": unbiased})
x = torch.randn(1, 5, 10) * 3
out = torch.std(x, unbiased=unbiased).unsqueeze(0)
run_compare_torch(x, model, expected_results=out, input_as_shape=False, backend=backend)
@pytest.mark.parametrize(
"backend, unbiased, dim, keepdim",
itertools.product(backends, [True, False], [[0,2], [1], [2]], [True, False]),
)
def test_std_4_inputs(self, backend, unbiased, dim, keepdim):
model = ModuleWrapper(function=torch.std,
kwargs={"unbiased": unbiased, "dim" : dim, "keepdim": keepdim})
input_shape = (2, 5, 10)
run_compare_torch(input_shape, model, backend=backend)
class TestTopk:
@pytest.mark.parametrize(
"backend, largest, shape_dim_k",
itertools.product(
backends,
[True, False],
[
((4, 6, 7, 3), -1, 2),
((10, 3, 4), 2, 2),
((10, 5), -2, 3),
((5,), 0, 2)
],
),
)
def test_topk(self, backend, largest, shape_dim_k):
input_shape = shape_dim_k[0]
dim = shape_dim_k[1]
k = shape_dim_k[2]
class TopkModel(nn.Module):
def __init__(self):
super(TopkModel, self).__init__()
def forward(self, x):
return torch.topk(x, k, dim=dim, largest=largest)
input_data = torch.rand(input_shape)
model = TopkModel()
expected_results = model(input_data)
expected_results = [expected_results.values, expected_results.indices]
run_compare_torch(
input_data,
model,
expected_results=expected_results,
input_as_shape=False,
backend=backend,
) | true | true |
f72ce252a89798bc51b81ba3b3a05a173b92e02c | 8,096 | py | Python | Natural Language Processing with Attention Models/Week 4 - Chatbot/w4_unittest.py | meet-seth/Coursera-Deep-Learning | 6fbf9d406468c825ffa1ff2e177dbfd43084bace | [
"MIT"
] | 362 | 2020-10-08T07:34:25.000Z | 2022-03-30T05:11:30.000Z | NLP/Learn_by_deeplearning.ai/Course 4 - Attention Models /Labs/Week 4/w4_unittest.py | abcd1758323829/skills | 195fad43e99de5efe6491817ad2b79e12665cc2a | [
"MIT"
] | 7 | 2020-07-07T16:10:23.000Z | 2021-06-04T08:17:55.000Z | NLP/Learn_by_deeplearning.ai/Course 4 - Attention Models /Labs/Week 4/w4_unittest.py | abcd1758323829/skills | 195fad43e99de5efe6491817ad2b79e12665cc2a | [
"MIT"
] | 238 | 2020-10-08T12:01:31.000Z | 2022-03-25T08:10:42.000Z | import numpy as np
import trax
#from trax import layers as tl
#from trax.fastmath import numpy as fastnp
#from trax.supervised import training
# UNIT TEST for UNQ_C1
def test_get_conversation(target):
data = {'file1.json': {'log':[{'text': 'hi'},
{'text': 'hello'},
{'text': 'nice'}]},
'file2.json':{'log':[{'text': 'a b'},
{'text': ''},
{'text': 'good '},
{'text': 'no?'}]}}
res1 = target('file1.json', data)
res2 = target('file2.json', data)
expected1 = ' Person 1: hi Person 2: hello Person 1: nice'
expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'
success = 0
fails = 0
try:
assert res1 == expected1
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res1, 'x \nExpected: ', expected1)
fails += 1
try:
assert res2 == expected2
success += 1
except:
print('Error in test 2 \nResult : ', res2, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C2
def test_reversible_layer_forward(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])
input_vector2 = np.array([1] * 128)
expected2 = np.array([3] * 64 + [7] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C3
def test_reversible_layer_reverse(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])
input_vector2 = np.array([1] * 128)
expected2 = np.array([1] * 64 + [-1] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C4
def test_ReformerLM(target):
test_cases = [
{
"name":"layer_len_check",
"expected":11,
"error":"We found {} layers in your model. It should be 11.\nCheck the LSTM stack before the dense layer"
},
{
"name":"simple_test_check",
"expected":"Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]",
"error":"The ReformerLM is not defined properly."
}
]
temp_model = target('train')
success = 0
fails = 0
for test_case in test_cases:
try:
if test_case['name'] == "simple_test_check":
assert test_case["expected"] == str(temp_model).replace(' ', '').replace('\n','')
success += 1
if test_case['name'] == "layer_len_check":
if test_case["expected"] == len(temp_model.sublayers):
success += 1
else:
print(test_case["error"].format(len(temp_model.sublayers)))
fails += 1
except:
print(test_case['error'])
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C5
def test_tasks(train_task, eval_task):
target = train_task
success = 0
fails = 0
# Test the labeled data parameter for train_task
try:
strlabel = str(target._labeled_data)
assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
success += 1
except:
fails += 1
print("Wrong labeled data parameter in train_task")
# Test the cross entropy loss data parameter
try:
strlabel = str(target._loss_layer)
assert(strlabel == "CrossEntropyLoss_in3")
success += 1
except:
fails += 1
print("Wrong loss functions. CrossEntropyLoss_in3 was expected")
# Test the optimizer parameter
try:
assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))
success += 1
except:
fails += 1
print("Wrong optimizer")
# Test the schedule parameter
try:
assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))
success += 1
except:
fails += 1
print("Wrong learning rate schedule type")
# Test the _n_steps_per_checkpoint parameter
try:
assert(target._n_steps_per_checkpoint==10)
success += 1
except:
fails += 1
print("Wrong checkpoint step frequency")
target = eval_task
# Test the labeled data parameter for eval_task
try:
strlabel = str(target._labeled_data)
assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
success += 1
except:
fails += 1
print("Wrong labeled data parameter in eval_task")
# Test the metrics in eval_task
try:
strlabel = str(target._metrics).replace(' ', '')
assert(strlabel == "[CrossEntropyLoss_in3,Accuracy_in3]")
success += 1
except:
fails += 1
print(f"Wrong metrics. found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]")
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
| 31.874016 | 580 | 0.561018 | import numpy as np
import trax
def test_get_conversation(target):
data = {'file1.json': {'log':[{'text': 'hi'},
{'text': 'hello'},
{'text': 'nice'}]},
'file2.json':{'log':[{'text': 'a b'},
{'text': ''},
{'text': 'good '},
{'text': 'no?'}]}}
res1 = target('file1.json', data)
res2 = target('file2.json', data)
expected1 = ' Person 1: hi Person 2: hello Person 1: nice'
expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'
success = 0
fails = 0
try:
assert res1 == expected1
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res1, 'x \nExpected: ', expected1)
fails += 1
try:
assert res2 == expected2
success += 1
except:
print('Error in test 2 \nResult : ', res2, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_reversible_layer_forward(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])
input_vector2 = np.array([1] * 128)
expected2 = np.array([3] * 64 + [7] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_reversible_layer_reverse(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])
input_vector2 = np.array([1] * 128)
expected2 = np.array([1] * 64 + [-1] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_ReformerLM(target):
test_cases = [
{
"name":"layer_len_check",
"expected":11,
"error":"We found {} layers in your model. It should be 11.\nCheck the LSTM stack before the dense layer"
},
{
"name":"simple_test_check",
"expected":"Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]",
"error":"The ReformerLM is not defined properly."
}
]
temp_model = target('train')
success = 0
fails = 0
for test_case in test_cases:
try:
if test_case['name'] == "simple_test_check":
assert test_case["expected"] == str(temp_model).replace(' ', '').replace('\n','')
success += 1
if test_case['name'] == "layer_len_check":
if test_case["expected"] == len(temp_model.sublayers):
success += 1
else:
print(test_case["error"].format(len(temp_model.sublayers)))
fails += 1
except:
print(test_case['error'])
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_tasks(train_task, eval_task):
target = train_task
success = 0
fails = 0
try:
strlabel = str(target._labeled_data)
assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
success += 1
except:
fails += 1
print("Wrong labeled data parameter in train_task")
try:
strlabel = str(target._loss_layer)
assert(strlabel == "CrossEntropyLoss_in3")
success += 1
except:
fails += 1
print("Wrong loss functions. CrossEntropyLoss_in3 was expected")
try:
assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))
success += 1
except:
fails += 1
print("Wrong optimizer")
try:
assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))
success += 1
except:
fails += 1
print("Wrong learning rate schedule type")
try:
assert(target._n_steps_per_checkpoint==10)
success += 1
except:
fails += 1
print("Wrong checkpoint step frequency")
target = eval_task
try:
strlabel = str(target._labeled_data)
assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
success += 1
except:
fails += 1
print("Wrong labeled data parameter in eval_task")
try:
strlabel = str(target._metrics).replace(' ', '')
assert(strlabel == "[CrossEntropyLoss_in3,Accuracy_in3]")
success += 1
except:
fails += 1
print(f"Wrong metrics. found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]")
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
| true | true |
f72ce29ddb1dc2f405d1811a6553b8fcc76db122 | 7,445 | py | Python | many_requests/many_requests_.py | 0xflotus/many_requests | dab3963eff471669f7b372cf488a2d9623270fab | [
"MIT"
] | null | null | null | many_requests/many_requests_.py | 0xflotus/many_requests | dab3963eff471669f7b372cf488a2d9623270fab | [
"MIT"
] | null | null | null | many_requests/many_requests_.py | 0xflotus/many_requests | dab3963eff471669f7b372cf488a2d9623270fab | [
"MIT"
] | null | null | null |
import logging
from json import JSONDecodeError
from typing import List, Optional, Dict, Union
import asks
import trio
from asks.errors import BadHttpResponse
from asks.response_objects import Response
from h11 import RemoteProtocolError
from .easy_async import EasyAsync, delayed, zip_kw
from .common import BadResponse, N_WORKERS_DEFAULT, N_CONNECTIONS_DEFAULT, is_collection
class ManyRequests:
    def __init__(
        self,
        n_workers=N_WORKERS_DEFAULT,
        n_connections=N_CONNECTIONS_DEFAULT,
        retries=10,
        retry_sleep=3,
        ok_codes=(200,),
        ok_response_func=None,
        json=False,
    ):
        """
        Dead easy interface for executing many HTTP requests asynchronously.

        Args:
            n_workers: Max number of workers to use. Too many workers will use a lot of memory and increase startup
                time, too few can lead to slower execution.
            n_connections: Max number of open connections to have open at once. The number of connections is also
                limited by the OS. For example, by default MacOS has a limit of 256 and Ubuntu has ~66k. These limits
                can be changed with OS configuration.
            retries: Number of retries to attempt if a request fails
            retry_sleep: How long to wait in seconds before retrying a request
            ok_codes: A sequence of HTTP status codes to accept as ok. If `any`, all responses will be assumed to be ok
            ok_response_func: A function to apply to the response to determine if ok. Should return True/False.
            json: Parse response body as json and return instead of full Response object

        Examples:
            Execute 10 GET requests to https://example.org

            >>> responses = ManyRequests(n_workers=5, n_connections=5)(
            >>>     method='GET', url=[f'https://example.org' for i in range(10)])
        """
        self.n_workers = n_workers
        self.n_connections = n_connections
        self.session = None  # Created lazily per __call__ so each run gets a fresh pool.
        self.retries = retries
        self.retry_sleep = retry_sleep
        self.ok_codes = ok_codes
        self.ok_response_func = ok_response_func
        self.json = json
        self.requests = None
        self.responses = None

    def __call__(
        self,
        method: Union[str, List[str]],
        url: Union[str, List[str]],
        params=None,
        data=None,
        json=None,
        headers=None,
        cookies=None,
        auth=None,
    ) -> List[Union[Response, BadResponse]]:
        """
        Process asynchronously many requests, handling bad responses. Return the responses in the same order.
        If no ok response was obtained after retires a `BadResponse` will be included in the corresponding position of
        the output. A `BadResponse` will contain the last response and error reason.

        Arguments mimic `asks.request`_, which in turn mimics `requests.request`_.

        Each argument could be a single item or a list of items. When they are a single item, that attribute is
        duplicated for every request.

        Args:
            method: HTTP method type `GET`, `OPTIONS`, `HEAD`, `POST`, `PUT`, `PATCH`, or `DELETE`.
            url: URL of the Request
            params: Dictionary, list of tuples or bytes to send in the query string of the Request
            data: Dictionary, list of tuples, bytes, or file-like object to send in the body of the Request
            json: A JSON serializable Python object to send in the body of the Request
            headers: Dictionary of HTTP Headers to send with the Request
            cookies: Dict or CookieJar object to send with the Request
            auth: Auth tuple to enable Basic/Digest/Custom HTTP Auth

        Returns:
            responses: A list of responses in the same order of the requests. Will include a `BadResponse` in the
                position of a request where no good response was obtained.

        .. _asks.request:
            https://asks.readthedocs.io/en/latest/overview-of-funcs-and-args.html
        .. _requests.request:
            https://2.python-requests.org/en/master/api/#requests.request
        """
        # Number of requests = length of the shortest sized collection argument,
        # matching how `zip_kw` pairs the per-request keyword values.
        length = None
        for arg in (method, url, params, data, json, headers, cookies, auth):
            if not is_collection(arg):
                continue
            try:
                size = len(arg)
            except TypeError:
                # Collections without a length (e.g. generators) cannot
                # contribute to the count.
                continue
            if length is None or size < length:
                length = size

        self.session = asks.Session(connections=self.n_connections)
        responses = EasyAsync(n_workers=self.n_workers)(
            tasks=(
                delayed(self._runner)(request_kwargs=kwargs)
                for kwargs in zip_kw(
                    method=method,
                    url=url,
                    params=params,
                    data=data,
                    json=json,
                    headers=headers,
                    cookies=cookies,
                    auth=auth,
                )
            ),
            length=length,
        )
        return responses

    async def _runner(self, request_kwargs):
        """Task which handles completing a HTTP request and errors that arise.

        Makes up to ``retries + 1`` attempts, sleeping ``retry_sleep`` seconds
        between attempts (not after the final one). Returns the ok `Response`
        (or its parsed JSON body when ``self.json`` is set); if every attempt
        fails, returns the last `BadResponse` instead of raising.
        """
        last_error = None
        for attempt_i in range(self.retries + 1):
            # Reset per attempt so the failure log below can never report a
            # stale response left over from a previous attempt.
            response = None
            try:
                try:
                    response = await self.session.request(**request_kwargs)
                except RemoteProtocolError as e:
                    raise BadResponse('RemoteProtocolError', reason='RemoteProtocolError',
                                      attempt_num=attempt_i) from e
                except BadHttpResponse as e:
                    raise BadResponse('BadHttpResponse', reason='BadHttpResponse',
                                      attempt_num=attempt_i) from e
                if self.ok_codes != "any" and response.status_code not in self.ok_codes:
                    raise BadResponse(f"Bad response status code: {response.status_code}. Should be in {self.ok_codes}",
                                      response=response, reason='bad_status_code', attempt_num=attempt_i)
                if self.ok_response_func is not None and not self.ok_response_func(response):
                    raise BadResponse('Not OK response determined by `ok_response_func`', response=response,
                                      reason='ok_response_func', attempt_num=attempt_i)
                if self.json:
                    try:
                        response = response.json()
                    except JSONDecodeError as e:
                        raise BadResponse('Cannot decode JSON', response=response, reason='JSONDecodeError',
                                          attempt_num=attempt_i) from e
                logging.debug(f"OK Response {request_kwargs}")
                return response
            except BadResponse as e:
                code = response.status_code if response is not None else None
                text = response.text if response is not None else None
                logging.info(
                    f"BAD Response {request_kwargs}: Attempt {attempt_i}. Error {type(e).__name__}. Code: {code}. Body: {text}"
                )
                last_error = e
                if attempt_i < self.retries:
                    # Back off before retrying; skip the pointless sleep after
                    # the final attempt.
                    await trio.sleep(self.retry_sleep)
        logging.warning(
            f"FAILED Request {request_kwargs}: Permanently failed. Last error: {last_error.description}"
        )
        return last_error
| 41.361111 | 127 | 0.595567 |
import logging
from json import JSONDecodeError
from typing import List, Optional, Dict, Union
import asks
import trio
from asks.errors import BadHttpResponse
from asks.response_objects import Response
from h11 import RemoteProtocolError
from .easy_async import EasyAsync, delayed, zip_kw
from .common import BadResponse, N_WORKERS_DEFAULT, N_CONNECTIONS_DEFAULT, is_collection
class ManyRequests:
def __init__(
self,
n_workers=N_WORKERS_DEFAULT,
n_connections=N_CONNECTIONS_DEFAULT,
retries=10,
retry_sleep=3,
ok_codes=(200,),
ok_response_func=None,
json=False,
):
self.n_workers = n_workers
self.n_connections = n_connections
self.session = None
self.retries = retries
self.retry_sleep = retry_sleep
self.ok_codes = ok_codes
self.ok_response_func = ok_response_func
self.json = json
self.requests = None
self.responses = None
def __call__(
self,
method: Union[str, List[str]],
url: Union[str, List[str]],
params=None,
data=None,
json=None,
headers=None,
cookies=None,
auth=None,
) -> List[Union[Response, BadResponse]]:
length = None
for e in (method, url, params, data, json, headers, cookies, auth):
if not is_collection(e):
continue
try:
l = len(e)
if length is None or l < length:
length = l
except TypeError:
pass
self.session = asks.Session(connections=self.n_connections)
responses = EasyAsync(n_workers=self.n_workers)(
tasks=(
delayed(self._runner)(request_kwargs=kwargs)
for kwargs in zip_kw(
method=method,
url=url,
params=params,
data=data,
json=json,
headers=headers,
cookies=cookies,
auth=auth,
)
),
length=length,
)
return responses
async def _runner(self, request_kwargs):
last_error = None
for attempt_i in range(0, self.retries+1):
try:
try:
response = await self.session.request(**request_kwargs)
except RemoteProtocolError as e:
raise BadResponse('RemoteProtocolError', reason='RemoteProtocolError', attempt_num=attempt_i)
except BadHttpResponse as e:
raise BadResponse('BadHttpResponse', reason='BadHttpResponse', attempt_num=attempt_i)
if self.ok_codes != "any" and response.status_code not in self.ok_codes:
raise BadResponse(f"Bad response status code: {response.status_code}. Should be in {self.ok_codes}",
response=response, reason='bad_status_code', attempt_num=attempt_i)
if self.ok_response_func is not None and not self.ok_response_func(response):
raise BadResponse('Not OK response determined by `ok_response_func`', response=response,
reason='ok_response_func', attempt_num=attempt_i)
if self.json:
try:
response = response.json()
except JSONDecodeError as e:
raise BadResponse('Cannot decode JSON', response=response, reason='JSONDecodeError',
attempt_num=attempt_i)
logging.debug(f"OK Response {request_kwargs}")
return response
except BadResponse as e:
try:
code, text = response.status_code, response.text
except NameError:
code, text = None, None
logging.info(
f"BAD Response {request_kwargs}: Attempt {attempt_i}. Error {type(e).__name__}. Code: {code}. Body: {text}"
)
last_error = e
await trio.sleep(self.retry_sleep)
logging.warning(
f"FAILED Request {request_kwargs}: Permanently failed. Last error: {last_error.description}"
)
return last_error
| true | true |
f72ce2a2454d1eafcfe45c1437983329f74f1dde | 1,804 | py | Python | src/robot/parsing/restsupport.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-09-21T17:13:24.000Z | 2019-09-24T19:13:25.000Z | src/robot/parsing/restsupport.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/parsing/restsupport.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-12-30T14:05:02.000Z | 2019-12-30T14:05:02.000Z | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
try:
from docutils.core import publish_doctree
from docutils.parsers.rst.directives import register_directive
from docutils.parsers.rst.directives.body import CodeBlock
except ImportError:
raise DataError("Using reStructuredText test data requires having "
"'docutils' module version 0.9 or newer installed.")
class CaptureRobotData(CodeBlock):
    """Code-block directive that collects Robot Framework data from reST."""

    def run(self):
        """Store the block's content if tagged ``robotframework``; render nothing."""
        if 'robotframework' not in self.arguments:
            return []
        store = RobotDataStorage(self.state_machine.document)
        store.add_data(self.content)
        return []
# Capture Robot Framework data from every common code-block directive name.
register_directive('code', CaptureRobotData)
register_directive('code-block', CaptureRobotData)
register_directive('sourcecode', CaptureRobotData)
class RobotDataStorage(object):
    """Accumulates Robot Framework data rows on a docutils document tree.

    All instances created for the same ``doctree`` share one underlying list,
    stored as the ``_robot_data`` attribute of the tree itself.
    """

    def __init__(self, doctree):
        # Attach the shared storage list to the doctree on first use.
        if not hasattr(doctree, '_robot_data'):
            doctree._robot_data = []
        self._robot_data = doctree._robot_data

    def add_data(self, rows):
        """Append the given iterable of data rows to the shared storage."""
        self._robot_data.extend(rows)

    def get_data(self):
        """Return all stored rows joined into one newline-separated string."""
        return '\n'.join(self._robot_data)

    def has_data(self):
        """Return True when at least one row has been stored."""
        return len(self._robot_data) > 0
| 32.214286 | 75 | 0.720621 |
from robot.errors import DataError
try:
from docutils.core import publish_doctree
from docutils.parsers.rst.directives import register_directive
from docutils.parsers.rst.directives.body import CodeBlock
except ImportError:
raise DataError("Using reStructuredText test data requires having "
"'docutils' module version 0.9 or newer installed.")
class CaptureRobotData(CodeBlock):
def run(self):
if 'robotframework' in self.arguments:
store = RobotDataStorage(self.state_machine.document)
store.add_data(self.content)
return []
register_directive('code', CaptureRobotData)
register_directive('code-block', CaptureRobotData)
register_directive('sourcecode', CaptureRobotData)
class RobotDataStorage(object):
def __init__(self, doctree):
if not hasattr(doctree, '_robot_data'):
doctree._robot_data = []
self._robot_data = doctree._robot_data
def add_data(self, rows):
self._robot_data.extend(rows)
def get_data(self):
return '\n'.join(self._robot_data)
def has_data(self):
return bool(self._robot_data)
| true | true |
f72ce2fc8328c2744c2230cbb122e8c573eb15fd | 3,512 | py | Python | app/app/settings.py | mlobina/recipe-app-API | 0ded3c37a84c109c469d1dd7db015e8d73d3e9f6 | [
"MIT"
] | null | null | null | app/app/settings.py | mlobina/recipe-app-API | 0ded3c37a84c109c469d1dd7db015e8d73d3e9f6 | [
"MIT"
] | null | null | null | app/app/settings.py | mlobina/recipe-app-API | 0ded3c37a84c109c469d1dd7db015e8d73d3e9f6 | [
"MIT"
] | 1 | 2021-08-25T06:29:11.000Z | 2021-08-25T06:29:11.000Z | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment when available; the hard-coded value is a
# development-only fallback and must never be used in production.
SECRET_KEY = os.environ.get(
    'SECRET_KEY',
    'django-insecure-sumebxzlqerp)6^8g!b%n-)r03)4pxwioril1^4igma-3_iw=c')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'core',
    'user',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Connection details come from the environment (e.g. docker-compose).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASS'),
        'HOST': os.environ.get('DB_HOST'),
        "PORT": '5432'
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# Use the project's custom user model from the core app.
AUTH_USER_MODEL = 'core.User'
| 25.266187 | 91 | 0.691059 |
from pathlib import Path
import os
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-sumebxzlqerp)6^8g!b%n-)r03)4pxwioril1^4igma-3_iw=c'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
'HOST': os.environ.get('DB_HOST'),
"PORT": '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
| true | true |
f72ce338215fc493ab34b010dba156b5b7042cc3 | 948 | py | Python | barbican/model/migration/alembic_migrations/versions/4ecde3a3a72a_add_cas_column_to_project_quotas_table.py | mail2nsrajesh/barbican | d16d932b77486e9b2f8c6d30e628a6e66517b1a6 | [
"Apache-2.0"
] | 1 | 2020-03-01T05:01:57.000Z | 2020-03-01T05:01:57.000Z | barbican/model/migration/alembic_migrations/versions/4ecde3a3a72a_add_cas_column_to_project_quotas_table.py | kkutysllb/barbican | 7b14d983e0dce6dcffe9781b05c52335b8203fc7 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | barbican/model/migration/alembic_migrations/versions/4ecde3a3a72a_add_cas_column_to_project_quotas_table.py | kkutysllb/barbican | 7b14d983e0dce6dcffe9781b05c52335b8203fc7 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add cas column to project quotas table
Revision ID: 4ecde3a3a72a
Revises: 10220ccbe7fa
Create Date: 2015-09-09 09:40:08.540064
"""
# revision identifiers, used by Alembic.
revision = '4ecde3a3a72a'
down_revision = '10220ccbe7fa'

from alembic import op
import sqlalchemy as sa


def upgrade():
    """Add the nullable integer ``cas`` quota column to ``project_quotas``."""
    op.add_column(
        'project_quotas',
        sa.Column('cas', sa.Integer(), nullable=True))


def downgrade():
    """Revert :func:`upgrade` by dropping the ``cas`` column."""
    op.drop_column('project_quotas', 'cas')
| 27.882353 | 75 | 0.741561 |
revision = '4ecde3a3a72a'
down_revision = '10220ccbe7fa'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'project_quotas',
sa.Column('cas', sa.Integer(), nullable=True))
| true | true |
f72ce5c56cac97953d6d62c376de59376e33bee5 | 1,625 | py | Python | ParameterFiles/params_bootes_3as_ext.py | dunkenj/eazy-pype | 9cb8ac765d659ace36c00293a5809fc4a066e1ec | [
"MIT"
] | 1 | 2019-07-25T10:55:18.000Z | 2019-07-25T10:55:18.000Z | ParameterFiles/params_bootes_3as_ext.py | dunkenj/eazy-pype | 9cb8ac765d659ace36c00293a5809fc4a066e1ec | [
"MIT"
] | null | null | null | ParameterFiles/params_bootes_3as_ext.py | dunkenj/eazy-pype | 9cb8ac765d659ace36c00293a5809fc4a066e1ec | [
"MIT"
] | 1 | 2018-12-18T16:31:41.000Z | 2018-12-18T16:31:41.000Z | """
Main inputs:
(Change for all fields)
"""
# Path to the compiled EAZY photometric-redshift binary.
eazypath = '/data2/ken/photoz/eazy-photoz/src/eazy'
# All intermediate and output files are written under this folder.
working_folder = '/data2/ken/photoz/bootes_3as_ext'
photometry_catalog = 'Bootes_merged_Icorr_2014a_all_ap3_mags.zs.fits.mod'
photometry_format = 'fits'
filter_file = 'filter.bootes_mbrown_2014a.res'
translate_file = 'brown.zphot.2014.translate'
# Column names in the photometry catalog.
zspec_col = 'z_spec'
flux_col = 'flux'
fluxerr_col ='fluxerr'
# Pipeline stage toggles: zero-points, subcatalogs, full fits, stellar fits,
# hierarchical-Bayes combination and final merging.
do_zp = True
do_zp_tests = False
do_subcats = False
do_full = False
do_stellar = False
do_hb = False
do_merge = False
"""
Training parameters
"""
# Number of cross-validation folds and the held-out fraction for testing.
Ncrossval = 1
test_fraction = 0.2
process_outliers = False
correct_extinction = False
"""
Fitting Parameters
(Change only when needed)
"""
# Templates: Any combination of 'eazy', 'swire', 'atlas'
templates = ['eazy', 'atlas', 'cosmos']#, 'swire']#, 'cosmos', 'atlas'] #,'cosmos', 'atlas']
# One fitting mode and one defaults file per template set above.
fitting_mode = ['a', '1', '1']
defaults = ['defaults/zphot.eazy',
            'defaults/zphot.atlas_ext',
            'defaults/zphot.cosmos']
#'defaults/zphot.eazy',
#'defaults/zphot.atlas',
#'defaults/zphot.swire']
stellar_params = 'defaults/zphot.pickles'
# Per-template-set error tweaks (one entry per template set).
additional_errors = [0.0, 0.1, 0.1]
template_error_norm = [1., 0., 0.]
template_error_file = ''
lambda_fit_max = [5., 30., 30.]
"""
Combination Parameters
"""
include_prior = True
fbad_prior = 'mag' # 'flat', 'vol' or 'mag'
prior_parameter_path = 'bootes_I_prior_coeff.npz'
prior_fname = 'ch2_mag'
prior_colname = 'ch2_mag'
alpha_colname = 'I_mag'
gpz = False
"""
System Parameters
(Specific system only - fixed after installation)
"""
# Chunk size for batched processing and number of CPU workers.
block_size = 1e4
ncpus = 10
| 18.895349 | 94 | 0.687385 | eazypath = '/data2/ken/photoz/eazy-photoz/src/eazy'
working_folder = '/data2/ken/photoz/bootes_3as_ext'
photometry_catalog = 'Bootes_merged_Icorr_2014a_all_ap3_mags.zs.fits.mod'
photometry_format = 'fits'
filter_file = 'filter.bootes_mbrown_2014a.res'
translate_file = 'brown.zphot.2014.translate'
zspec_col = 'z_spec'
flux_col = 'flux'
fluxerr_col ='fluxerr'
do_zp = True
do_zp_tests = False
do_subcats = False
do_full = False
do_stellar = False
do_hb = False
do_merge = False
Ncrossval = 1
test_fraction = 0.2
process_outliers = False
correct_extinction = False
templates = ['eazy', 'atlas', 'cosmos'],
'defaults/zphot.atlas_ext',
'defaults/zphot.cosmos']
stellar_params = 'defaults/zphot.pickles'
additional_errors = [0.0, 0.1, 0.1]
template_error_norm = [1., 0., 0.]
template_error_file = ''
lambda_fit_max = [5., 30., 30.]
include_prior = True
fbad_prior = 'mag'
prior_parameter_path = 'bootes_I_prior_coeff.npz'
prior_fname = 'ch2_mag'
prior_colname = 'ch2_mag'
alpha_colname = 'I_mag'
gpz = False
block_size = 1e4
ncpus = 10
| true | true |
f72ce61d9bb99f838eedbbc565639f110f2dfc86 | 14,763 | py | Python | tf_agents/bandits/agents/neural_linucb_agent_test.py | PeterDomanski/agents | 63c1c76f16f2068a637b26282c34a8825583e73e | [
"Apache-2.0"
] | 1 | 2021-07-16T04:44:19.000Z | 2021-07-16T04:44:19.000Z | tf_agents/bandits/agents/neural_linucb_agent_test.py | PeterDomanski/agents | 63c1c76f16f2068a637b26282c34a8825583e73e | [
"Apache-2.0"
] | null | null | null | tf_agents/bandits/agents/neural_linucb_agent_test.py | PeterDomanski/agents | 63c1c76f16f2068a637b26282c34a8825583e73e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.agents.neural_linucb_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.bandits.agents import neural_linucb_agent
from tf_agents.bandits.agents import utils as bandit_utils
from tf_agents.bandits.drivers import driver_utils
from tf_agents.bandits.policies import policy_utilities
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import # TF internal
tfd = tfp.distributions
class DummyNet(network.Network):
  """Minimal encoder network: one dense layer with deterministic weights.

  Maps a `context_dim`-dimensional observation to an `encoding_dim`-dimensional
  encoding. The kernel is all ones and the bias all zeros so that test
  expectations can be computed by hand.
  """

  def __init__(self, observation_spec, encoding_dim=10):
    super(DummyNet, self).__init__(
        observation_spec, state_spec=(), name='DummyNet')
    context_dim = observation_spec.shape[0]
    # NOTE(review): appends directly to the private Keras `_layers` list so the
    # layer is visible through `self.layers` in `call`; relies on Keras
    # internals — confirm against the pinned TF version.
    self._layers.append(
        tf.keras.layers.Dense(
            encoding_dim,
            kernel_initializer=tf.compat.v1.initializers.constant(
                np.ones([context_dim, encoding_dim])),
            bias_initializer=tf.compat.v1.initializers.constant(
                np.zeros([encoding_dim]))))

  def call(self, inputs, step_type=None, network_state=()):
    """Casts inputs to float32, applies the layers, returns (out, state)."""
    del step_type  # Unused by this stateless dummy network.
    inputs = tf.cast(inputs, tf.float32)
    for layer in self.layers:
      inputs = layer(inputs)
    return inputs, network_state
def test_cases():
  """Named parameter sets covering two batch-size/context-dim combinations."""
  cases = [
      {
          'testcase_name': '_batch1_contextdim10',
          'batch_size': 1,
          'context_dim': 10,
      },
      {
          'testcase_name': '_batch4_contextdim5',
          'batch_size': 4,
          'context_dim': 5,
      },
  ]
  return parameterized.named_parameters(*cases)
def _get_initial_and_final_steps(batch_size, context_dim):
  """Builds matching FIRST/LAST `TimeStep` pairs with deterministic contexts.

  Observations are the integers 0..batch_size*context_dim-1 reshaped to
  [batch_size, context_dim]; the final step's observation is the same array
  shifted by +100. Rewards in the final step are uniform random in [0, 1).
  """
  observation = np.array(range(batch_size * context_dim)).reshape(
      [batch_size, context_dim])
  reward = np.random.uniform(0.0, 1.0, [batch_size])
  initial_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.FIRST, dtype=tf.int32, shape=[batch_size],
          name='step_type'),
      tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      tf.constant(observation, dtype=tf.float32,
                  shape=[batch_size, context_dim], name='observation'))
  final_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.LAST, dtype=tf.int32, shape=[batch_size],
          name='step_type'),
      tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      # Shifted observation distinguishes the final step from the initial one.
      tf.constant(observation + 100.0, dtype=tf.float32,
                  shape=[batch_size, context_dim], name='observation'))
  return initial_step, final_step
def _get_initial_and_final_steps_with_action_mask(batch_size,
                                                  context_dim,
                                                  num_actions=None):
  """Like `_get_initial_and_final_steps`, but observations carry action masks.

  Each observation is a (context, mask) tuple. The mask is 1 - identity, so
  sample i disallows action i, exercising the agent's
  observation_and_action_constraint_splitter path.
  """
  observation = np.array(range(batch_size * context_dim)).reshape(
      [batch_size, context_dim])
  observation = tf.constant(observation, dtype=tf.float32)
  mask = 1 - tf.eye(batch_size, num_columns=num_actions, dtype=tf.int32)
  reward = np.random.uniform(0.0, 1.0, [batch_size])
  initial_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.FIRST,
          dtype=tf.int32,
          shape=[batch_size],
          name='step_type'),
      tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      (observation, mask))
  final_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.LAST,
          dtype=tf.int32,
          shape=[batch_size],
          name='step_type'),
      tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      # Same mask for the final step; observation is shifted by +100.
      (observation + 100.0, mask))
  return initial_step, final_step
def _get_action_step(action):
  """Wraps a concrete action array in a `PolicyStep` with empty policy info."""
  action_tensor = tf.convert_to_tensor(action)
  return policy_step.PolicyStep(
      action=action_tensor,
      info=policy_utilities.PolicyInfo())
def _get_experience(initial_step, action_step, final_step):
  """Builds a one-step bandit trajectory with a time dimension of size 1."""
  single_experience = driver_utils.trajectory_for_bandit(
      initial_step, action_step, final_step)
  # Insert the 'time' axis that agent.train() expects on every field.
  add_time_dim = lambda t: tf.expand_dims(tf.convert_to_tensor(t), 1)
  return tf.nest.map_structure(add_time_dim, single_experience)
@test_util.run_all_in_graph_and_eager_modes
class NeuralLinUCBAgentTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(NeuralLinUCBAgentTest, self).setUp()
tf.compat.v1.enable_resource_variables()
@test_cases()
def testInitializeAgentNumTrainSteps0(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testInitializeAgentNumTrainSteps10(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps0(self, batch_size=1, context_dim=10):
"""Check NeuralLinUCBAgent updates when behaving like LinUCB."""
# Construct a `Trajectory` for the given action, observation, reward.
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
# Construct an agent and perform the update.
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))
loss_info = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(loss_info)
final_a = self.evaluate(agent.cov_matrix)
final_b = self.evaluate(agent.data_vector)
# Compute the expected updated estimates.
observations_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.observation, tf.float64),
[batch_size, context_dim]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
rewards_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
expected_a_updated_list = []
expected_b_updated_list = []
for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(
observations_list, rewards_list)):
encoded_observations_for_arm, _ = encoder(observations_for_arm)
encoded_observations_for_arm = tf.cast(
encoded_observations_for_arm, dtype=tf.float64)
num_samples_for_arm_current = tf.cast(
tf.shape(rewards_for_arm)[0], tf.float64)
num_samples_for_arm_total = num_samples_for_arm_current
# pylint: disable=cell-var-from-loop
def true_fn():
a_new = tf.matmul(
encoded_observations_for_arm,
encoded_observations_for_arm,
transpose_a=True)
b_new = bandit_utils.sum_reward_weighted_observations(
rewards_for_arm, encoded_observations_for_arm)
return a_new, b_new
def false_fn():
return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),
tf.zeros([encoding_dim], dtype=tf.float64))
a_new, b_new = tf.cond(
tf.squeeze(num_samples_for_arm_total) > 0,
true_fn,
false_fn)
expected_a_updated_list.append(self.evaluate(a_new))
expected_b_updated_list.append(self.evaluate(b_new))
# Check that the actual updated estimates match the expectations.
self.assertAllClose(expected_a_updated_list, final_a)
self.assertAllClose(expected_b_updated_list, final_b)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10(self, batch_size=1, context_dim=10):
"""Check NeuralLinUCBAgent updates when behaving like eps-greedy."""
# Construct a `Trajectory` for the given action, observation, reward.
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
# Construct an agent and perform the update.
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions, encoding_dim)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
variable_collection=variable_collection,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10MaskedActions(
self, batch_size=1, context_dim=10):
"""Check updates when behaving like eps-greedy and using masked actions."""
# Construct a `Trajectory` for the given action, observation, reward.
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps_with_action_mask(
batch_size, context_dim, num_actions)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
# Construct an agent and perform the update.
observation_spec = (tensor_spec.TensorSpec([context_dim], tf.float32),
tensor_spec.TensorSpec([num_actions], tf.int32))
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec[0])
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
def testInitializeRestoreVariableCollection(self):
if not tf.executing_eagerly():
self.skipTest('Test only works in eager mode.')
num_actions = 5
encoding_dim = 7
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions=num_actions, encoding_dim=encoding_dim)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(variable_collection.num_samples_list)
checkpoint = tf.train.Checkpoint(variable_collection=variable_collection)
checkpoint_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_dir, 'checkpoint')
checkpoint.save(file_prefix=checkpoint_prefix)
variable_collection.actions_from_reward_layer.assign(False)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint_load_status = checkpoint.restore(latest_checkpoint)
self.evaluate(checkpoint_load_status.initialize_or_restore())
self.assertEqual(
self.evaluate(variable_collection.actions_from_reward_layer), True)
if __name__ == '__main__':
tf.test.main()
| 40.446575 | 110 | 0.721398 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.bandits.agents import neural_linucb_agent
from tf_agents.bandits.agents import utils as bandit_utils
from tf_agents.bandits.drivers import driver_utils
from tf_agents.bandits.policies import policy_utilities
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step
from tensorflow.python.framework import test_util
class DummyNet(network.Network):
    """Minimal stateless encoder network used by the tests.

    A single Dense layer with an all-ones kernel and zero bias, so the
    encoded output of any input is trivial to compute by hand.
    """

    def __init__(self, observation_spec, encoding_dim=10):
        super(DummyNet, self).__init__(
            observation_spec, state_spec=(), name='DummyNet')
        context_dim = observation_spec.shape[0]
        # Deterministic weights (ones kernel, zeros bias) keep the expected
        # encodings predictable in the agent update tests.
        self._layers.append(
            tf.keras.layers.Dense(
                encoding_dim,
                kernel_initializer=tf.compat.v1.initializers.constant(
                    np.ones([context_dim, encoding_dim])),
                bias_initializer=tf.compat.v1.initializers.constant(
                    np.zeros([encoding_dim]))))

    def call(self, inputs, step_type=None, network_state=()):
        # step_type is unused: this network is stateless and time-agnostic.
        del step_type
        inputs = tf.cast(inputs, tf.float32)
        for layer in self.layers:
            inputs = layer(inputs)
        return inputs, network_state
def test_cases():
    """Return named parameterized cases over (batch_size, context_dim)."""
    cases = [
        {
            'testcase_name': '_batch1_contextdim10',
            'batch_size': 1,
            'context_dim': 10,
        },
        {
            'testcase_name': '_batch4_contextdim5',
            'batch_size': 4,
            'context_dim': 5,
        },
    ]
    return parameterized.named_parameters(*cases)
def _get_initial_and_final_steps(batch_size, context_dim):
    """Return an (initial_step, final_step) TimeStep pair for testing.

    Observations are a deterministic ramp of shape [batch_size, context_dim];
    the final step carries a uniform-random reward in [0, 1).
    """
    observation = np.array(range(batch_size * context_dim)).reshape(
        [batch_size, context_dim])
    reward = np.random.uniform(0.0, 1.0, [batch_size])
    initial_step = time_step.TimeStep(
        tf.constant(
            time_step.StepType.FIRST, dtype=tf.int32, shape=[batch_size],
            name='step_type'),
        tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
        tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
        tf.constant(observation, dtype=tf.float32,
                    shape=[batch_size, context_dim], name='observation'))
    final_step = time_step.TimeStep(
        tf.constant(
            time_step.StepType.LAST, dtype=tf.int32, shape=[batch_size],
            name='step_type'),
        tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
        tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
        # Offset by 100 so the final observation differs from the initial one.
        tf.constant(observation + 100.0, dtype=tf.float32,
                    shape=[batch_size, context_dim], name='observation'))
    return initial_step, final_step
def _get_initial_and_final_steps_with_action_mask(batch_size,
                                                  context_dim,
                                                  num_actions=None):
    """Return an (initial_step, final_step) pair with action-mask observations.

    The observation is a (context, mask) tuple; the mask disallows action i
    for batch element i (1 - identity), exercising constrained action choice.
    """
    observation = np.array(range(batch_size * context_dim)).reshape(
        [batch_size, context_dim])
    observation = tf.constant(observation, dtype=tf.float32)
    mask = 1 - tf.eye(batch_size, num_columns=num_actions, dtype=tf.int32)
    reward = np.random.uniform(0.0, 1.0, [batch_size])
    initial_step = time_step.TimeStep(
        tf.constant(
            time_step.StepType.FIRST,
            dtype=tf.int32,
            shape=[batch_size],
            name='step_type'),
        tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
        tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
        (observation, mask))
    final_step = time_step.TimeStep(
        tf.constant(
            time_step.StepType.LAST,
            dtype=tf.int32,
            shape=[batch_size],
            name='step_type'),
        tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
        tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
        # Final observation offset by 100; the mask is reused unchanged.
        (observation + 100.0, mask))
    return initial_step, final_step
def _get_action_step(action):
    """Wrap `action` in a PolicyStep carrying empty policy info."""
    action_tensor = tf.convert_to_tensor(action)
    return policy_step.PolicyStep(
        action=action_tensor,
        info=policy_utilities.PolicyInfo())
def _get_experience(initial_step, action_step, final_step):
    """Build a batched single-step bandit trajectory from the given steps."""
    single_experience = driver_utils.trajectory_for_bandit(
        initial_step, action_step, final_step)

    def _add_time_dim(tensor_like):
        # Insert an outer time dimension of size 1 on every leaf tensor.
        return tf.expand_dims(tf.convert_to_tensor(tensor_like), 1)

    return tf.nest.map_structure(_add_time_dim, single_experience)
@test_util.run_all_in_graph_and_eager_modes
class NeuralLinUCBAgentTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(NeuralLinUCBAgentTest, self).setUp()
tf.compat.v1.enable_resource_variables()
@test_cases()
def testInitializeAgentNumTrainSteps0(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testInitializeAgentNumTrainSteps10(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps0(self, batch_size=1, context_dim=10):
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))
loss_info = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(loss_info)
final_a = self.evaluate(agent.cov_matrix)
final_b = self.evaluate(agent.data_vector)
observations_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.observation, tf.float64),
[batch_size, context_dim]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
rewards_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
expected_a_updated_list = []
expected_b_updated_list = []
for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(
observations_list, rewards_list)):
encoded_observations_for_arm, _ = encoder(observations_for_arm)
encoded_observations_for_arm = tf.cast(
encoded_observations_for_arm, dtype=tf.float64)
num_samples_for_arm_current = tf.cast(
tf.shape(rewards_for_arm)[0], tf.float64)
num_samples_for_arm_total = num_samples_for_arm_current
def true_fn():
a_new = tf.matmul(
encoded_observations_for_arm,
encoded_observations_for_arm,
transpose_a=True)
b_new = bandit_utils.sum_reward_weighted_observations(
rewards_for_arm, encoded_observations_for_arm)
return a_new, b_new
def false_fn():
return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),
tf.zeros([encoding_dim], dtype=tf.float64))
a_new, b_new = tf.cond(
tf.squeeze(num_samples_for_arm_total) > 0,
true_fn,
false_fn)
expected_a_updated_list.append(self.evaluate(a_new))
expected_b_updated_list.append(self.evaluate(b_new))
self.assertAllClose(expected_a_updated_list, final_a)
self.assertAllClose(expected_b_updated_list, final_b)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10(self, batch_size=1, context_dim=10):
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions, encoding_dim)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
variable_collection=variable_collection,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10MaskedActions(
self, batch_size=1, context_dim=10):
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps_with_action_mask(
batch_size, context_dim, num_actions)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
observation_spec = (tensor_spec.TensorSpec([context_dim], tf.float32),
tensor_spec.TensorSpec([num_actions], tf.int32))
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec[0])
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
def testInitializeRestoreVariableCollection(self):
if not tf.executing_eagerly():
self.skipTest('Test only works in eager mode.')
num_actions = 5
encoding_dim = 7
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions=num_actions, encoding_dim=encoding_dim)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(variable_collection.num_samples_list)
checkpoint = tf.train.Checkpoint(variable_collection=variable_collection)
checkpoint_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_dir, 'checkpoint')
checkpoint.save(file_prefix=checkpoint_prefix)
variable_collection.actions_from_reward_layer.assign(False)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint_load_status = checkpoint.restore(latest_checkpoint)
self.evaluate(checkpoint_load_status.initialize_or_restore())
self.assertEqual(
self.evaluate(variable_collection.actions_from_reward_layer), True)
if __name__ == '__main__':
tf.test.main()
| true | true |
f72ce6309a00519d759cb64bf82d33c3718dba6a | 2,373 | py | Python | src/fhir_types/FHIR_AdverseEvent_SuspectEntity.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | 2 | 2022-02-03T00:51:30.000Z | 2022-02-03T18:42:43.000Z | src/fhir_types/FHIR_AdverseEvent_SuspectEntity.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | src/fhir_types/FHIR_AdverseEvent_SuspectEntity.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, Literal, TypedDict
from .FHIR_AdverseEvent_Causality import FHIR_AdverseEvent_Causality
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# Actual or potential/avoided event causing unintended physical injury resulting from or contributed to by medical care, a research study or other healthcare setting factors that requires additional monitoring, treatment, or hospitalization, or that results in death.
# NOTE(review): this module appears to be generated from the FHIR R4
# StructureDefinition for AdverseEvent.suspectEntity -- confirm before hand-editing.
FHIR_AdverseEvent_SuspectEntity = TypedDict(
    "FHIR_AdverseEvent_SuspectEntity",
    {
        # Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
        "id": FHIR_string,
        # Additional information not part of the basic definition of the element (standard FHIR extension mechanism).
        "extension": List[Any],
        # Extensions that modify the meaning of the containing element; processors are required to check for these.
        "modifierExtension": List[Any],
        # Identifies the actual instance of what caused the adverse event. May be a substance, medication, medication administration, medication statement or a device.
        "instance": FHIR_Reference,
        # Information on the possible cause of the event.
        "causality": List[FHIR_AdverseEvent_Causality],
    },
    total=False,
)
| 98.875 | 836 | 0.785925 | from typing import Any, List, Literal, TypedDict
from .FHIR_AdverseEvent_Causality import FHIR_AdverseEvent_Causality
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
FHIR_AdverseEvent_SuspectEntity = TypedDict(
"FHIR_AdverseEvent_SuspectEntity",
{
"id": FHIR_string,
"extension": List[Any],
"modifierExtension": List[Any],
# Identifies the actual instance of what caused the adverse event. May be a substance, medication, medication administration, medication statement or a device.
"instance": FHIR_Reference,
# Information on the possible cause of the event.
"causality": List[FHIR_AdverseEvent_Causality],
},
total=False,
)
| true | true |
f72ce673fa0c764781f68e1466651c550042bccc | 13,709 | py | Python | src/attributecode/transform.py | oneforthidiot/aboutcode-toolkit | 666cc8857aadaeb4e07c540c817c831b0f3234e2 | [
"Apache-2.0"
] | 1 | 2021-01-02T08:16:55.000Z | 2021-01-02T08:16:55.000Z | src/attributecode/transform.py | oneforthidiot/aboutcode-toolkit | 666cc8857aadaeb4e07c540c817c831b0f3234e2 | [
"Apache-2.0"
] | null | null | null | src/attributecode/transform.py | oneforthidiot/aboutcode-toolkit | 666cc8857aadaeb4e07c540c817c831b0f3234e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) 2013-2020 nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import Counter
from collections import OrderedDict
import io
import json
import attr
from attributecode import CRITICAL
from attributecode import Error
from attributecode import saneyaml
from attributecode.util import csv
from attributecode.util import python2
from attributecode.util import replace_tab_with_spaces
if python2: # pragma: nocover
from itertools import izip_longest as zip_longest # NOQA
else: # pragma: nocover
from itertools import zip_longest # NOQA
def transform_csv_to_csv(location, output, transformer):
    """
    Read a CSV file at `location`, apply the `transformer` Transformer and
    write a new CSV file at `output`.

    Return a list of Error objects (empty on success). Raise ValueError if
    no `transformer` is provided.
    """
    if not transformer:
        raise ValueError('Cannot transform without Transformer')

    rows = read_csv_rows(location)

    # The first row carries the field names; the remaining rows are data.
    names = next(rows)
    field_names = strip_trailing_fields_csv(names)

    dupes = check_duplicate_fields(field_names)
    if dupes:
        errors = []
        msg = u'Duplicated field name: %(name)s'
        for name in dupes:
            errors.append(Error(CRITICAL, msg % locals()))
        # Return a plain list of errors, consistent with the other return
        # paths and the documented contract. (Previously this branch
        # returned a (field_names, [], errors) 3-tuple, which broke callers
        # expecting a list of Error objects.)
        return errors

    # Convert each data row to an ordered mapping keyed by field name;
    # zip_longest pads rows shorter than the header with None values.
    new_data = [OrderedDict(zip_longest(field_names, item)) for item in rows]
    field_names, updated_data, errors = transform_data(new_data, transformer)
    if errors:
        return errors
    write_csv(output, updated_data, field_names)
    return []
def transform_json_to_json(location, output, transformer):
    """
    Read a JSON file at `location`, apply the `transformer` Transformer and
    write a new JSON file at `output`.

    Return a list of Error objects (empty on success). Raise ValueError if
    no `transformer` is provided.
    """
    if not transformer:
        raise ValueError('Cannot transform without Transformer')

    loaded = read_json(location)
    stripped = strip_trailing_fields_json(loaded)
    normalized = normalize_dict_data(stripped)
    field_names, transformed, errors = transform_data(normalized, transformer)
    if errors:
        return errors
    write_json(output, transformed)
    return []
def strip_trailing_fields_csv(names):
    """
    Return a list of field names from `names` with surrounding whitespace
    removed (#456).
    """
    return [name.strip() for name in names]
def strip_trailing_fields_json(items):
    """
    Return a list of mappings built from `items` (a list of mappings) with
    surrounding whitespace stripped from every field name (#456).
    """
    data = []
    for item in items:
        # Build a fresh mapping per item. The previous implementation
        # created a single OrderedDict before the loop, so every entry of
        # the result aliased the same dict, accumulating the merged fields
        # of all items.
        od = OrderedDict()
        for field in item:
            od[field.strip()] = item[field]
        data.append(od)
    return data
def normalize_dict_data(data):
    """
    Normalize `data` (a mapping, or a list of mappings, possibly a
    scancode-toolkit JSON document) to a list of dictionaries.

    If `data` is a scancode-toolkit output (it has a "headers" entry whose
    first item has tool_name == "scancode-toolkit"), only the "files" list
    is kept. Otherwise `data` is returned unchanged, wrapped in a list if
    it is not one already.
    """
    new_data = data
    try:
        # Detect JSON output produced by scancode-toolkit and keep only the
        # per-file records.
        if data["headers"][0]["tool_name"] == "scancode-toolkit":
            new_data = data["files"]
    except (KeyError, IndexError, TypeError):
        # Not a scancode-toolkit document: keep the data unchanged.
        # Previously a bare `except:` left `new_data` unbound (NameError)
        # when "headers" existed but tool_name was not "scancode-toolkit".
        pass
    if not isinstance(new_data, list):
        new_data = [new_data]
    return new_data
def transform_data(data, transformer):
    """
    Apply the `transformer` Transformer to `data`, a list of ordered dicts.

    Return a tuple of:
        ([field names...], [transformed ordered dict...], [Error objects...])

    NOTE(review): when `transformer` is falsy this returns `data` alone, not
    the documented 3-tuple; both callers in this module raise before that
    can happen, but the inconsistency should be confirmed/fixed.
    """
    if not transformer:
        return data
    # Renaming is always applied first: every field name used below refers
    # to the post-renaming names.
    renamed_field_data = transformer.apply_renamings(data)
    field_names = renamed_field_data[0].keys()
    if transformer.field_filters:
        # Keep only explicitly whitelisted fields.
        renamed_field_data = list(transformer.filter_fields(renamed_field_data))
        field_names = [c for c in field_names if c in transformer.field_filters]
    if transformer.exclude_fields:
        # Drop explicitly blacklisted fields.
        renamed_field_data = list(transformer.filter_excluded(renamed_field_data))
        field_names = [c for c in field_names if c not in transformer.exclude_fields]
    errors = transformer.check_required_fields(renamed_field_data)
    if errors:
        return field_names, data, errors
    return field_names, renamed_field_data, errors
tranformer_config_help = '''
A transform configuration file is used to describe which transformations and
validations to apply to a source CSV file. This is a simple text file using YAML
format, using the same format as an .ABOUT file.
The attributes that can be set in a configuration file are:
* field_renamings:
An optional map of source CSV or JSON field name to target CSV/JSON new field name that
is used to rename CSV fields.
For instance with this configuration the fields "Directory/Location" will be
renamed to "about_resource" and "foo" to "bar":
field_renamings:
about_resource : 'Directory/Location'
bar : foo
The renaming is always applied first before other transforms and checks. All
other field names referenced below are these that exist AFTER the renamings
have been applied to the existing field names.
* required_fields:
An optional list of required field names that must have a value, beyond the
standard fields names. If a source CSV/JSON does not have such a field or a row is
missing a value for a required field, an error is reported.
For instance with this configuration an error will be reported if the fields
"name" and "version" are missing or if any row does not have a value set for
these fields:
required_fields:
- name
- version
* field_filters:
An optional list of field names that should be kept in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be kept
in the target CSV/JSON must be listed regardless of either standard or required
fields. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will only contains the "name"
and "version" fields and no other field:
field_filters:
- name
- version
* exclude_fields:
An optional list of field names that should be excluded in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be excluded
in the target CSV/JSON must be listed. Excluding standard or required fields will cause
an error. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will not contain the "type"
and "temp" fields:
exclude_fields:
- type
- temp
'''
@attr.attributes
class Transformer(object):
    __doc__ = tranformer_config_help

    # Configuration attributes, typically loaded from the YAML transform
    # configuration file (see __doc__ above for their meaning).
    field_renamings = attr.attrib(default=attr.Factory(dict))
    required_fields = attr.attrib(default=attr.Factory(list))
    field_filters = attr.attrib(default=attr.Factory(list))
    exclude_fields = attr.attrib(default=attr.Factory(list))

    # a list of all the standard fields from AboutCode toolkit
    standard_fields = attr.attrib(default=attr.Factory(list), init=False)
    # a list of the subset of standard fields that are essential and MUST be
    # present for AboutCode toolkit to work
    essential_fields = attr.attrib(default=attr.Factory(list), init=False)

    # called by attr after the __init__()
    def __attrs_post_init__(self, *args, **kwargs):
        # Imported lazily here, presumably to avoid a circular import with
        # attributecode.model -- confirm before moving to module level.
        from attributecode.model import About
        about = About()
        self.essential_fields = list(about.required_fields)
        self.standard_fields = [f.name for f in about.all_fields()]

    @classmethod
    def default(cls):
        """
        Return a default Transformer with no renamings, filters or
        requirements beyond the built-in essential fields.
        """
        return cls(
            field_renamings={},
            required_fields=[],
            field_filters=[],
            exclude_fields=[],
        )

    @classmethod
    def from_file(cls, location):
        """
        Load and return a Transformer instance from a YAML configuration
        file at `location`.
        """
        with io.open(location, encoding='utf-8') as conf:
            data = saneyaml.load(replace_tab_with_spaces(conf.read()))
        return cls(
            field_renamings=data.get('field_renamings', {}),
            required_fields=data.get('required_fields', []),
            field_filters=data.get('field_filters', []),
            exclude_fields=data.get('exclude_fields', []),
        )

    def check_required_fields(self, data):
        """
        Return a list of Error objects for each dict in the `data` list of
        ordered dicts that is missing a value for a required field name.
        """
        errors = []
        required = set(self.essential_fields + self.required_fields)
        if not required:
            return []
        for rn, item in enumerate(data):
            # A required field is "missing" if absent or falsy (empty value).
            missings = [rk for rk in required if not item.get(rk)]
            if not missings:
                continue
            missings = ', '.join(missings)
            msg = 'Row {rn} is missing required values for fields: {missings}'
            errors.append(Error(CRITICAL, msg.format(**locals())))
        return errors

    def apply_renamings(self, data):
        """
        Return a transformed list of dicts from the `data` list of dicts
        where keys are renamed using this Transformer's field_renamings
        mapping of {new_name: old_name}. Unmapped keys are kept unchanged.
        """
        renamings = self.field_renamings
        if not renamings:
            return data
        renamings = {n: rn for n, rn in renamings.items()}
        renamed_list = []
        for row in data:
            renamed = OrderedDict()
            for key in row:
                matched = False
                # The config maps new_name -> old_name: replace every old
                # name by its new name.
                for renamed_key in renamings:
                    if key == renamings[renamed_key]:
                        renamed[renamed_key] = row[key]
                        matched = True
                if not matched:
                    renamed[key] = row[key]
            renamed_list.append(renamed)
        return renamed_list

    # Disabled helper kept for reference: fields used to be lowercased and
    # stripped before filtering.
    """
    def clean_fields(self, field_names):
        Apply standard cleanups to a list of fields and return these.
        if not field_names:
            return field_names
        return [c.strip().lower() for c in field_names]
    """

    def filter_fields(self, data):
        """
        Yield transformed dicts from the `data` list of dicts keeping only
        fields with a name in the `field_filters` of this Transformer.
        """
        #field_filters = set(self.clean_fields(self.field_filters))
        field_filters = set(self.field_filters)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k in field_filters)
            yield OrderedDict(items)

    def filter_excluded(self, data):
        """
        Yield transformed dicts from the `data` list of dicts excluding
        fields with a name in the `exclude_fields` of this Transformer.
        """
        #exclude_fields = set(self.clean_fields(self.exclude_fields))
        exclude_fields = set(self.exclude_fields)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k not in exclude_fields)
            yield OrderedDict(items)
def check_duplicate_fields(field_names):
    """
    Return a sorted list of field names appearing more than once in the
    `field_names` list of strings, compared case-insensitively.
    """
    counts = Counter(name.lower() for name in field_names)
    return sorted(name for name, num in counts.items() if num > 1)
def read_csv_rows(location):
    """
    Yield each row (as a list of string values) of the CSV file at
    `location`, replacing undecodable bytes rather than failing.
    """
    with io.open(location, encoding='utf-8', errors='replace') as csvfile:
        for row in csv.reader(csvfile):
            yield row
def read_json(location):
    """
    Return the data loaded from the JSON file at `location`, preserving
    the order of object keys.
    """
    with io.open(location, encoding='utf-8', errors='replace') as jsonfile:
        return json.load(jsonfile, object_pairs_hook=OrderedDict)
def write_csv(location, data, field_names):  # NOQA
    """
    Write the `data` list of ordered dicts as CSV rows to the file at
    `location`, using `field_names` as the header row.
    """
    with io.open(location, 'w', encoding='utf-8', newline='\n') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=field_names)
        writer.writeheader()
        for row in data:
            writer.writerow(row)
def write_json(location, data):
    """
    Write the `data` list of ordered dicts as indented JSON to the file at
    `location`.
    """
    with open(location, 'w') as jsonfile:
        jsonfile.write(json.dumps(data, indent=3))
| 34.358396 | 87 | 0.665183 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import Counter
from collections import OrderedDict
import io
import json
import attr
from attributecode import CRITICAL
from attributecode import Error
from attributecode import saneyaml
from attributecode.util import csv
from attributecode.util import python2
from attributecode.util import replace_tab_with_spaces
if python2:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
def transform_csv_to_csv(location, output, transformer):
if not transformer:
raise ValueError('Cannot transform without Transformer')
rows = read_csv_rows(location)
errors = []
data = iter(rows)
names = next(rows)
field_names = strip_trailing_fields_csv(names)
dupes = check_duplicate_fields(field_names)
if dupes:
msg = u'Duplicated field name: %(name)s'
for name in dupes:
errors.append(Error(CRITICAL, msg % locals()))
return field_names, [], errors
new_data = [OrderedDict(zip_longest(field_names, item)) for item in data]
field_names, updated_data, errors = transform_data(new_data, transformer)
if errors:
return errors
else:
write_csv(output, updated_data, field_names)
return []
def transform_json_to_json(location, output, transformer):
if not transformer:
raise ValueError('Cannot transform without Transformer')
items = read_json(location)
data = strip_trailing_fields_json(items)
new_data = normalize_dict_data(data)
field_names, updated_data, errors = transform_data(new_data, transformer)
if errors:
return errors
else:
write_json(output, updated_data)
return []
def strip_trailing_fields_csv(names):
field_names = []
for name in names:
field_names.append(name.strip())
return field_names
def strip_trailing_fields_json(items):
data = []
od = OrderedDict()
for item in items:
for field in item:
stripped_field_name = field.strip()
od[stripped_field_name] = item[field]
data.append(od)
return data
def normalize_dict_data(data):
    """Normalize loaded JSON data to a list of mappings.

    If `data` looks like scancode-toolkit output (a mapping with a "headers"
    entry naming the tool), return its "files" list; otherwise return the
    data itself, wrapped in a list when it is not already one.

    Bug fixes: `new_data` is pre-initialized so a "headers" entry from a
    different tool no longer causes a NameError, and the bare `except` is
    narrowed to the lookup errors actually expected here.
    """
    new_data = data
    try:
        if data["headers"][0]["tool_name"] == "scancode-toolkit":
            new_data = data["files"]
    except (KeyError, IndexError, TypeError):
        new_data = data
    if not isinstance(new_data, list):
        new_data = [new_data]
    return new_data
def transform_data(data, transformer):
    """Apply `transformer` (renamings, filters, exclusions and required-field
    checks, in that order) to `data`, a list of ordered mappings.

    Returns a (field_names, rows, errors) 3-tuple. On errors, the original
    `data` is returned unchanged as `rows`.
    """
    if not transformer:
        # Bug fix: this used to return bare `data`, which broke callers that
        # unpack the 3-tuple returned by every other path.
        return (list(data[0].keys()) if data else []), data, []
    renamed_field_data = transformer.apply_renamings(data)
    # Guard against empty input: indexing [0] would raise IndexError.
    field_names = list(renamed_field_data[0].keys()) if renamed_field_data else []
    if transformer.field_filters:
        renamed_field_data = list(transformer.filter_fields(renamed_field_data))
        field_names = [c for c in field_names if c in transformer.field_filters]
    if transformer.exclude_fields:
        renamed_field_data = list(transformer.filter_excluded(renamed_field_data))
        field_names = [c for c in field_names if c not in transformer.exclude_fields]
    errors = transformer.check_required_fields(renamed_field_data)
    if errors:
        return field_names, data, errors
    return field_names, renamed_field_data, errors
tranformer_config_help = '''
A transform configuration file is used to describe which transformations and
validations to apply to a source CSV file. This is a simple text file using YAML
format, using the same format as an .ABOUT file.
The attributes that can be set in a configuration file are:
* field_renamings:
An optional map of source CSV or JSON field name to target CSV/JSON new field name that
is used to rename CSV fields.
For instance with this configuration the fields "Directory/Location" will be
renamed to "about_resource" and "foo" to "bar":
field_renamings:
about_resource : 'Directory/Location'
bar : foo
The renaming is always applied first before other transforms and checks. All
other field names referenced below are these that exist AFTER the renamings
have been applied to the existing field names.
* required_fields:
An optional list of required field names that must have a value, beyond the
standard fields names. If a source CSV/JSON does not have such a field or a row is
missing a value for a required field, an error is reported.
For instance with this configuration an error will be reported if the fields
"name" and "version" are missing or if any row does not have a value set for
these fields:
required_fields:
- name
- version
* field_filters:
An optional list of field names that should be kept in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be kept
in the target CSV/JSON must be listed regardless of either standard or required
fields. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will only contains the "name"
and "version" fields and no other field:
field_filters:
- name
- version
* exclude_fields:
An optional list of field names that should be excluded in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be excluded
in the target CSV/JSON must be listed. Excluding standard or required fields will cause
an error. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will not contain the "type"
and "temp" fields:
exclude_fields:
- type
- temp
'''
@attr.attributes
class Transformer(object):
    # Full user-facing usage documentation lives in the module-level
    # tranformer_config_help string.
    __doc__ = tranformer_config_help
    # Maps target (new) field name -> source (existing) field name.
    # Renamings are applied before any other transform or check.
    field_renamings = attr.attrib(default=attr.Factory(dict))
    # Field names that must be present with a non-empty value in every row.
    required_fields = attr.attrib(default=attr.Factory(list))
    # Whitelist of field names kept in the output; empty keeps everything.
    field_filters = attr.attrib(default=attr.Factory(list))
    # Field names dropped from the output.
    exclude_fields = attr.attrib(default=attr.Factory(list))
    # Derived from attributecode.model.About in __attrs_post_init__;
    # not settable by callers (init=False).
    standard_fields = attr.attrib(default=attr.Factory(list), init=False)
    essential_fields = attr.attrib(default=attr.Factory(list), init=False)
    def __attrs_post_init__(self, *args, **kwargs):
        # Imported here rather than at module scope -- presumably to avoid a
        # circular import with attributecode.model; TODO confirm.
        from attributecode.model import About
        about = About()
        self.essential_fields = list(about.required_fields)
        self.standard_fields = [f.name for f in about.all_fields()]
    @classmethod
    def default(cls):
        # Return a Transformer with no renamings, filters or requirements.
        return cls(
            field_renamings={},
            required_fields=[],
            field_filters=[],
            exclude_fields=[],
        )
    @classmethod
    def from_file(cls, location):
        # Load a Transformer from the YAML configuration file at `location`.
        with io.open(location, encoding='utf-8') as conf:
            data = saneyaml.load(replace_tab_with_spaces(conf.read()))
        return cls(
            field_renamings=data.get('field_renamings', {}),
            required_fields=data.get('required_fields', []),
            field_filters=data.get('field_filters', []),
            exclude_fields=data.get('exclude_fields', []),
        )
    def check_required_fields(self, data):
        # Return a list of Error objects for each row of `data` that is
        # missing a value for an essential or required field.
        errors = []
        required = set(self.essential_fields + self.required_fields)
        if not required:
            return []
        for rn, item in enumerate(data):
            missings = [rk for rk in required if not item.get(rk)]
            if not missings:
                continue
            missings = ', '.join(missings)
            msg = 'Row {rn} is missing required values for fields: {missings}'
            errors.append(Error(CRITICAL, msg.format(**locals())))
        return errors
    def apply_renamings(self, data):
        # Return rows of `data` with field names renamed per field_renamings
        # (target name -> source name). Unmatched fields are kept as-is.
        renamings = self.field_renamings
        if not renamings:
            return data
        renamings = {n: rn for n, rn in renamings.items()}
        renamed_list = []
        for row in data:
            renamed = OrderedDict()
            for key in row:
                matched = False
                # Find any renaming whose *source* name equals this key.
                for renamed_key in renamings:
                    if key == renamings[renamed_key]:
                        renamed[renamed_key] = row[key]
                        matched = True
                if not matched:
                    renamed[key] = row[key]
            renamed_list.append(renamed)
        return renamed_list
    def filter_fields(self, data):
        # Yield rows keeping only the fields listed in field_filters.
        field_filters = set(self.field_filters)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k in field_filters)
            yield OrderedDict(items)
    def filter_excluded(self, data):
        # Yield rows with the fields listed in exclude_fields removed.
        exclude_fields = set(self.exclude_fields)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k not in exclude_fields)
            yield OrderedDict(items)
def check_duplicate_fields(field_names):
    """Return the sorted list of lowercased field names appearing more than
    once (case-insensitive) in `field_names`."""
    counts = Counter(name.lower() for name in field_names)
    return sorted(name for name, tally in counts.items() if tally > 1)
def read_csv_rows(location):
    """Yield each row (a list of strings) from the CSV file at `location`.

    Undecodable bytes are replaced rather than raising.
    """
    with io.open(location, encoding='utf-8', errors='replace') as csvfile:
        for row in csv.reader(csvfile):
            yield row
def read_json(location):
    """Load and return the JSON content of `location`, preserving the order
    of object keys via OrderedDict."""
    with io.open(location, encoding='utf-8', errors='replace') as jsonfile:
        return json.load(jsonfile, object_pairs_hook=OrderedDict)
def write_csv(location, data, field_names):
    """Write `data` (an iterable of mappings) as UTF-8 CSV to `location`,
    with `field_names` as the header row."""
    with io.open(location, 'w', encoding='utf-8', newline='\n') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=field_names)
        writer.writeheader()
        for row in data:
            writer.writerow(row)
def write_json(location, data):
    """Serialize `data` as indented JSON to the file at `location`.

    Uses an explicit UTF-8 encoding for consistency with write_csv; the
    previous plain open() relied on the platform default encoding.
    """
    with io.open(location, 'w', encoding='utf-8') as jsonfile:
        json.dump(data, jsonfile, indent=3)
| true | true |
f72ce7712b5c7dee583c54beb8325116fc9f9df8 | 1,742 | py | Python | util.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | util.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | util.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | # https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-python
# https://www.udemy.com/data-science-supervised-machine-learning-in-python
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import pandas as pd
def get_data(limit=None):
    """Load and shuffle the MNIST-style 'train.csv' in the working directory.

    Returns (X, Y): X is the pixel matrix scaled to [0, 1], Y the label
    column. If `limit` is given, only the first `limit` rows are returned.

    Bug fix: the pixel slice was computed twice and row 4000 was printed
    without bounds checking (IndexError on files with fewer rows); the
    redundant slice and the debug prints are removed.
    """
    print("Reading in and transforming data...")
    df = pd.read_csv('train.csv')
    data = df.values
    np.random.shuffle(data)
    X = data[:, 1:] / 255.0  # pixel values are 0..255 in the raw file
    Y = data[:, 0]
    if limit is not None:
        X, Y = X[:limit], Y[:limit]
    return X, Y
def get_xor():
    """Generate the classic XOR toy dataset.

    Returns (X, Y): 200 2-D points uniformly drawn in four quadrant squares
    of side 0.5; the two "diagonal" squares get label 0, the off-diagonal
    squares label 1.
    """
    X = np.zeros((200, 2))
    quadrant_offsets = [
        (0.5, 0.5),  # top-right    -> class 0
        (0.0, 0.0),  # bottom-left  -> class 0
        (0.0, 0.5),  # top-left     -> class 1
        (0.5, 0.0),  # bottom-right -> class 1
    ]
    for idx, (dx, dy) in enumerate(quadrant_offsets):
        block = np.random.random((50, 2)) / 2 + np.array([[dx, dy]])
        X[idx * 50:(idx + 1) * 50] = block
    Y = np.array([0] * 100 + [1] * 100)
    return X, Y
def get_donut():
    """Generate the concentric-rings ("donut") toy dataset.

    Returns (X, Y): 200 2-D points. The inner ring (label 0) sits at radius
    ~5 and the outer ring (label 1) at radius ~10; each point's radius is
    the ring radius plus unit Gaussian noise, with angle uniform in
    [0, 2*pi).
    """
    N = 200
    R_inner = 5
    R_outer = 10
    def _ring(radius):
        # Radius per point: ring radius + N(0, 1) noise; angle uniform.
        radii = np.random.randn(N // 2) + radius
        theta = 2 * np.pi * np.random.random(N // 2)
        return np.concatenate([[radii * np.cos(theta)], [radii * np.sin(theta)]]).T
    X = np.concatenate([_ring(R_inner), _ring(R_outer)])
    Y = np.array([0] * (N // 2) + [1] * (N // 2))
    return X, Y
get_data() | 32.259259 | 86 | 0.596441 |
from __future__ import print_function, division
from builtins import range, input
import numpy as np
import pandas as pd
def get_data(limit=None):
print("Reading in and transforming data...")
df = pd.read_csv('train.csv')
data = df.values
np.random.shuffle(data)
X = data[:, 1:]
print(X[4000])
X = data[:, 1:] / 255.0
print(X[4000])
Y = data[:, 0]
if limit is not None:
X, Y = X[:limit], Y[:limit]
return X, Y
def get_xor():
X = np.zeros((200, 2))
X[:50] = np.random.random((50, 2)) / 2 + 0.5
X[50:100] = np.random.random((50, 2)) / 2
X[100:150] = np.random.random((50, 2)) / 2 + np.array([[0, 0.5]])
X[150:] = np.random.random((50, 2)) / 2 + np.array([[0.5, 0]])
Y = np.array([0]*100 + [1]*100)
return X, Y
def get_donut():
N = 200
R_inner = 5
R_outer = 10
R1 = np.random.randn(N//2) + R_inner
theta = 2*np.pi*np.random.random(N//2)
X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T
R2 = np.random.randn(N//2) + R_outer
theta = 2*np.pi*np.random.random(N//2)
X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T
X = np.concatenate([ X_inner, X_outer ])
Y = np.array([0]*(N//2) + [1]*(N//2))
return X, Y
get_data() | true | true |
f72ce7ae5cf820ccf1451c8e5cde1d89a16c1e52 | 3,007 | py | Python | pbj/electrostatics/pb_formulation/formulations/direct_external.py | bem4solvation/pbj | 4fa9c111596359192539787ae241a79d4316b15b | [
"MIT"
] | null | null | null | pbj/electrostatics/pb_formulation/formulations/direct_external.py | bem4solvation/pbj | 4fa9c111596359192539787ae241a79d4316b15b | [
"MIT"
] | 1 | 2022-02-18T17:34:37.000Z | 2022-02-18T17:34:37.000Z | pbj/electrostatics/pb_formulation/formulations/direct_external.py | bem4solvation/pbj | 4fa9c111596359192539787ae241a79d4316b15b | [
"MIT"
] | null | null | null | import numpy as np
import bempp.api
import os
from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
from .common import calculate_potential_one_surface
invert_potential = True
def verify_parameters(self):
    # No formulation-specific parameter constraints for this formulation;
    # always report the parameters as valid.
    return True
def lhs(self):
    """Assemble the 2x2 blocked boundary-operator matrix of the direct
    formulation (exterior equation first) and store it in
    self.matrices["A"].
    """
    d_space = self.dirichl_space
    n_space = self.neumann_space
    eps_in = self.ep_in
    eps_ex = self.ep_ex
    kappa = self.kappa
    assembler = self.operator_assembler
    identity = sparse.identity(d_space, d_space, d_space)
    # Laplace layer operators (interior / molecular region).
    V_L = laplace.single_layer(
        n_space, d_space, d_space, assembler=assembler
    )
    K_L = laplace.double_layer(
        d_space, d_space, d_space, assembler=assembler
    )
    # Modified-Helmholtz (Yukawa) layer operators (exterior / solvent region).
    V_Y = modified_helmholtz.single_layer(
        n_space, d_space, d_space, kappa, assembler=assembler
    )
    K_Y = modified_helmholtz.double_layer(
        d_space, d_space, d_space, kappa, assembler=assembler
    )
    blocked = bempp.api.BlockedOperator(2, 2)
    blocked[0, 0] = 0.5 * identity - K_Y
    blocked[0, 1] = V_Y
    blocked[1, 0] = 0.5 * identity + K_L
    blocked[1, 1] = -(eps_ex / eps_in) * V_L
    self.matrices["A"] = blocked
def rhs(self):
    """Build the right-hand-side grid functions for the blocked system.

    rhs_1 (on the exterior equation row) is identically zero; rhs_2 is the
    Coulomb potential of the point charges divided by ep_in, evaluated
    either with the exafmm fast multipole method ("fmm") or by direct
    summation. The results are stored in self.rhs["rhs_1"]/["rhs_2"].
    """
    dirichl_space = self.dirichl_space
    neumann_space = self.neumann_space
    q = self.q          # point-charge magnitudes
    x_q = self.x_q      # point-charge positions
    ep_in = self.ep_in
    rhs_constructor = self.rhs_constructor
    if rhs_constructor == "fmm":
        # Evaluate the free-space potential of all charges at the boundary
        # points with the exafmm Laplace FMM.
        @bempp.api.callable(vectorized=True)
        def fmm_green_func(x, n, domain_index, result):
            import exafmm.laplace as _laplace
            sources = _laplace.init_sources(x_q, q)
            targets = _laplace.init_targets(x.T)
            fmm = _laplace.LaplaceFmm(p=10, ncrit=500, filename=".rhs.tmp")
            tree = _laplace.setup(sources, targets, fmm)
            values = _laplace.evaluate(tree, fmm)
            # Remove the temporary scratch file written by the FMM.
            os.remove(".rhs.tmp")
            result[:] = values[:, 0] / ep_in
        @bempp.api.real_callable
        def zero(x, n, domain_index, result):
            result[0] = 0
        rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
        rhs_2 = bempp.api.GridFunction(dirichl_space, fun=fmm_green_func)
    else:
        # Direct summation of the Coulomb potential over all charges,
        # evaluated one boundary point at a time.
        @bempp.api.real_callable
        def charges_fun(x, n, domain_index, result):
            nrm = np.sqrt(
                (x[0] - x_q[:, 0]) ** 2
                + (x[1] - x_q[:, 1]) ** 2
                + (x[2] - x_q[:, 2]) ** 2
            )
            aux = np.sum(q / nrm)
            result[0] = aux / (4 * np.pi * ep_in)
        @bempp.api.real_callable
        def zero(x, n, domain_index, result):
            result[0] = 0
        rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
        rhs_2 = bempp.api.GridFunction(dirichl_space, fun=charges_fun)
    self.rhs["rhs_1"], self.rhs["rhs_2"] = rhs_1, rhs_2
def calculate_potential(self, rerun_all):
    # Delegate to the shared single-surface solver driver.
    calculate_potential_one_surface(self, rerun_all)
import bempp.api
import os
from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
from .common import calculate_potential_one_surface
invert_potential = True
def verify_parameters(self):
return True
def lhs(self):
dirichl_space = self.dirichl_space
neumann_space = self.neumann_space
ep_in = self.ep_in
ep_out = self.ep_ex
kappa = self.kappa
operator_assembler = self.operator_assembler
identity = sparse.identity(dirichl_space, dirichl_space, dirichl_space)
slp_in = laplace.single_layer(
neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler
)
dlp_in = laplace.double_layer(
dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler
)
slp_out = modified_helmholtz.single_layer(
neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler
)
dlp_out = modified_helmholtz.double_layer(
dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler
)
A = bempp.api.BlockedOperator(2, 2)
A[0, 0] = 0.5 * identity - dlp_out
A[0, 1] = slp_out
A[1, 0] = 0.5 * identity + dlp_in
A[1, 1] = -(ep_out / ep_in) * slp_in
self.matrices["A"] = A
def rhs(self):
dirichl_space = self.dirichl_space
neumann_space = self.neumann_space
q = self.q
x_q = self.x_q
ep_in = self.ep_in
rhs_constructor = self.rhs_constructor
if rhs_constructor == "fmm":
@bempp.api.callable(vectorized=True)
def fmm_green_func(x, n, domain_index, result):
import exafmm.laplace as _laplace
sources = _laplace.init_sources(x_q, q)
targets = _laplace.init_targets(x.T)
fmm = _laplace.LaplaceFmm(p=10, ncrit=500, filename=".rhs.tmp")
tree = _laplace.setup(sources, targets, fmm)
values = _laplace.evaluate(tree, fmm)
os.remove(".rhs.tmp")
result[:] = values[:, 0] / ep_in
@bempp.api.real_callable
def zero(x, n, domain_index, result):
result[0] = 0
rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
rhs_2 = bempp.api.GridFunction(dirichl_space, fun=fmm_green_func)
else:
@bempp.api.real_callable
def charges_fun(x, n, domain_index, result):
nrm = np.sqrt(
(x[0] - x_q[:, 0]) ** 2
+ (x[1] - x_q[:, 1]) ** 2
+ (x[2] - x_q[:, 2]) ** 2
)
aux = np.sum(q / nrm)
result[0] = aux / (4 * np.pi * ep_in)
@bempp.api.real_callable
def zero(x, n, domain_index, result):
result[0] = 0
rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
rhs_2 = bempp.api.GridFunction(dirichl_space, fun=charges_fun)
self.rhs["rhs_1"], self.rhs["rhs_2"] = rhs_1, rhs_2
def calculate_potential(self, rerun_all):
calculate_potential_one_surface(self, rerun_all)
| true | true |
f72ce81173cc5c6016efe5504b76f86ecabf1edf | 245 | py | Python | python/merge.py | mannyrivera2010/rdf4j-learning | ef5bc6aeac0c16265605f4e7b577255fb48f96f7 | [
"Apache-2.0"
] | null | null | null | python/merge.py | mannyrivera2010/rdf4j-learning | ef5bc6aeac0c16265605f4e7b577255fb48f96f7 | [
"Apache-2.0"
] | null | null | null | python/merge.py | mannyrivera2010/rdf4j-learning | ef5bc6aeac0c16265605f4e7b577255fb48f96f7 | [
"Apache-2.0"
] | null | null | null | import glob
# Print every non-header line (those not containing "gender") from all CSV
# files under data/, stripped of surrounding whitespace.
for csv_path in glob.glob("data/*.csv"):
    with open(csv_path, 'r') as handle:
        for row in handle:
            if "gender" not in row:
                print(row.strip())
| 22.272727 | 43 | 0.62449 | import glob
file_list = glob.glob("data/*.csv")
for file_name in file_list:
with open(file_name, 'r') as open_file:
for inner_line in open_file:
if "gender" not in inner_line:
print(inner_line.strip())
| true | true |
f72ce8530a8392dc1dd22292d6d0dcfb86f65a5a | 2,885 | py | Python | pyramid_request_log/request_log.py | MoiTux/pyramid-request-log | 31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | [
"MIT"
] | 1 | 2017-08-07T10:22:16.000Z | 2017-08-07T10:22:16.000Z | pyramid_request_log/request_log.py | MoiTux/pyramid-request-log | 31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | [
"MIT"
] | null | null | null | pyramid_request_log/request_log.py | MoiTux/pyramid-request-log | 31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | [
"MIT"
] | null | null | null | import logging
import sys
import time
from pyramid.events import NewResponse, NewRequest
from pyramid.events import subscriber
if sys.version_info[0] < 3:
str = basestring
log = logging.getLogger(__name__)
unlog_pattern = None
unlog_route = None
authenticated_id = ''
@subscriber(NewRequest)
def log_request(event):
request = event.request
if ignore_route(request.path):
return
request.pyramid_request_log_start = time.time()
user = 'UnAuthenticatedUser'
if request.authenticated_userid:
user = getattr(request.authenticated_userid,
authenticated_id, 'AuthenticatedUser')
if request.content_type == 'application/json' and request.body:
try:
body = request.json_body
clean(body)
except Exception:
body = 'Json error'
log.info('New request: %s %s (body: %s) (%s: %s)',
request.method, request.path_qs, body, authenticated_id, user)
else:
log.info('New request: %s %s (%s: %s)',
request.method, request.path_qs, authenticated_id, user)
@subscriber(NewResponse)
def log_response(event):
    """Log every outgoing response (method, path, HTTP status, optional JSON
    body, user and request duration), skipping routes matched by the
    module-level `unlog_route`.

    Bug fix: corrects the "endded" typo in the two log message strings.
    """
    request = event.request
    response = event.response
    if ignore_route(request.path):
        return
    # Duration relies on log_request having stamped the start time.
    duration = '{:.3f}'.format(time.time() - request.pyramid_request_log_start)
    extra = {
        'method': request.method,
        'route_url': request.path_qs,
        'status': response.status,
        'duration': duration,
    }
    user = 'UnAuthenticatedUser'
    if request.authenticated_userid:
        user = getattr(request.authenticated_userid,
                       authenticated_id, 'AuthenticatedUser')
    if response.content_type == 'application/json' and response.body:
        try:
            body = response.json_body
            # Mask values whose keys match `unlog_pattern` before logging.
            clean(body)
        except Exception:
            body = 'Json error'
        log.info(
            'Response for request: %s %s: HTTPCode: %s, (body: %s) '
            '(%s: %s) (ended in %ss)',
            request.method, request.path_qs, response.status, body,
            authenticated_id, user, duration,
            extra=extra,
        )
    else:
        log.info('Response for request: %s %s: HTTPCode: %s, (%s: %s) '
                 '(ended in %ss)',
                 request.method, request.path_qs, response.status,
                 authenticated_id, user, duration,
                 extra=extra)
def clean(body):
    """Recursively mask, in place, dict values whose keys match the
    module-level `unlog_pattern`, descending into nested dicts and lists."""
    for entry in body:
        # `entry` is a key when body is a dict, an element when it is a list.
        if isinstance(entry, (dict, list)):
            clean(entry)
            continue
        if not isinstance(body, dict):
            continue
        value = body[entry]
        if isinstance(value, (dict, list)):
            clean(value)
        elif unlog_pattern and unlog_pattern.match(entry):
            body[entry] = '*' * 6
def ignore_route(route):
    """Return True when `route` matches the module-level `unlog_route`
    pattern (i.e. the route should not be logged)."""
    return bool(unlog_route and unlog_route.match(route))
| 27.47619 | 79 | 0.597574 | import logging
import sys
import time
from pyramid.events import NewResponse, NewRequest
from pyramid.events import subscriber
if sys.version_info[0] < 3:
str = basestring
log = logging.getLogger(__name__)
unlog_pattern = None
unlog_route = None
authenticated_id = ''
@subscriber(NewRequest)
def log_request(event):
request = event.request
if ignore_route(request.path):
return
request.pyramid_request_log_start = time.time()
user = 'UnAuthenticatedUser'
if request.authenticated_userid:
user = getattr(request.authenticated_userid,
authenticated_id, 'AuthenticatedUser')
if request.content_type == 'application/json' and request.body:
try:
body = request.json_body
clean(body)
except Exception:
body = 'Json error'
log.info('New request: %s %s (body: %s) (%s: %s)',
request.method, request.path_qs, body, authenticated_id, user)
else:
log.info('New request: %s %s (%s: %s)',
request.method, request.path_qs, authenticated_id, user)
@subscriber(NewResponse)
def log_response(event):
request = event.request
response = event.response
if ignore_route(request.path):
return
duration = '{:.3f}'.format(time.time() - request.pyramid_request_log_start)
extra = {
'method': request.method,
'route_url': request.path_qs,
'status': response.status,
'duration': duration,
}
user = 'UnAuthenticatedUser'
if request.authenticated_userid:
user = getattr(request.authenticated_userid,
authenticated_id, 'AuthenticatedUser')
if response.content_type == 'application/json' and response.body:
try:
body = response.json_body
clean(body)
except Exception:
body = 'Json error'
log.info(
'Response for request: %s %s: HTTPCode: %s, (body: %s) '
'(%s: %s) (endded in %ss)',
request.method, request.path_qs, response.status, body,
authenticated_id, user, duration,
extra=extra,
)
else:
log.info('Response for request: %s %s: HTTPCode: %s, (%s: %s) '
'(endded in %ss)',
request.method, request.path_qs, response.status,
authenticated_id, user, duration,
extra=extra)
def clean(body):
for key in body:
if isinstance(key, (dict, list)):
clean(key)
elif isinstance(body, dict):
if isinstance(body[key], (dict, list)):
clean(body[key])
elif unlog_pattern and unlog_pattern.match(key):
body[key] = '*'*6
def ignore_route(route):
if unlog_route and unlog_route.match(route):
return True
return False
| true | true |
f72cea9211379eec544b1b493a257f6c5b6255c7 | 1,066 | py | Python | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 74 | 2020-03-08T15:29:00.000Z | 2022-03-05T14:57:33.000Z | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 19 | 2020-03-06T08:56:51.000Z | 2022-03-27T05:07:35.000Z | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 23 | 2020-03-20T08:19:55.000Z | 2022-03-16T17:40:09.000Z | from PIL import Image
import numbers
class RandomCrop(object):
    """Crop an image to `size`, positioning the crop window with the
    pre-drawn fraction `v` in [0, 1] (0 = top-left, 1 = bottom-right), so
    the same `v` yields the same crop for every image in a group."""

    def __init__(self, size, v):
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size
        self.v = v

    def __call__(self, img):
        width, height = img.size
        crop_h, crop_w = self.size
        left = int((width - crop_w) * self.v)
        top = int((height - crop_h) * self.v)
        assert (img.size[0] == width and img.size[1] == height)
        if (width, height) == (crop_w, crop_h):
            # Nothing to crop: return the image unchanged.
            return img
        return img.crop((left, top, left + crop_w, top + crop_h))
class RandomHorizontalFlip(object):
    """Horizontally flip the given PIL.Image when the pre-drawn random
    value `v` is below 0.5; otherwise return the image unchanged."""

    def __init__(self, v):
        self.v = v

    def __call__(self, img):
        if self.v >= 0.5:
            return img
        return img.transpose(Image.FLIP_LEFT_RIGHT)
import numbers
class RandomCrop(object):
def __init__(self, size, v):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.v = v
def __call__(self, img):
w, h = img.size
th, tw = self.size
x1 = int(( w - tw)*self.v)
y1 = int(( h - th)*self.v)
assert(img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_image = img
else:
out_image = img.crop((x1, y1, x1 + tw, y1 + th))
return out_image
class RandomHorizontalFlip(object):
def __init__(self, v):
self.v = v
return
def __call__(self, img):
if self.v < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return img | true | true |
f72ceaaf5ad093604d4d028c26cd964e13cd6018 | 9,644 | py | Python | tests/test_authority.py | scottp-dpaw/azure-activedirectory-library-for-python | 3305d666c064e62f8c15526fb82b5cba02a11b80 | [
"MIT"
] | 2 | 2018-03-05T07:54:23.000Z | 2018-07-10T14:53:32.000Z | tests/test_authority.py | scottp-dpaw/azure-activedirectory-library-for-python | 3305d666c064e62f8c15526fb82b5cba02a11b80 | [
"MIT"
] | null | null | null | tests/test_authority.py | scottp-dpaw/azure-activedirectory-library-for-python | 3305d666c064e62f8c15526fb82b5cba02a11b80 | [
"MIT"
] | 1 | 2020-10-26T20:07:07.000Z | 2020-10-26T20:07:07.000Z | #------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import sys
import requests
import httpretty
import six
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import adal
from adal.authority import Authority
from adal import log
from adal.authentication_context import AuthenticationContext
from tests import util
from tests.util import parameters as cp
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestAuthority(unittest.TestCase):
    """Tests for ADAL authority validation and instance discovery.

    Fixes applied: `assertTrue(a, b)` replaced with `assertEqual(a, b)` in
    test_url_extra_slashes_change_authority_url (the second argument of
    assertTrue is only the failure message, so no comparison happened);
    "rased" typo corrected and bare `except:` narrowed in
    test_dsts_authority; the query-string test's `&not=` restored from the
    mangled `¬=` (HTML-entity corruption); regex literals made raw strings.
    """
    # use this as authority to force dynamic as opposed to static instance
    # discovery.
    nonHardCodedAuthority = 'https://login.doesntexist.com/' + cp['tenant']
    nonHardCodedAuthorizeEndpoint = nonHardCodedAuthority + '/oauth2/authorize'
    dstsTestEndpoint = 'https://test-dsts.core.azure-test.net/dstsv2/common'

    def setUp(self):
        util.reset_logging()
        util.clear_static_cache()
        return super(TestAuthority, self).setUp()

    def tearDown(self):
        util.reset_logging()
        util.clear_static_cache()
        return super(TestAuthority, self).tearDown()

    def setupExpectedInstanceDiscoveryRequestRetries(self, requestParametersList, authority):
        # Placeholder kept for API parity; no retry simulation implemented.
        pass

    @httpretty.activate
    def test_success_dynamic_instance_discovery(self):
        """Token acquisition succeeds after dynamic instance discovery."""
        util.setup_expected_instance_discovery_request(
            200,
            cp['authorityHosts']['global'],
            {'tenant_discovery_endpoint': 'http://foobar'},
            self.nonHardCodedAuthorizeEndpoint
        )
        responseOptions = {'authority': self.nonHardCodedAuthority}
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)

        context = adal.AuthenticationContext(self.nonHardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    def performStaticInstanceDiscovery(self, authorityHost):
        """Acquire a token against a hard-coded (statically validated) host."""
        hardCodedAuthority = 'https://' + authorityHost + '/' + cp['tenant']
        responseOptions = {
            'authority': hardCodedAuthority
        }
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        util.setup_expected_client_cred_token_request_response(200, wireResponse, hardCodedAuthority)

        context = adal.AuthenticationContext(hardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    @httpretty.activate
    def test_success_static_instance_discovery(self):
        """All well-known hosts pass static (whitelist) instance discovery."""
        self.performStaticInstanceDiscovery('login.microsoftonline.com')
        self.performStaticInstanceDiscovery('login.windows.net')
        self.performStaticInstanceDiscovery('login.chinacloudapi.cn')
        self.performStaticInstanceDiscovery('login-us.microsoftonline.com')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.windows.net')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.chinacloudapi.cn')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.cloudapi.de')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.usgovcloudapi.net')
        self.performStaticInstanceDiscovery('test-dsts.core.azure-test.net')

    @httpretty.activate
    def test_http_error(self):
        """An HTTP 500 during instance discovery surfaces as an exception."""
        util.setup_expected_instance_discovery_request(500, cp['authorityHosts']['global'], None, self.nonHardCodedAuthorizeEndpoint)

        with six.assertRaisesRegex(self, Exception, '500'):
            context = adal.AuthenticationContext(self.nonHardCodedAuthority)
            context.acquire_token_with_client_credentials(
                cp['resource'], cp['clientId'], cp['clientSecret'])

    @httpretty.activate
    def test_validation_error(self):
        """An invalid-instance discovery response surfaces its description."""
        returnDoc = {'error': 'invalid_instance', 'error_description': 'the instance was invalid'}
        util.setup_expected_instance_discovery_request(400, cp['authorityHosts']['global'], returnDoc, self.nonHardCodedAuthorizeEndpoint)

        with six.assertRaisesRegex(self, Exception, 'instance was invalid'):
            context = adal.AuthenticationContext(self.nonHardCodedAuthority)
            context.acquire_token_with_client_credentials(
                cp['resource'], cp['clientId'], cp['clientSecret'])

    @httpretty.activate
    def test_validation_off(self):
        util.setup_expected_instance_discovery_request(
            200,
            cp['authorityHosts']['global'],
            {'tenant_discovery_endpoint': 'http://foobar'},
            self.nonHardCodedAuthorizeEndpoint
        )
        responseOptions = {'authority': self.nonHardCodedAuthority}
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)

        context = adal.AuthenticationContext(self.nonHardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    @httpretty.activate
    def test_bad_url_not_https(self):
        with six.assertRaisesRegex(self, ValueError, r"The authority url must be an https endpoint\."):
            AuthenticationContext('http://this.is.not.https.com/mytenant.com')

    @httpretty.activate
    def test_bad_url_has_query(self):
        with six.assertRaisesRegex(self, ValueError, r"The authority url must not have a query string\."):
            # Encoding fix: '&not=' had been mangled to the '\xac=' character
            # by HTML-entity decoding.
            AuthenticationContext(cp['authorityTenant'] + '?this=should&not=be&here=foo')

    @httpretty.activate
    def test_url_extra_path_elements(self):
        with six.assertRaisesRegex(self, ValueError, "tenant"):  # Some tenant specific error message
            AuthenticationContext(self.nonHardCodedAuthority + '/extra/path')

    @httpretty.activate
    def test_dsts_authority(self):
        """A dSTS endpoint is accepted without raising."""
        try:
            AuthenticationContext(self.dstsTestEndpoint)
        except Exception:  # was a bare except, which also swallowed KeyboardInterrupt
            self.fail("AuthenticationContext() raised an exception on dstsTestEndpoint")

    @httpretty.activate
    def test_url_extra_slashes(self):
        util.setup_expected_instance_discovery_request(200,
            cp['authorityHosts']['global'],
            {
                'tenant_discovery_endpoint': 'http://foobar'
            },
            self.nonHardCodedAuthorizeEndpoint)

        authority_url = self.nonHardCodedAuthority + '/'  # This should pass for one or more than one slashes
        authority = Authority(authority_url, True)
        obj = util.create_empty_adal_object()
        authority.validate(obj['call_context'])
        req = httpretty.last_request()
        util.match_standard_request_headers(req)

    @httpretty.activate
    def test_url_extra_slashes_change_authority_url(self):
        authority_url = self.nonHardCodedAuthority + '/'  # This should pass for one or more than one slashes
        authority = Authority(authority_url, True)
        # Bug fix: assertTrue(a, b) treats b as the failure message and never
        # compares the values; assertEqual performs the intended check that
        # trailing slashes were stripped.
        self.assertEqual(authority._url.geturl(), self.nonHardCodedAuthority)
# Allow this test module to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| 42.672566 | 138 | 0.687578 |
import sys
import requests
import httpretty
import six
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import adal
from adal.authority import Authority
from adal import log
from adal.authentication_context import AuthenticationContext
from tests import util
from tests.util import parameters as cp
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestAuthority(unittest.TestCase):
nonHardCodedAuthority = 'https://login.doesntexist.com/' + cp['tenant']
nonHardCodedAuthorizeEndpoint = nonHardCodedAuthority + '/oauth2/authorize'
dstsTestEndpoint = 'https://test-dsts.core.azure-test.net/dstsv2/common'
def setUp(self):
util.reset_logging()
util.clear_static_cache()
return super(TestAuthority, self).setUp()
def tearDown(self):
util.reset_logging()
util.clear_static_cache()
return super(TestAuthority, self).tearDown()
def setupExpectedInstanceDiscoveryRequestRetries(self, requestParametersList, authority):
pass
@httpretty.activate
def test_success_dynamic_instance_discovery(self):
instanceDiscoveryRequest = util.setup_expected_instance_discovery_request(
200,
cp['authorityHosts']['global'],
{'tenant_discovery_endpoint' : 'http://foobar'},
self.nonHardCodedAuthorizeEndpoint
)
responseOptions = { 'authority' : self.nonHardCodedAuthority }
response = util.create_response(responseOptions)
wireResponse = response['wireResponse']
util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)
context = adal.AuthenticationContext(self.nonHardCodedAuthority)
token_response = context.acquire_token_with_client_credentials(
response['resource'], cp['clientId'], cp['clientSecret'])
self.assertTrue(
util.is_match_token_response(response['cachedResponse'], token_response),
'The response does not match what was expected.: ' + str(token_response)
)
def performStaticInstanceDiscovery(self, authorityHost):
hardCodedAuthority = 'https://' + authorityHost + '/' + cp['tenant']
responseOptions = {
'authority' : hardCodedAuthority
}
response = util.create_response(responseOptions)
wireResponse = response['wireResponse']
tokenRequest = util.setup_expected_client_cred_token_request_response(200, wireResponse, hardCodedAuthority)
context = adal.AuthenticationContext(hardCodedAuthority)
token_response = context.acquire_token_with_client_credentials(
response['resource'], cp['clientId'], cp['clientSecret'])
self.assertTrue(
util.is_match_token_response(response['cachedResponse'], token_response),
'The response does not match what was expected.: ' + str(token_response)
)
@httpretty.activate
def test_success_static_instance_discovery(self):
self.performStaticInstanceDiscovery('login.microsoftonline.com')
self.performStaticInstanceDiscovery('login.windows.net')
self.performStaticInstanceDiscovery('login.chinacloudapi.cn')
self.performStaticInstanceDiscovery('login-us.microsoftonline.com')
self.performStaticInstanceDiscovery('test-dsts.dsts.core.windows.net')
self.performStaticInstanceDiscovery('test-dsts.dsts.core.chinacloudapi.cn')
self.performStaticInstanceDiscovery('test-dsts.dsts.core.cloudapi.de')
self.performStaticInstanceDiscovery('test-dsts.dsts.core.usgovcloudapi.net')
self.performStaticInstanceDiscovery('test-dsts.core.azure-test.net')
@httpretty.activate
def test_http_error(self):
util.setup_expected_instance_discovery_request(500, cp['authorityHosts']['global'], None, self.nonHardCodedAuthorizeEndpoint)
with six.assertRaisesRegex(self, Exception, '500'):
context = adal.AuthenticationContext(self.nonHardCodedAuthority)
token_response = context.acquire_token_with_client_credentials(
cp['resource'], cp['clientId'], cp['clientSecret'])
@httpretty.activate
def test_validation_error(self):
returnDoc = { 'error' : 'invalid_instance', 'error_description' : 'the instance was invalid' }
util.setup_expected_instance_discovery_request(400, cp['authorityHosts']['global'], returnDoc, self.nonHardCodedAuthorizeEndpoint)
with six.assertRaisesRegex(self, Exception, 'instance was invalid'):
context = adal.AuthenticationContext(self.nonHardCodedAuthority)
token_response = context.acquire_token_with_client_credentials(
cp['resource'], cp['clientId'], cp['clientSecret'])
@httpretty.activate
def test_validation_off(self):
instanceDiscoveryRequest = util.setup_expected_instance_discovery_request(
200,
cp['authorityHosts']['global'],
{'tenant_discovery_endpoint' : 'http://foobar'},
self.nonHardCodedAuthorizeEndpoint
)
responseOptions = { 'authority' : self.nonHardCodedAuthority}
response = util.create_response(responseOptions)
wireResponse = response['wireResponse']
util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)
context = adal.AuthenticationContext(self.nonHardCodedAuthority)
token_response = context.acquire_token_with_client_credentials(
response['resource'], cp['clientId'], cp['clientSecret'])
self.assertTrue(
util.is_match_token_response(response['cachedResponse'], token_response),
'The response does not match what was expected.: ' + str(token_response)
)
@httpretty.activate
def test_bad_url_not_https(self):
with six.assertRaisesRegex(self, ValueError, "The authority url must be an https endpoint\."):
context = AuthenticationContext('http://this.is.not.https.com/mytenant.com')
@httpretty.activate
def test_bad_url_has_query(self):
with six.assertRaisesRegex(self, ValueError, "The authority url must not have a query string\."):
context = AuthenticationContext(cp['authorityTenant'] + '?this=should¬=be&here=foo')
@httpretty.activate
def test_url_extra_path_elements(self):
with six.assertRaisesRegex(self, ValueError, "tenant"):
context = AuthenticationContext(self.nonHardCodedAuthority + '/extra/path')
@httpretty.activate
def test_dsts_authority(self):
try:
context = AuthenticationContext(self.dstsTestEndpoint)
except:
self.fail("AuthenticationContext() rased an exception on dstsTestEndpoint")
@httpretty.activate
def test_url_extra_slashes(self):
util.setup_expected_instance_discovery_request(200,
cp['authorityHosts']['global'],
{
'tenant_discovery_endpoint': 'http://foobar'
},
self.nonHardCodedAuthorizeEndpoint)
authority_url = self.nonHardCodedAuthority + '/'
authority = Authority(authority_url, True)
obj = util.create_empty_adal_object()
authority.validate(obj['call_context'])
req = httpretty.last_request()
util.match_standard_request_headers(req)
@httpretty.activate
def test_url_extra_slashes_change_authority_url(self):
authority_url = self.nonHardCodedAuthority + '/'
authority = Authority(authority_url, True)
self.assertTrue(authority._url.geturl(), self.nonHardCodedAuthority)
if __name__ == '__main__':
unittest.main()
| true | true |
f72ceae57491507ca3020763b8de106a6f696481 | 486 | py | Python | DjangoAPI/MyApi/migrations/0001_initial.py | sni710/Django_api | a40d049586d9396c3b1bea4cd82177c573b24c17 | [
"Apache-2.0"
] | 2 | 2020-08-27T11:26:35.000Z | 2021-03-20T16:27:20.000Z | DjangoAPI/MyApi/migrations/0001_initial.py | ankit98040/Django-ML-Project | 3e50f51e56aa34bb8a7ae31f4955a10e57176ea7 | [
"Apache-2.0"
] | null | null | null | DjangoAPI/MyApi/migrations/0001_initial.py | ankit98040/Django-ML-Project | 3e50f51e56aa34bb8a7ae31f4955a10e57176ea7 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.6 on 2020-08-26 04:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the MyFile table with an
    # auto-incrementing primary key and an image field whose files are
    # stored directly under MEDIA_ROOT (upload_to='').
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MyFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='')),
            ],
        ),
    ]
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
],
),
]
| true | true |
f72ceb176086bf9c83e429c918a55d39155ac895 | 3,556 | py | Python | spyder/plugins/tours/container.py | fumitoh/spyder | 12294fec88a2f61c756538ac38bd748d8e7b3f82 | [
"MIT"
] | 1 | 2021-07-08T01:27:25.000Z | 2021-07-08T01:27:25.000Z | spyder/plugins/tours/container.py | fumitoh/spyder | 12294fec88a2f61c756538ac38bd748d8e7b3f82 | [
"MIT"
] | null | null | null | spyder/plugins/tours/container.py | fumitoh/spyder | 12294fec88a2f61c756538ac38bd748d8e7b3f82 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tours Container.
"""
from collections import OrderedDict
# Local imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
from spyder.api.widgets.main_container import PluginMainContainer
from spyder.plugins.tours.tours import TourIdentifiers
from spyder.plugins.tours.widgets import AnimatedTour, OpenTourDialog
# Localization
_ = get_translation('spyder')
# Set the index for the default tour
DEFAULT_TOUR = TourIdentifiers.IntroductionTour
class TourActions:
    """
    Action identifiers for the tours plugin.
    """
    # Identifier of the action that launches the default interactive tour.
    ShowTour = "show tour"
# --- Plugin
# ----------------------------------------------------------------------------
class ToursContainer(PluginMainContainer):
    """
    Container that registers interactive tours and provides the UI
    (menu entries, action and startup dialog) used to launch them.
    """
    def __init__(self, name, plugin, parent=None):
        super().__init__(name, plugin, parent=parent)
        # Reference to the Spyder main window; the tour widget needs it.
        self._main = plugin.main
        # Registered tours: tour_id -> steps, and tour_id -> localized title.
        self._tours = OrderedDict()
        self._tour_titles = OrderedDict()
        self._tour_widget = AnimatedTour(self._main)
        # Dialog shown at startup offering to run the default tour.
        self._tour_dialog = OpenTourDialog(
            self, lambda: self.show_tour(DEFAULT_TOUR))
        # Action that launches the default (introduction) tour.
        self.tour_action = self.create_action(
            TourActions.ShowTour,
            text=_("Show tour"),
            icon=self.create_icon('tour'),
            triggered=lambda: self.show_tour(DEFAULT_TOUR)
        )
    # --- PluginMainContainer API
    # ------------------------------------------------------------------------
    def setup(self):
        # Menu under which each registered tour gets its own entry.
        self.tours_menu = self.create_menu(
            "tours_menu", _("Interactive tours"))
    def update_actions(self):
        # No state-dependent actions to refresh.
        pass
    # --- Public API
    # ------------------------------------------------------------------------
    def register_tour(self, tour_id, title, tour_data):
        """
        Register a new interactive tour on spyder.

        Parameters
        ----------
        tour_id: str
            Unique tour string identifier.
        title: str
            Localized tour name.
        tour_data: dict
            The tour steps.

        Raises
        ------
        SpyderAPIError
            If `tour_id` was already registered.
        """
        if tour_id in self._tours:
            raise SpyderAPIError(
                "Tour with id '{}' has already been registered!".format(
                    tour_id))
        self._tours[tour_id] = tour_data
        self._tour_titles[tour_id] = title
        # tour_id is a function argument, so each lambda closes over its own
        # binding (no late-binding pitfall here).
        action = self.create_action(
            tour_id,
            text=title,
            triggered=lambda: self.show_tour(tour_id),
        )
        self.add_item_to_menu(action, menu=self.tours_menu)
    def show_tour(self, tour_id):
        """
        Show interactive tour.

        Parameters
        ----------
        tour_id: str
            Unique tour string identifier.
        """
        # NOTE(review): raises a plain KeyError for unregistered ids —
        # consider SpyderAPIError for consistency with register_tour.
        tour_data = self._tours[tour_id]
        # 'last' tracks the step to resume from; always start at step 0.
        dic = {'last': 0, 'tour': tour_data}
        self._tour_widget.set_tour(tour_id, dic, self._main)
        self._tour_widget.start_tour()
    def show_tour_message(self):
        """
        Show message about starting the tour the first time Spyder starts.
        """
        self._tour_dialog.show()
        self._tour_dialog.raise_()
| 30.135593 | 80 | 0.53009 |
from collections import OrderedDict
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
from spyder.api.widgets.main_container import PluginMainContainer
from spyder.plugins.tours.tours import TourIdentifiers
from spyder.plugins.tours.widgets import AnimatedTour, OpenTourDialog
_ = get_translation('spyder')
DEFAULT_TOUR = TourIdentifiers.IntroductionTour
class TourActions:
ShowTour = "show tour"
class ToursContainer(PluginMainContainer):
def __init__(self, name, plugin, parent=None):
super().__init__(name, plugin, parent=parent)
self._main = plugin.main
self._tours = OrderedDict()
self._tour_titles = OrderedDict()
self._tour_widget = AnimatedTour(self._main)
self._tour_dialog = OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_action = self.create_action(
TourActions.ShowTour,
text=_("Show tour"),
icon=self.create_icon('tour'),
triggered=lambda: self.show_tour(DEFAULT_TOUR)
)
def setup(self):
self.tours_menu = self.create_menu(
"tours_menu", _("Interactive tours"))
def update_actions(self):
pass
def register_tour(self, tour_id, title, tour_data):
if tour_id in self._tours:
raise SpyderAPIError(
"Tour with id '{}' has already been registered!".format(
tour_id))
self._tours[tour_id] = tour_data
self._tour_titles[tour_id] = title
action = self.create_action(
tour_id,
text=title,
triggered=lambda: self.show_tour(tour_id),
)
self.add_item_to_menu(action, menu=self.tours_menu)
def show_tour(self, tour_id):
tour_data = self._tours[tour_id]
dic = {'last': 0, 'tour': tour_data}
self._tour_widget.set_tour(tour_id, dic, self._main)
self._tour_widget.start_tour()
def show_tour_message(self):
self._tour_dialog.show()
self._tour_dialog.raise_()
| true | true |
f72ceb2273d8e23fd38d9723f4f39d414670b675 | 375 | py | Python | Python/Swap Case.py | BiswajitDeb/My_all_programs | 4717cfc0b3b1aeda75f8eec0b7ff643e8556d262 | [
"Unlicense"
] | null | null | null | Python/Swap Case.py | BiswajitDeb/My_all_programs | 4717cfc0b3b1aeda75f8eec0b7ff643e8556d262 | [
"Unlicense"
] | null | null | null | Python/Swap Case.py | BiswajitDeb/My_all_programs | 4717cfc0b3b1aeda75f8eec0b7ff643e8556d262 | [
"Unlicense"
] | null | null | null | def swap_case(s):
k = []
l = list(s)
for i in l:
j = ""
if i.islower():
j = i.upper()
elif i.isupper():
j = i.lower()
else:
k.append(i)
k.append(j)
final = ''.join(k)
return final
# Read one line from stdin and print it with every letter's case swapped.
if __name__ == '__main__':
    s = input()
    result = swap_case(s)
    print(result)
k = []
l = list(s)
for i in l:
j = ""
if i.islower():
j = i.upper()
elif i.isupper():
j = i.lower()
else:
k.append(i)
k.append(j)
final = ''.join(k)
return final
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result) | true | true |
f72ced6147cf08a6b779747e4e46d56e84081e4e | 1,139 | py | Python | plenum/test/view_change/test_start_view_change_ts_set.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | 1 | 2019-03-19T23:44:56.000Z | 2019-03-19T23:44:56.000Z | plenum/test/view_change/test_start_view_change_ts_set.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/test_start_view_change_ts_set.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | 2 | 2017-12-13T21:14:54.000Z | 2021-06-06T15:48:03.000Z | from contextlib import ExitStack
import pytest
from plenum.test.helper import create_new_test_node
@pytest.fixture(scope="module")
def create_node_and_not_start(testNodeClass,
node_config_helper_class,
tconf,
tdir,
allPluginsPath,
looper,
tdirWithPoolTxns,
tdirWithDomainTxns,
tdirWithNodeKeepInited):
with ExitStack() as exitStack:
node = exitStack.enter_context(create_new_test_node(testNodeClass,
node_config_helper_class,
"Alpha",
tconf,
tdir,
allPluginsPath))
yield node
node.stop()
def test_start_view_change_ts_set(looper, create_node_and_not_start):
    # After a view change starts, the node must record a non-zero timestamp
    # marking when the view change began.
    node = create_node_and_not_start
    node.start(looper)
    node.on_view_change_start()
    assert node.view_changer.start_view_change_ts != 0
| 33.5 | 74 | 0.515364 | from contextlib import ExitStack
import pytest
from plenum.test.helper import create_new_test_node
@pytest.fixture(scope="module")
def create_node_and_not_start(testNodeClass,
node_config_helper_class,
tconf,
tdir,
allPluginsPath,
looper,
tdirWithPoolTxns,
tdirWithDomainTxns,
tdirWithNodeKeepInited):
with ExitStack() as exitStack:
node = exitStack.enter_context(create_new_test_node(testNodeClass,
node_config_helper_class,
"Alpha",
tconf,
tdir,
allPluginsPath))
yield node
node.stop()
def test_start_view_change_ts_set(looper, create_node_and_not_start):
node = create_node_and_not_start
node.start(looper)
node.on_view_change_start()
assert node.view_changer.start_view_change_ts != 0
| true | true |
f72ced80e9d31b06e29ceff3d3a4092de61f6141 | 8,950 | py | Python | test/test_cognon_extended.py | pauh/neuron | c08f7033f954373617d7a58eb1e5b88f91ac1a27 | [
"Apache-2.0"
] | 3 | 2018-08-25T22:03:37.000Z | 2019-04-15T10:59:14.000Z | test/test_cognon_extended.py | pauh/neuron | c08f7033f954373617d7a58eb1e5b88f91ac1a27 | [
"Apache-2.0"
] | null | null | null | test/test_cognon_extended.py | pauh/neuron | c08f7033f954373617d7a58eb1e5b88f91ac1a27 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Pau Haro Negre
# based on C++ code by Carl Staelin Copyright 2009-2011
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cognon_extended import Neuron
from cognon_extended import Synapse
from cognon_extended import Word
from cognon_extended import WordSet
from nose.tools import assert_false
from nose.tools import assert_greater_equal
from nose.tools import assert_in
from nose.tools import assert_is_none
from nose.tools import assert_less
from nose.tools import assert_less_equal
from nose.tools import assert_true
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
class TestSynapse:
    """Unit tests for the Synapse record type."""
    @raises(TypeError)
    def test_construct_requires_args(self):
        # Building a Synapse with no arguments must fail.
        Synapse()
    def test_named_attributes(self):
        # offset and delay are exposed as named attributes.
        synapse = Synapse(1, 0)
        eq_(synapse.offset, 1)
        eq_(synapse.delay, 0)
class TestWord:
    """Unit tests for Word construction and its synapse bookkeeping."""
    def test_empty(self):
        # A Word built without arguments holds no synapses.
        eq_(len(Word().synapses), 0)
    @raises(ValueError)
    def test_negative_synapse_offset(self):
        # Negative synapse offsets are rejected.
        Word([(-1, 0)])
    def test_fire_1_3_8(self):
        # All and only the requested synapses are stored.
        word = Word([(1, 0), (3, 0), (8, 0)])
        eq_(len(word.synapses), 3)
        for expected in ((1, 0), (3, 0), (8, 0)):
            assert_in(expected, word.synapses)
    def test_delay_0(self):
        # Synapses created with delay 0 keep that delay.
        word = Word([(1, 0), (3, 0), (8, 0)])
        for _offset, delay in word.synapses:
            eq_(delay, 0)
class TestWordSet:
    # Tests for WordSet: randomized word generation with bounded
    # synapse offsets (< word_length) and delays (< num_delays).
    def test_small(self):
        # With num_active set, every generated word has exactly num_active
        # synapses, each within the configured offset/delay bounds.
        num_words = 5
        word_length = 16
        num_delays = 4
        num_active = 4
        ws = WordSet(num_words, word_length, num_delays, num_active)
        eq_(len(ws.words), num_words)
        eq_(len(ws.delays), num_words)
        for word in ws.words:
            eq_(len(word.synapses), num_active)
            for synapse in word.synapses:
                assert_greater_equal(synapse.offset, 0)
                assert_less(synapse.offset, word_length)
                assert_greater_equal(synapse.delay, 0)
                assert_less(synapse.delay, num_delays)
    def test_refractory_period(self):
        # With num_active=None and a refractory period, the per-word synapse
        # count is not fixed, but bounds on offset/delay must still hold.
        num_words = 5
        word_length = 16
        num_delays = 4
        num_active = None
        refractory_period = 4
        ws = WordSet(num_words, word_length, num_delays, num_active,
                     refractory_period)
        eq_(len(ws.words), num_words)
        eq_(len(ws.delays), num_words)
        for word in ws.words:
            for synapse in word.synapses:
                assert_greater_equal(synapse.offset, 0)
                assert_less(synapse.offset, word_length)
                assert_greater_equal(synapse.delay, 0)
                assert_less(synapse.delay, num_delays)
class TestNeuron:
    # Tests for Neuron: default construction, exposure (firing) behavior
    # across containers and delays, and training.
    def test_defaults(self):
        # Default parameters and per-synapse arrays (numpy structured array
        # with 'strength'/'delay'/'container' fields) are in range.
        n = Neuron()
        eq_(n.S0, 200)
        eq_(n.H, 5.0)
        eq_(n.G, 2.0)
        eq_(n.C, 1)
        eq_(n.D1, 4)
        eq_(n.D2, 7)
        assert_false(n.training)
        eq_(len(n.synapses), n.S0)
        assert_true((n.synapses['strength'] == 1.0).all())
        assert_true((n.synapses['delay'] >= 0).all())
        assert_true((n.synapses['delay'] < n.D2).all())
        assert_true((n.synapses['container'] >= 0).all())
        assert_true((n.synapses['container'] < n.C).all())
    def test_attributes_in_range(self):
        # Invariants that must hold for any freshly constructed neuron.
        n = Neuron()
        assert_greater_equal(n.H, 1.0)
        assert_greater_equal(n.C, 1)
        assert_less_equal(n.D1, n.D2)
        assert_true((n.synapses['strength'] >= 0.0).all())
    def test_expose_not_training(self):
        # With H=4, a 3-synapse word is below threshold (no fire), while an
        # 8-synapse word fires in container 0 with delay 0.
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
        w = Word([(1,0), (6,0), (9,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (3,0), (4,0), (5,0), (6,0), (8,0), (9,0), (14,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)
    @raises(IndexError)
    def test_expose_index_error(self):
        # Offset 16 is out of range for a neuron with S0=16 synapses (0..15).
        n = Neuron(S0 = 16)
        w = Word([(16,0)])
        n.expose(w)
    def test_expose_multiple_containers(self):
        # Firing is decided per container: enough active synapses in any one
        # container (relative to H) makes the neuron fire in that container.
        n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 3, D1 = 1, D2 = 1)
        # Set container assignment manually to remove randomness
        n.synapses['container'][ 0:10] = 0
        n.synapses['container'][10:14] = 1
        n.synapses['container'][14:16] = 2
        w = Word([(1,0), (2,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)
        w = Word([(10,0), (11,0), (12,0), (13,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 1)
        w = Word([(14,0), (15,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
    def test_expose_with_delays(self):
        # The reported delay is the total (synapse delay + word delay) at
        # which enough coincident inputs arrive to cross the threshold.
        n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 1, D1 = 2, D2 = 3)
        # Set delay assignment manually to remove randomness
        n.synapses['delay'][ 0:10] = 0
        n.synapses['delay'][10:14] = 1
        n.synapses['delay'][14:16] = 2
        w = Word([(1,0), (2,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)
        w = Word([(1,1), (2,1), (3,1), (4,1), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 1)
        eq_(container, 0)
        w = Word([(1,0), (2,0), (3,0), (4,1), (5,1), (6,1)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(10,1), (11,1), (12,1), (13,1)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 2)
        eq_(container, 0)
        w = Word([(12,0), (13,0), (14,0), (15,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
    def test_train(self):
        # After training on two words, unrelated words do not fire; wF shares
        # enough synapses with the trained words to fire (a false alarm).
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
        # Train neuron with 2 patterns
        wA = Word([(1,0), (6,0), (9,0), (14,0)])
        wB = Word([(3,0), (4,0), (9,0), (13,0)])
        n.start_training()
        assert_true(n.train(wA))
        assert_true(n.train(wB))
        n.finish_training()
        # Test recognition
        wD = Word([(2,0), (6,0), (12,0), (14,0)])
        fired, delay, container = n.expose(wD)
        assert_false(fired)
        wE = Word([(3,0), (7,0), (9,0), (13,0)])
        fired, delay, container = n.expose(wE)
        assert_false(fired)
        wF = Word([(1,0), (4,0), (9,0), (14,0)])
        fired, delay, container = n.expose(wF)
        assert_true(fired) # False alarm
    def test_train_not_training(self):
        # train() is a no-op (returns False) outside a training session.
        n = Neuron()
        w = Word()
        assert_false(n.train(w))
    def test_train_with_delays(self):
        # Same as test_train, but with non-uniform synapse delays.
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 2, D2 = 2)
        # Fix neuron delays manually for the test
        n.synapses['delay'] = 1
        n.synapses['delay'][1] = 0
        n.synapses['delay'][14] = 0
        # Train neuron with 2 patterns
        wA = Word([(1,1), (6,0), (9,0), (14,1)])
        wB = Word([(3,0), (4,0), (9,0), (13,0)])
        n.start_training()
        assert_true(n.train(wA))
        assert_true(n.train(wB))
        n.finish_training()
        # Recognize
        wD = Word([(2,0), (6,0), (12,0), (14,0)])
        fired, delay, container = n.expose(wD)
        assert_false(fired)
        wE = Word([(1,1), (3,0), (9,0), (13,0)])
        fired, delay, container = n.expose(wE)
        assert_true(fired) # False alarm
        wF = Word([(1,0), (4,1), (7,0), (9,0), (11,0), (14,0)])
        fired, delay, container = n.expose(wF)
        assert_false(fired)
| 30.13468 | 75 | 0.562682 |
from cognon_extended import Neuron
from cognon_extended import Synapse
from cognon_extended import Word
from cognon_extended import WordSet
from nose.tools import assert_false
from nose.tools import assert_greater_equal
from nose.tools import assert_in
from nose.tools import assert_is_none
from nose.tools import assert_less
from nose.tools import assert_less_equal
from nose.tools import assert_true
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
class TestSynapse:
@raises(TypeError)
def test_construct_requires_args(self):
s = Synapse()
def test_named_attributes(self):
s = Synapse(1, 0)
eq_(s.offset, 1)
eq_(s.delay, 0)
class TestWord:
def test_empty(self):
w = Word()
eq_(len(w.synapses), 0)
@raises(ValueError)
def test_negative_synapse_offset(self):
w = Word([(-1, 0)])
def test_fire_1_3_8(self):
w = Word([(1,0),(3,0),(8,0)])
eq_(len(w.synapses), 3)
assert_in((1,0), w.synapses)
assert_in((3,0), w.synapses)
assert_in((8,0), w.synapses)
def test_delay_0(self):
w = Word([(1,0),(3,0),(8,0)])
for offset, delay in w.synapses:
eq_(delay, 0)
class TestWordSet:
def test_small(self):
num_words = 5
word_length = 16
num_delays = 4
num_active = 4
ws = WordSet(num_words, word_length, num_delays, num_active)
eq_(len(ws.words), num_words)
eq_(len(ws.delays), num_words)
for word in ws.words:
eq_(len(word.synapses), num_active)
for synapse in word.synapses:
assert_greater_equal(synapse.offset, 0)
assert_less(synapse.offset, word_length)
assert_greater_equal(synapse.delay, 0)
assert_less(synapse.delay, num_delays)
def test_refractory_period(self):
num_words = 5
word_length = 16
num_delays = 4
num_active = None
refractory_period = 4
ws = WordSet(num_words, word_length, num_delays, num_active,
refractory_period)
eq_(len(ws.words), num_words)
eq_(len(ws.delays), num_words)
for word in ws.words:
for synapse in word.synapses:
assert_greater_equal(synapse.offset, 0)
assert_less(synapse.offset, word_length)
assert_greater_equal(synapse.delay, 0)
assert_less(synapse.delay, num_delays)
class TestNeuron:
def test_defaults(self):
n = Neuron()
eq_(n.S0, 200)
eq_(n.H, 5.0)
eq_(n.G, 2.0)
eq_(n.C, 1)
eq_(n.D1, 4)
eq_(n.D2, 7)
assert_false(n.training)
eq_(len(n.synapses), n.S0)
assert_true((n.synapses['strength'] == 1.0).all())
assert_true((n.synapses['delay'] >= 0).all())
assert_true((n.synapses['delay'] < n.D2).all())
assert_true((n.synapses['container'] >= 0).all())
assert_true((n.synapses['container'] < n.C).all())
def test_attributes_in_range(self):
n = Neuron()
assert_greater_equal(n.H, 1.0)
assert_greater_equal(n.C, 1)
assert_less_equal(n.D1, n.D2)
assert_true((n.synapses['strength'] >= 0.0).all())
def test_expose_not_training(self):
n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
w = Word([(1,0), (6,0), (9,0)])
fired, delay, container = n.expose(w)
assert_false(fired)
assert_is_none(delay)
assert_is_none(container)
w = Word([(1,0), (3,0), (4,0), (5,0), (6,0), (8,0), (9,0), (14,0)])
fired, delay, container = n.expose(w)
assert_true(fired)
eq_(delay, 0)
eq_(container, 0)
@raises(IndexError)
def test_expose_index_error(self):
n = Neuron(S0 = 16)
w = Word([(16,0)])
n.expose(w)
def test_expose_multiple_containers(self):
n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 3, D1 = 1, D2 = 1)
n.synapses['container'][ 0:10] = 0
n.synapses['container'][10:14] = 1
n.synapses['container'][14:16] = 2
w = Word([(1,0), (2,0), (6,0)])
fired, delay, container = n.expose(w)
assert_false(fired)
assert_is_none(delay)
assert_is_none(container)
w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
fired, delay, container = n.expose(w)
assert_true(fired)
eq_(delay, 0)
eq_(container, 0)
w = Word([(10,0), (11,0), (12,0), (13,0)])
fired, delay, container = n.expose(w)
assert_true(fired)
eq_(delay, 0)
eq_(container, 1)
w = Word([(14,0), (15,0)])
fired, delay, container = n.expose(w)
assert_false(fired)
assert_is_none(delay)
assert_is_none(container)
def test_expose_with_delays(self):
n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 1, D1 = 2, D2 = 3)
n.synapses['delay'][ 0:10] = 0
n.synapses['delay'][10:14] = 1
n.synapses['delay'][14:16] = 2
w = Word([(1,0), (2,0), (6,0)])
fired, delay, container = n.expose(w)
assert_false(fired)
assert_is_none(delay)
assert_is_none(container)
w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
fired, delay, container = n.expose(w)
assert_true(fired)
eq_(delay, 0)
eq_(container, 0)
w = Word([(1,1), (2,1), (3,1), (4,1), (5,0), (6,0)])
fired, delay, container = n.expose(w)
assert_true(fired)
eq_(delay, 1)
eq_(container, 0)
w = Word([(1,0), (2,0), (3,0), (4,1), (5,1), (6,1)])
fired, delay, container = n.expose(w)
assert_false(fired)
assert_is_none(delay)
assert_is_none(container)
w = Word([(10,1), (11,1), (12,1), (13,1)])
fired, delay, container = n.expose(w)
assert_true(fired)
eq_(delay, 2)
eq_(container, 0)
w = Word([(12,0), (13,0), (14,0), (15,0)])
fired, delay, container = n.expose(w)
assert_false(fired)
assert_is_none(delay)
assert_is_none(container)
def test_train(self):
n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
wA = Word([(1,0), (6,0), (9,0), (14,0)])
wB = Word([(3,0), (4,0), (9,0), (13,0)])
n.start_training()
assert_true(n.train(wA))
assert_true(n.train(wB))
n.finish_training()
wD = Word([(2,0), (6,0), (12,0), (14,0)])
fired, delay, container = n.expose(wD)
assert_false(fired)
wE = Word([(3,0), (7,0), (9,0), (13,0)])
fired, delay, container = n.expose(wE)
assert_false(fired)
wF = Word([(1,0), (4,0), (9,0), (14,0)])
fired, delay, container = n.expose(wF)
assert_true(fired)
def test_train_not_training(self):
n = Neuron()
w = Word()
assert_false(n.train(w))
def test_train_with_delays(self):
n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 2, D2 = 2)
n.synapses['delay'] = 1
n.synapses['delay'][1] = 0
n.synapses['delay'][14] = 0
wA = Word([(1,1), (6,0), (9,0), (14,1)])
wB = Word([(3,0), (4,0), (9,0), (13,0)])
n.start_training()
assert_true(n.train(wA))
assert_true(n.train(wB))
n.finish_training()
wD = Word([(2,0), (6,0), (12,0), (14,0)])
fired, delay, container = n.expose(wD)
assert_false(fired)
wE = Word([(1,1), (3,0), (9,0), (13,0)])
fired, delay, container = n.expose(wE)
assert_true(fired)
wF = Word([(1,0), (4,1), (7,0), (9,0), (11,0), (14,0)])
fired, delay, container = n.expose(wF)
assert_false(fired)
| true | true |
f72ced891d0ab304ac2986df0441a610cd13e4c7 | 896 | py | Python | 2020/day25/day25.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | 3 | 2020-12-03T23:20:27.000Z | 2020-12-03T23:20:53.000Z | 2020/day25/day25.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | null | null | null | 2020/day25/day25.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | null | null | null | def main():
with open('in.txt') as file:
card_pubkey, door_pubkey = map(lambda n: int(n), file.read().splitlines())
print('card public key:', card_pubkey)
print('door public key:', door_pubkey)
print('card loop size:', card_loop_size := get_loop_size(card_pubkey))
print('door loop size:', door_loop_size := get_loop_size(door_pubkey))
print('encryption key:', get_encryption_key(card_loop_size, door_pubkey))
def get_loop_size(pubkey):
    """Return the secret loop size: the discrete log of pubkey base 7 mod 20201227."""
    value, loops = 1, 0
    while value != pubkey:
        value = (value * 7) % 20201227
        loops += 1
    return loops
def get_encryption_key(first_loop_size, second_public_key):
    """Return the shared key: second_public_key ** first_loop_size mod 20201227.

    Uses Python's built-in three-argument pow for O(log n) modular
    exponentiation instead of the original O(n) repeated-multiplication loop;
    the result is identical.
    """
    return pow(second_public_key, first_loop_size, 20201227)
# Script entry point.
if __name__ == '__main__':
    main()
| 24.888889 | 82 | 0.65067 | def main():
with open('in.txt') as file:
card_pubkey, door_pubkey = map(lambda n: int(n), file.read().splitlines())
print('card public key:', card_pubkey)
print('door public key:', door_pubkey)
print('card loop size:', card_loop_size := get_loop_size(card_pubkey))
print('door loop size:', door_loop_size := get_loop_size(door_pubkey))
print('encryption key:', get_encryption_key(card_loop_size, door_pubkey))
def get_loop_size(pubkey):
value = 1
loop_size = 0
while value != pubkey:
value *= 7
value %= 20201227
loop_size += 1
return loop_size
def get_encryption_key(first_loop_size, second_public_key):
value = 1
loop_size = 0
while loop_size < first_loop_size:
value *= second_public_key
value %= 20201227
loop_size += 1
return value
if __name__ == '__main__':
main()
| true | true |
f72cedd7827c3a103f775708f54a774487557133 | 4,338 | py | Python | doom/test.py | luxinglong/ViZDoom-SL | fbc54c401b1ca320e9e804f2c97fdedc5d0c534d | [
"MIT"
] | null | null | null | doom/test.py | luxinglong/ViZDoom-SL | fbc54c401b1ca320e9e804f2c97fdedc5d0c534d | [
"MIT"
] | null | null | null | doom/test.py | luxinglong/ViZDoom-SL | fbc54c401b1ca320e9e804f2c97fdedc5d0c534d | [
"MIT"
] | null | null | null | import sys
import argparse
import numpy as np
from actions import ActionBuilder
from game import Game
# use_continuous speed action_combinations crouch freelook
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}


def bool_flag(string):
    """Parse an on/off style command-line value into a bool.

    Accepts (case-insensitively) the values in TRUTHY_STRINGS and
    FALSY_STRINGS; anything else raises argparse.ArgumentTypeError.
    """
    lowered = string.lower()
    if lowered in TRUTHY_STRINGS:
        return True
    if lowered in FALSY_STRINGS:
        return False
    raise argparse.ArgumentTypeError("invalid value for a boolean flag. "
                                     "use 0 or 1")
def main():
    """Run a random-policy agent in a ViZDoom full deathmatch scenario.

    Parses command-line flags, builds the action space and the game, then
    plays a fixed number of steps choosing uniformly random actions.
    Intended as a smoke test / environment check, not a trained agent.
    """
    parser = argparse.ArgumentParser(description='LUBAN runner')
    parser.add_argument("--use_continuous", type=bool_flag, default=False,
                        help="weather use continuous actions")
    # Available actions
    # combination of actions the agent is allowed to do.
    # this is for non-continuous mode only, and is ignored in continuous mode
    parser.add_argument("--action_combinations", type=str,
                        default='move_fb+turn_lr+move_lr+attack',
                        help="Allowed combinations of actions")
    # freelook: allow the agent to look up and down
    parser.add_argument("--freelook", type=bool_flag, default=False,
                        help="Enable freelook (look up / look down)")
    parser.add_argument("--human_player", type=bool_flag, default=False,
                        help="DoomGame mode")
    # speed and crouch buttons: in non-continuous mode, the network can not
    # have control on these buttons, and they must be set to always 'on' or
    # 'off'. In continuous mode, the network can manually control crouch and
    # speed.
    parser.add_argument("--speed", type=str, default='off',
                        help="Crouch: on / off / manual")
    parser.add_argument("--crouch", type=str, default='off',
                        help="Crouch: on / off / manual")
    # for process_buffers: frame preprocessing options
    parser.add_argument("--height", type=int, default=60,
                        help="Image height")
    parser.add_argument("--width", type=int, default=108,
                        help="Image width")
    parser.add_argument("--gray", type=bool_flag, default=False,
                        help="Use grayscale")
    parser.add_argument("--use_screen_buffer", type=bool_flag, default=True,
                        help="Use the screen buffer")
    parser.add_argument("--use_depth_buffer", type=bool_flag, default=False,
                        help="Use the depth buffer")
    parser.add_argument("--labels_mapping", type=str, default='',
                        help="Map labels to different feature maps")
    parser.add_argument("--dump_freq", type=int, default=0,
                        help="Dump every X iterations (0 to disable)")
    # for observe_state: number of past frames kept in the state history
    parser.add_argument("--hist_size", type=int, default=4,
                        help="History size")
    # parse_known_args tolerates extra flags; unparsed holds the leftovers
    params, unparsed = parser.parse_known_args(sys.argv)
    print(sys.argv)
    # game variables tracked as (name, bucket count) pairs
    params.game_variables = [('health', 101), ('sel_ammo', 301)]
    print(params)
    action_builder = ActionBuilder(params)
    print(action_builder.n_actions)
    print(action_builder.available_actions)
    game = Game(
        scenario='full_deathmatch',
        action_builder=action_builder,
        score_variable='USER2',
        freedoom=True,
        screen_resolution='RES_800X450',
        use_screen_buffer=True,
        use_depth_buffer=True,
        labels_mapping="",
        game_features="target,enemy",
        mode=('SPECTATOR' if params.human_player else 'PLAYER'),
        render_hud=True,
        render_crosshair=True,
        render_weapon=True,
        freelook=params.freelook,
        visible=0,
        n_bots=10,
        use_scripted_marines=True
    )
    game.start(map_id = 2)
    game.init_bots_health(100)
    episodes = 100000
    last_states = []
    for _ in range(episodes):
        # dead players must be respawned before they can act again
        if game.is_player_dead():
            game.respawn_player()
        game.observe_state(params, last_states)
        # NOTE(review): 29 looks like it should match action_builder.n_actions
        # (printed above) — confirm before changing the action space.
        action = np.random.randint(0, 29)
        game.make_action(action, frame_skip=1, sleep=None)
    game.close()
if __name__ == '__main__':
main()
| 37.396552 | 78 | 0.605579 | import sys
import argparse
import numpy as np
from actions import ActionBuilder
from game import Game
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(string):
if string.lower() in FALSY_STRINGS:
return False
elif string.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag. "
"use 0 or 1")
def main():
parser = argparse.ArgumentParser(description='LUBAN runner')
parser.add_argument("--use_continuous", type=bool_flag, default=False,
help="weather use continuous actions")
parser.add_argument("--action_combinations", type=str,
default='move_fb+turn_lr+move_lr+attack',
help="Allowed combinations of actions")
parser.add_argument("--freelook", type=bool_flag, default=False,
help="Enable freelook (look up / look down)")
parser.add_argument("--human_player", type=bool_flag, default=False,
help="DoomGame mode")
parser.add_argument("--speed", type=str, default='off',
help="Crouch: on / off / manual")
parser.add_argument("--crouch", type=str, default='off',
help="Crouch: on / off / manual")
parser.add_argument("--height", type=int, default=60,
help="Image height")
parser.add_argument("--width", type=int, default=108,
help="Image width")
parser.add_argument("--gray", type=bool_flag, default=False,
help="Use grayscale")
parser.add_argument("--use_screen_buffer", type=bool_flag, default=True,
help="Use the screen buffer")
parser.add_argument("--use_depth_buffer", type=bool_flag, default=False,
help="Use the depth buffer")
parser.add_argument("--labels_mapping", type=str, default='',
help="Map labels to different feature maps")
parser.add_argument("--dump_freq", type=int, default=0,
help="Dump every X iterations (0 to disable)")
parser.add_argument("--hist_size", type=int, default=4,
help="History size")
params, unparsed = parser.parse_known_args(sys.argv)
print(sys.argv)
params.game_variables = [('health', 101), ('sel_ammo', 301)]
print(params)
action_builder = ActionBuilder(params)
print(action_builder.n_actions)
print(action_builder.available_actions)
game = Game(
scenario='full_deathmatch',
action_builder=action_builder,
score_variable='USER2',
freedoom=True,
screen_resolution='RES_800X450',
use_screen_buffer=True,
use_depth_buffer=True,
labels_mapping="",
game_features="target,enemy",
mode=('SPECTATOR' if params.human_player else 'PLAYER'),
render_hud=True,
render_crosshair=True,
render_weapon=True,
freelook=params.freelook,
visible=0,
n_bots=10,
use_scripted_marines=True
)
game.start(map_id = 2)
game.init_bots_health(100)
episodes = 100000
last_states = []
for _ in range(episodes):
if game.is_player_dead():
game.respawn_player()
game.observe_state(params, last_states)
action = np.random.randint(0, 29)
game.make_action(action, frame_skip=1, sleep=None)
game.close()
if __name__ == '__main__':
main()
| true | true |
f72cef269ff93973871d0381495aa221dec684e9 | 576 | py | Python | processdata/migrations/0001_initial.py | shinysong/covid19-dashboard | c4c536e3781caecb7f1cfcfdb27c1324fe493eb2 | [
"MIT"
] | null | null | null | processdata/migrations/0001_initial.py | shinysong/covid19-dashboard | c4c536e3781caecb7f1cfcfdb27c1324fe493eb2 | [
"MIT"
] | null | null | null | processdata/migrations/0001_initial.py | shinysong/covid19-dashboard | c4c536e3781caecb7f1cfcfdb27c1324fe493eb2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2022-03-29 07:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the ``test_api`` table.

    Columns: auto ``id`` primary key, free-text ``dmName``, ``mainURL``
    URL field, and integer ``dsCount``.
    """

    # First migration for this app; no prior migration state exists.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='test_api',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dmName', models.TextField()),
                ('mainURL', models.URLField()),
                ('dsCount', models.IntegerField()),
            ],
        ),
    ]
| 24 | 114 | 0.548611 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='test_api',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dmName', models.TextField()),
('mainURL', models.URLField()),
('dsCount', models.IntegerField()),
],
),
]
| true | true |
f72cefc08416d058c79ffed4f4ba0c15d2eb9ff0 | 17,944 | py | Python | homeassistant/components/climate/wink.py | jamescurtin/home-assistant | 6a9968ccb9b0082f5629e50955549d432aba7d90 | [
"Apache-2.0"
] | 2 | 2020-02-20T18:47:55.000Z | 2021-11-09T11:33:28.000Z | homeassistant/components/climate/wink.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 1 | 2021-02-08T20:56:06.000Z | 2021-02-08T20:56:06.000Z | homeassistant/components/climate/wink.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 1 | 2019-09-15T04:45:12.000Z | 2019-09-15T04:45:12.000Z | """
Support for Wink thermostats, Air Conditioners, and Water Heaters.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.wink/
"""
import logging
import asyncio
from homeassistant.components.wink import WinkDevice, DOMAIN
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE, STATE_FAN_ONLY,
ATTR_CURRENT_HUMIDITY, STATE_ECO, STATE_ELECTRIC,
STATE_PERFORMANCE, STATE_HIGH_DEMAND,
STATE_HEAT_PUMP, STATE_GAS)
from homeassistant.const import (
TEMP_CELSIUS, STATE_ON,
STATE_OFF, STATE_UNKNOWN)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
SPEED_LOW = 'low'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
HA_STATE_TO_WINK = {STATE_AUTO: 'auto',
STATE_ECO: 'eco',
STATE_FAN_ONLY: 'fan_only',
STATE_HEAT: 'heat_only',
STATE_COOL: 'cool_only',
STATE_PERFORMANCE: 'performance',
STATE_HIGH_DEMAND: 'high_demand',
STATE_HEAT_PUMP: 'heat_pump',
STATE_ELECTRIC: 'electric_only',
STATE_GAS: 'gas',
STATE_OFF: 'off'}
WINK_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_WINK.items()}
ATTR_EXTERNAL_TEMPERATURE = "external_temperature"
ATTR_SMART_TEMPERATURE = "smart_temperature"
ATTR_ECO_TARGET = "eco_target"
ATTR_OCCUPIED = "occupied"
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Wink climate devices.

    Registers any thermostat, air conditioner, or water heater that has not
    already been added (tracked via the shared unique-id set).
    """
    import pywink

    known_ids = hass.data[DOMAIN]['unique_ids']

    for thermostat in pywink.get_thermostats():
        if thermostat.object_id() + thermostat.name() not in known_ids:
            add_devices([WinkThermostat(thermostat, hass)])

    for conditioner in pywink.get_air_conditioners():
        if conditioner.object_id() + conditioner.name() not in known_ids:
            add_devices([WinkAC(conditioner, hass)])

    for heater in pywink.get_water_heaters():
        if heater.object_id() + heater.name() not in known_ids:
            add_devices([WinkWaterHeater(heater, hass)])
# pylint: disable=abstract-method
class WinkThermostat(WinkDevice, ClimateDevice):
    """Representation of a Wink thermostat."""

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Callback when entity is added to hass."""
        self.hass.data[DOMAIN]['entities']['climate'].append(self)

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        data = {}
        target_temp_high = self.target_temperature_high
        target_temp_low = self.target_temperature_low
        if target_temp_high is not None:
            # Reuse the locals instead of re-evaluating the properties.
            data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
                target_temp_high)
        if target_temp_low is not None:
            data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
                target_temp_low)
        if self.external_temperature:
            data[ATTR_EXTERNAL_TEMPERATURE] = self._convert_for_display(
                self.external_temperature)
        if self.smart_temperature:
            data[ATTR_SMART_TEMPERATURE] = self.smart_temperature
        if self.occupied:
            data[ATTR_OCCUPIED] = self.occupied
        if self.eco_target:
            data[ATTR_ECO_TARGET] = self.eco_target
        current_humidity = self.current_humidity
        if current_humidity is not None:
            data[ATTR_CURRENT_HUMIDITY] = current_humidity
        return data

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.wink.current_temperature()

    @property
    def current_humidity(self):
        """Return the current humidity as a percentage, if known."""
        if self.wink.current_humidity() is not None:
            # The API states humidity will be a float 0-1
            # the only example API response with humidity listed show an int
            # This will address both possibilities
            if self.wink.current_humidity() < 1:
                return self.wink.current_humidity() * 100
            return self.wink.current_humidity()
        return None

    @property
    def external_temperature(self):
        """Return the current external temperature."""
        return self.wink.current_external_temperature()

    @property
    def smart_temperature(self):
        """Return the current average temp of all remote sensor."""
        return self.wink.current_smart_temperature()

    @property
    def eco_target(self):
        """Return status of eco target (Is the termostat in eco mode)."""
        return self.wink.eco_target()

    @property
    def occupied(self):
        """Return status of if the thermostat has detected occupancy."""
        return self.wink.occupied()

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        if not self.wink.is_on():
            current_op = STATE_OFF
        else:
            current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
            # Auxiliary heat is surfaced to HA as plain heating.
            if current_op == 'aux':
                return STATE_HEAT
            if current_op is None:
                current_op = STATE_UNKNOWN
        return current_op

    @property
    def target_humidity(self):
        """Return the humidity we try to reach."""
        target_hum = None
        if self.wink.current_humidifier_mode() == 'on':
            if self.wink.current_humidifier_set_point() is not None:
                target_hum = self.wink.current_humidifier_set_point() * 100
        elif self.wink.current_dehumidifier_mode() == 'on':
            if self.wink.current_dehumidifier_set_point() is not None:
                target_hum = self.wink.current_dehumidifier_set_point() * 100
        else:
            target_hum = None
        return target_hum

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        if self.current_operation != STATE_AUTO and not self.is_away_mode_on:
            if self.current_operation == STATE_COOL:
                return self.wink.current_max_set_point()
            elif self.current_operation == STATE_HEAT:
                return self.wink.current_min_set_point()
        return None

    @property
    def target_temperature_low(self):
        """Return the lower bound temperature we try to reach."""
        if self.current_operation == STATE_AUTO:
            return self.wink.current_min_set_point()
        return None

    @property
    def target_temperature_high(self):
        """Return the higher bound temperature we try to reach."""
        if self.current_operation == STATE_AUTO:
            return self.wink.current_max_set_point()
        return None

    @property
    def is_away_mode_on(self):
        """Return if away mode is on."""
        return self.wink.away()

    @property
    def is_aux_heat_on(self):
        """Return true if aux heater."""
        if 'aux' not in self.wink.hvac_modes():
            return None
        if self.wink.current_hvac_mode() == 'aux':
            return True
        return False

    def set_temperature(self, **kwargs):
        """Set new target temperature.

        A single ATTR_TEMPERATURE maps onto the bound matching the current
        mode (max set point when cooling, min set point when heating).
        """
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
        target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        if target_temp is not None:
            if self.current_operation == STATE_COOL:
                target_temp_high = target_temp
            if self.current_operation == STATE_HEAT:
                target_temp_low = target_temp
        # (Removed two no-op "x = x" reassignments from the original.)
        self.wink.set_temperature(target_temp_low, target_temp_high)

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
        # The only way to disable aux heat is with the toggle
        if self.is_aux_heat_on and op_mode_to_set == STATE_HEAT:
            return
        self.wink.set_operation_mode(op_mode_to_set)

    @property
    def operation_list(self):
        """List of available operation modes."""
        op_list = ['off']
        modes = self.wink.hvac_modes()
        for mode in modes:
            # 'aux' is exposed through the aux-heat toggle, not as a mode.
            if mode == 'aux':
                continue
            ha_mode = WINK_STATE_TO_HA.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
            else:
                error = "Invalid operation mode mapping. " + mode + \
                    " doesn't map. Please report this."
                _LOGGER.error(error)
        return op_list

    def turn_away_mode_on(self):
        """Turn away on."""
        self.wink.set_away_mode()

    def turn_away_mode_off(self):
        """Turn away off."""
        self.wink.set_away_mode(False)

    @property
    def current_fan_mode(self):
        """Return whether the fan is on."""
        if self.wink.current_fan_mode() == 'on':
            return STATE_ON
        elif self.wink.current_fan_mode() == 'auto':
            return STATE_AUTO
        # No Fan available so disable slider
        return None

    @property
    def fan_list(self):
        """List of available fan modes."""
        if self.wink.has_fan():
            return self.wink.fan_modes()
        return None

    def set_fan_mode(self, fan):
        """Turn fan on/off."""
        self.wink.set_fan_mode(fan.lower())

    def turn_aux_heat_on(self):
        """Turn auxiliary heater on."""
        self.wink.set_operation_mode('aux')

    def turn_aux_heat_off(self):
        """Turn auxiliary heater off."""
        self.set_operation_mode(STATE_HEAT)

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        minimum = 7  # Default minimum
        min_min = self.wink.min_min_set_point()
        min_max = self.wink.min_max_set_point()
        return_value = minimum
        if self.current_operation == STATE_HEAT:
            if min_min:
                return_value = min_min
            else:
                return_value = minimum
        elif self.current_operation == STATE_COOL:
            if min_max:
                return_value = min_max
            else:
                return_value = minimum
        elif self.current_operation == STATE_AUTO:
            # In auto mode both bounds apply; take the lower of the two.
            if min_min and min_max:
                return_value = min(min_min, min_max)
            else:
                return_value = minimum
        else:
            return_value = minimum
        return return_value

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        maximum = 35  # Default maximum
        max_min = self.wink.max_min_set_point()
        max_max = self.wink.max_max_set_point()
        return_value = maximum
        if self.current_operation == STATE_HEAT:
            if max_min:
                return_value = max_min
            else:
                return_value = maximum
        elif self.current_operation == STATE_COOL:
            if max_max:
                return_value = max_max
            else:
                return_value = maximum
        elif self.current_operation == STATE_AUTO:
            # NOTE(review): min() of the two maxima looks intentional (the
            # conservative settable bound) — confirm against the Wink API.
            if max_min and max_max:
                return_value = min(max_min, max_max)
            else:
                return_value = maximum
        else:
            return_value = maximum
        return return_value
class WinkAC(WinkDevice, ClimateDevice):
    """Representation of a Wink air conditioner."""

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        data = {}
        target_temp_high = self.target_temperature_high
        target_temp_low = self.target_temperature_low
        if target_temp_high is not None:
            # Reuse the locals instead of re-evaluating the properties.
            data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
                target_temp_high)
        if target_temp_low is not None:
            data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
                target_temp_low)
        data["total_consumption"] = self.wink.total_consumption()
        data["schedule_enabled"] = self.wink.schedule_enabled()
        return data

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.wink.current_temperature()

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        if not self.wink.is_on():
            current_op = STATE_OFF
        else:
            current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
            if current_op is None:
                current_op = STATE_UNKNOWN
        return current_op

    @property
    def operation_list(self):
        """List of available operation modes."""
        op_list = ['off']
        modes = self.wink.modes()
        for mode in modes:
            ha_mode = WINK_STATE_TO_HA.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
            else:
                error = "Invalid operation mode mapping. " + mode + \
                    " doesn't map. Please report this."
                _LOGGER.error(error)
        return op_list

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        self.wink.set_temperature(target_temp)

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
        # The Wink API calls the AC's eco mode "auto_eco".
        if op_mode_to_set == 'eco':
            op_mode_to_set = 'auto_eco'
        self.wink.set_operation_mode(op_mode_to_set)

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.wink.current_max_set_point()

    @property
    def current_fan_mode(self):
        """Return the current fan mode."""
        speed = self.wink.current_fan_speed()
        # The API reports speed as a fraction; map the discrete setpoints
        # back onto HA fan modes. Values outside the bands are unknown.
        if 0.3 < speed <= 0.4:
            return SPEED_LOW
        elif 0.5 < speed <= 0.8:
            return SPEED_MEDIUM
        elif 0.8 < speed <= 1.0:
            return SPEED_HIGH
        return STATE_UNKNOWN

    @property
    def fan_list(self):
        """Return a list of available fan modes."""
        return [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]

    def set_fan_mode(self, fan):
        """Set fan speed.

        Unknown modes are logged and ignored (the original raised
        UnboundLocalError for them).
        """
        speed = {SPEED_LOW: 0.4, SPEED_MEDIUM: 0.8, SPEED_HIGH: 1.0}.get(fan)
        if speed is None:
            _LOGGER.error("Invalid fan speed: %s", fan)
            return
        self.wink.set_ac_fan_speed(speed)
class WinkWaterHeater(WinkDevice, ClimateDevice):
    """Representation of a Wink water heater."""

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        data = {}
        data["vacation_mode"] = self.wink.vacation_mode_enabled()
        data["rheem_type"] = self.wink.rheem_type()
        return data

    @property
    def current_operation(self):
        """
        Return current operation one of the following.

        ["eco", "performance", "heat_pump",
        "high_demand", "electric_only", "gas]
        """
        if not self.wink.is_on():
            current_op = STATE_OFF
        else:
            current_op = WINK_STATE_TO_HA.get(self.wink.current_mode())
            if current_op is None:
                current_op = STATE_UNKNOWN
        return current_op

    @property
    def operation_list(self):
        """List of available operation modes."""
        op_list = ['off']
        modes = self.wink.modes()
        for mode in modes:
            # 'aux' has no HA equivalent for water heaters.
            if mode == 'aux':
                continue
            ha_mode = WINK_STATE_TO_HA.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
            else:
                error = "Invalid operation mode mapping. " + mode + \
                    " doesn't map. Please report this."
                _LOGGER.error(error)
        return op_list

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        self.wink.set_temperature(target_temp)

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
        self.wink.set_operation_mode(op_mode_to_set)

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.wink.current_set_point()

    def turn_away_mode_on(self):
        """Turn away on."""
        self.wink.set_vacation_mode(True)

    def turn_away_mode_off(self):
        """Turn away off."""
        self.wink.set_vacation_mode(False)

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self.wink.min_set_point()

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self.wink.max_set_point()
| 33.729323 | 77 | 0.615805 | import logging
import asyncio
from homeassistant.components.wink import WinkDevice, DOMAIN
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE, STATE_FAN_ONLY,
ATTR_CURRENT_HUMIDITY, STATE_ECO, STATE_ELECTRIC,
STATE_PERFORMANCE, STATE_HIGH_DEMAND,
STATE_HEAT_PUMP, STATE_GAS)
from homeassistant.const import (
TEMP_CELSIUS, STATE_ON,
STATE_OFF, STATE_UNKNOWN)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
SPEED_LOW = 'low'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
HA_STATE_TO_WINK = {STATE_AUTO: 'auto',
STATE_ECO: 'eco',
STATE_FAN_ONLY: 'fan_only',
STATE_HEAT: 'heat_only',
STATE_COOL: 'cool_only',
STATE_PERFORMANCE: 'performance',
STATE_HIGH_DEMAND: 'high_demand',
STATE_HEAT_PUMP: 'heat_pump',
STATE_ELECTRIC: 'electric_only',
STATE_GAS: 'gas',
STATE_OFF: 'off'}
WINK_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_WINK.items()}
ATTR_EXTERNAL_TEMPERATURE = "external_temperature"
ATTR_SMART_TEMPERATURE = "smart_temperature"
ATTR_ECO_TARGET = "eco_target"
ATTR_OCCUPIED = "occupied"
def setup_platform(hass, config, add_devices, discovery_info=None):
import pywink
for climate in pywink.get_thermostats():
_id = climate.object_id() + climate.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkThermostat(climate, hass)])
for climate in pywink.get_air_conditioners():
_id = climate.object_id() + climate.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkAC(climate, hass)])
for water_heater in pywink.get_water_heaters():
_id = water_heater.object_id() + water_heater.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkWaterHeater(water_heater, hass)])
class WinkThermostat(WinkDevice, ClimateDevice):
@asyncio.coroutine
def async_added_to_hass(self):
self.hass.data[DOMAIN]['entities']['climate'].append(self)
@property
def temperature_unit(self):
return TEMP_CELSIUS
@property
def device_state_attributes(self):
data = {}
target_temp_high = self.target_temperature_high
target_temp_low = self.target_temperature_low
if target_temp_high is not None:
data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
self.target_temperature_high)
if target_temp_low is not None:
data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
self.target_temperature_low)
if self.external_temperature:
data[ATTR_EXTERNAL_TEMPERATURE] = self._convert_for_display(
self.external_temperature)
if self.smart_temperature:
data[ATTR_SMART_TEMPERATURE] = self.smart_temperature
if self.occupied:
data[ATTR_OCCUPIED] = self.occupied
if self.eco_target:
data[ATTR_ECO_TARGET] = self.eco_target
current_humidity = self.current_humidity
if current_humidity is not None:
data[ATTR_CURRENT_HUMIDITY] = current_humidity
return data
@property
def current_temperature(self):
return self.wink.current_temperature()
@property
def current_humidity(self):
if self.wink.current_humidity() is not None:
if self.wink.current_humidity() < 1:
return self.wink.current_humidity() * 100
return self.wink.current_humidity()
return None
@property
def external_temperature(self):
return self.wink.current_external_temperature()
@property
def smart_temperature(self):
return self.wink.current_smart_temperature()
@property
def eco_target(self):
return self.wink.eco_target()
@property
def occupied(self):
return self.wink.occupied()
@property
def current_operation(self):
if not self.wink.is_on():
current_op = STATE_OFF
else:
current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
if current_op == 'aux':
return STATE_HEAT
if current_op is None:
current_op = STATE_UNKNOWN
return current_op
@property
def target_humidity(self):
target_hum = None
if self.wink.current_humidifier_mode() == 'on':
if self.wink.current_humidifier_set_point() is not None:
target_hum = self.wink.current_humidifier_set_point() * 100
elif self.wink.current_dehumidifier_mode() == 'on':
if self.wink.current_dehumidifier_set_point() is not None:
target_hum = self.wink.current_dehumidifier_set_point() * 100
else:
target_hum = None
return target_hum
@property
def target_temperature(self):
if self.current_operation != STATE_AUTO and not self.is_away_mode_on:
if self.current_operation == STATE_COOL:
return self.wink.current_max_set_point()
elif self.current_operation == STATE_HEAT:
return self.wink.current_min_set_point()
return None
@property
def target_temperature_low(self):
if self.current_operation == STATE_AUTO:
return self.wink.current_min_set_point()
return None
@property
def target_temperature_high(self):
if self.current_operation == STATE_AUTO:
return self.wink.current_max_set_point()
return None
@property
def is_away_mode_on(self):
return self.wink.away()
@property
def is_aux_heat_on(self):
if 'aux' not in self.wink.hvac_modes():
return None
if self.wink.current_hvac_mode() == 'aux':
return True
return False
def set_temperature(self, **kwargs):
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.current_operation == STATE_COOL:
target_temp_high = target_temp
if self.current_operation == STATE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
target_temp_low = target_temp_low
if target_temp_high is not None:
target_temp_high = target_temp_high
self.wink.set_temperature(target_temp_low, target_temp_high)
def set_operation_mode(self, operation_mode):
op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
if self.is_aux_heat_on and op_mode_to_set == STATE_HEAT:
return
self.wink.set_operation_mode(op_mode_to_set)
@property
def operation_list(self):
op_list = ['off']
modes = self.wink.hvac_modes()
for mode in modes:
if mode == 'aux':
continue
ha_mode = WINK_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = "Invaid operation mode mapping. " + mode + \
" doesn't map. Please report this."
_LOGGER.error(error)
return op_list
def turn_away_mode_on(self):
self.wink.set_away_mode()
def turn_away_mode_off(self):
self.wink.set_away_mode(False)
@property
def current_fan_mode(self):
if self.wink.current_fan_mode() == 'on':
return STATE_ON
elif self.wink.current_fan_mode() == 'auto':
return STATE_AUTO
# No Fan available so disable slider
return None
@property
def fan_list(self):
if self.wink.has_fan():
return self.wink.fan_modes()
return None
def set_fan_mode(self, fan):
self.wink.set_fan_mode(fan.lower())
def turn_aux_heat_on(self):
self.wink.set_operation_mode('aux')
def turn_aux_heat_off(self):
self.set_operation_mode(STATE_HEAT)
@property
def min_temp(self):
minimum = 7 # Default minimum
min_min = self.wink.min_min_set_point()
min_max = self.wink.min_max_set_point()
return_value = minimum
if self.current_operation == STATE_HEAT:
if min_min:
return_value = min_min
else:
return_value = minimum
elif self.current_operation == STATE_COOL:
if min_max:
return_value = min_max
else:
return_value = minimum
elif self.current_operation == STATE_AUTO:
if min_min and min_max:
return_value = min(min_min, min_max)
else:
return_value = minimum
else:
return_value = minimum
return return_value
@property
def max_temp(self):
maximum = 35 # Default maximum
max_min = self.wink.max_min_set_point()
max_max = self.wink.max_max_set_point()
return_value = maximum
if self.current_operation == STATE_HEAT:
if max_min:
return_value = max_min
else:
return_value = maximum
elif self.current_operation == STATE_COOL:
if max_max:
return_value = max_max
else:
return_value = maximum
elif self.current_operation == STATE_AUTO:
if max_min and max_max:
return_value = min(max_min, max_max)
else:
return_value = maximum
else:
return_value = maximum
return return_value
class WinkAC(WinkDevice, ClimateDevice):
    """Representation of a Wink air conditioner."""

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        data = {}
        target_temp_high = self.target_temperature_high
        target_temp_low = self.target_temperature_low
        if target_temp_high is not None:
            data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
                self.target_temperature_high)
        if target_temp_low is not None:
            data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
                self.target_temperature_low)
        data["total_consumption"] = self.wink.total_consumption()
        data["schedule_enabled"] = self.wink.schedule_enabled()
        return data

    @property
    def current_temperature(self):
        """Return the temperature currently reported by the device."""
        return self.wink.current_temperature()

    @property
    def current_operation(self):
        """Return the current operation mode, or off/unknown."""
        if not self.wink.is_on():
            current_op = STATE_OFF
        else:
            current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
        if current_op is None:
            current_op = STATE_UNKNOWN
        return current_op

    @property
    def operation_list(self):
        """Return the list of supported operation modes."""
        op_list = ['off']
        modes = self.wink.modes()
        for mode in modes:
            ha_mode = WINK_STATE_TO_HA.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
            else:
                # Fixed typo ("Invaid") and switched to lazy %-style
                # interpolation so formatting only happens when logged.
                _LOGGER.error(
                    "Invalid operation mode mapping. %s doesn't map. "
                    "Please report this.", mode)
        return op_list

    def set_temperature(self, **kwargs):
        """Set a new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        self.wink.set_temperature(target_temp)

    def set_operation_mode(self, operation_mode):
        """Set the operation mode (heat, cool, eco, ...)."""
        op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
        # Wink exposes "eco" on air conditioners as "auto_eco".
        if op_mode_to_set == 'eco':
            op_mode_to_set = 'auto_eco'
        self.wink.set_operation_mode(op_mode_to_set)

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.wink.current_max_set_point()

    @property
    def current_fan_mode(self):
        """Return the current fan speed bucketed into low/medium/high."""
        speed = self.wink.current_fan_speed()
        # NOTE(review): speeds in (0.4, 0.5] and <= 0.3 fall through to
        # unknown; preserved from the original mapping -- confirm the
        # device only ever reports 0.4 / 0.8 / 1.0.
        if 0.3 < speed <= 0.4:
            return SPEED_LOW
        elif 0.5 < speed <= 0.8:
            return SPEED_MEDIUM
        elif 0.8 < speed <= 1.0:
            return SPEED_HIGH
        return STATE_UNKNOWN

    @property
    def fan_list(self):
        """Return the list of supported fan speeds."""
        return [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]

    def set_fan_mode(self, fan):
        """Set the fan speed.

        Raises:
            ValueError: if ``fan`` is not one of the supported speeds
                (previously this crashed with an unbound-local NameError).
        """
        if fan == SPEED_LOW:
            speed = 0.4
        elif fan == SPEED_MEDIUM:
            speed = 0.8
        elif fan == SPEED_HIGH:
            speed = 1.0
        else:
            raise ValueError("Unsupported fan mode: {}".format(fan))
        self.wink.set_ac_fan_speed(speed)
class WinkWaterHeater(WinkDevice, ClimateDevice):
    """Representation of a Wink water heater."""

    @property
    def temperature_unit(self):
        """Return the unit of measurement (the Wink API reports Celsius)."""
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        data = {}
        data["vacation_mode"] = self.wink.vacation_mode_enabled()
        data["rheem_type"] = self.wink.rheem_type()
        return data

    @property
    def current_operation(self):
        """Return the current operation mode, or off/unknown."""
        if not self.wink.is_on():
            current_op = STATE_OFF
        else:
            current_op = WINK_STATE_TO_HA.get(self.wink.current_mode())
        if current_op is None:
            current_op = STATE_UNKNOWN
        return current_op

    @property
    def operation_list(self):
        """Return the supported operation modes ('aux' is not exposed)."""
        op_list = ['off']
        modes = self.wink.modes()
        for mode in modes:
            if mode == 'aux':
                continue
            ha_mode = WINK_STATE_TO_HA.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
            else:
                # Fixed typo ("Invaid") and switched to lazy %-style
                # interpolation so formatting only happens when logged.
                _LOGGER.error(
                    "Invalid operation mode mapping. %s doesn't map. "
                    "Please report this.", mode)
        return op_list

    def set_temperature(self, **kwargs):
        """Set a new target water temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        self.wink.set_temperature(target_temp)

    def set_operation_mode(self, operation_mode):
        """Set the operation mode."""
        op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
        self.wink.set_operation_mode(op_mode_to_set)

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.wink.current_set_point()

    def turn_away_mode_on(self):
        """Enable vacation mode on the water heater."""
        self.wink.set_vacation_mode(True)

    def turn_away_mode_off(self):
        """Disable vacation mode on the water heater."""
        self.wink.set_vacation_mode(False)

    @property
    def min_temp(self):
        """Return the minimum settable temperature."""
        return self.wink.min_set_point()

    @property
    def max_temp(self):
        """Return the maximum settable temperature."""
        return self.wink.max_set_point()
| true | true |
f72ceff1b9b1d547913aa773c1b821be3ae401f9 | 55 | py | Python | contest/abc120/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc120/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc120/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | A,B,C = map(int, input().split())
print(min(B//A,C))
| 18.333333 | 34 | 0.545455 | A,B,C = map(int, input().split())
print(min(B//A,C))
| true | true |
f72cf037943e51b4783520ebbf6f67e18bf38ba4 | 3,648 | py | Python | src/Calibration.py | Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | e5d5fdff45c523a4f17635897b9de4b2e50d273d | [
"MIT"
] | null | null | null | src/Calibration.py | Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | e5d5fdff45c523a4f17635897b9de4b2e50d273d | [
"MIT"
] | null | null | null | src/Calibration.py | Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | e5d5fdff45c523a4f17635897b9de4b2e50d273d | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
class Calibration:
def __init__(
self,
source_images_directory,
destination_image_sub_directory,
chessboard_shape,
logger
):
self.source_images_directory = source_images_directory
self.destination_image_sub_directory= destination_image_sub_directory
self.cornered_output_images = str(self.destination_image_sub_directory+'/Cornered')
self.undistorted_output_images = str(self.destination_image_sub_directory+'/Undistorted')
self.chessboard_x, self.chessboard_y= chessboard_shape
self.logger = logger
self.name_list_of_boards = os.listdir(self.source_images_directory)
self.number_of_boards = len(self.name_list_of_boards)
self.image_size = None
self.object_points = []
self.image_points = []
self.camera_matrix, self.distortion_coefficient = \
self.__calculate_calibration_parameters()
    def get_calibration_parameters(self):
        """Return the (camera_matrix, distortion_coefficient) pair computed in __init__."""
        return self.camera_matrix, self.distortion_coefficient
    def __calculate_calibration_parameters(self):
        """Run chessboard-based camera calibration over every source image.

        For each board image: detect the inner chessboard corners, collect
        the 2D/3D point correspondences, and save an annotated copy through
        the logger.  Finally calls cv2.calibrateCamera and returns the
        (camera_matrix, distortion_coefficients) pair.
        """
        # Ideal 3D corner grid (z = 0 plane), reused for every board image.
        object_points = np.zeros((self.chessboard_x*self.chessboard_y, 3), np.float32)
        object_points[:, :2] = np.mgrid[0:self.chessboard_x, 0:self.chessboard_y].T.reshape(-1, 2)
        for img_name in self.name_list_of_boards:
            # Read the image
            image_path = '{}/{}'.format(str(self.source_images_directory), str(img_name))
            image_obj = cv2.imread(image_path)
            # Gray it
            gray_image = cv2.cvtColor(image_obj, cv2.COLOR_BGR2GRAY)
            self.image_size = gray_image.shape[::-1]
            # Find its corners
            ret, corners = cv2.findChessboardCorners(gray_image, (self.chessboard_x, self.chessboard_y), None)
            if ret:
                self.object_points.append(object_points)
                self.image_points.append(corners)
                # save image with corners
                image = cv2.drawChessboardCorners(\
                    image_obj, \
                    (self.chessboard_y, self.chessboard_x), \
                    corners, \
                    ret)
                # Saved image with corners
                self.logger.save_image(str(self.cornered_output_images), img_name, image)
            else:
                self.logger.log_error('Can not find all needed corners in {}'.format(str(img_name)))
        # Calibrate the camera
        calibration_parameters = \
            cv2.calibrateCamera(self.object_points, \
                                self.image_points, \
                                self.image_size, \
                                None, None)
        # save corrected images
        self.__save_undistorted_images(calibration_parameters[1], calibration_parameters[2])
        # Return only the camera matrix and distortion coefficients.
        return calibration_parameters[1], calibration_parameters[2]
def __save_undistorted_images(self, camera_matrix, distortion_coef):
cornered_images_list = os.listdir(str('./results/'+self.cornered_output_images))
for cornered_img in cornered_images_list:
image_path = '{}/{}'.format(str('./results/'+self.cornered_output_images), str(cornered_img))
image_obj = cv2.imread(image_path)
self.logger.save_image( \
str(self.undistorted_output_images), \
cornered_img,
cv2.undistort(image_obj, camera_matrix, distortion_coef, None, camera_matrix)) | 41.931034 | 110 | 0.621436 | import os
import cv2
import numpy as np
class Calibration:
def __init__(
self,
source_images_directory,
destination_image_sub_directory,
chessboard_shape,
logger
):
self.source_images_directory = source_images_directory
self.destination_image_sub_directory= destination_image_sub_directory
self.cornered_output_images = str(self.destination_image_sub_directory+'/Cornered')
self.undistorted_output_images = str(self.destination_image_sub_directory+'/Undistorted')
self.chessboard_x, self.chessboard_y= chessboard_shape
self.logger = logger
self.name_list_of_boards = os.listdir(self.source_images_directory)
self.number_of_boards = len(self.name_list_of_boards)
self.image_size = None
self.object_points = []
self.image_points = []
self.camera_matrix, self.distortion_coefficient = \
self.__calculate_calibration_parameters()
def get_calibration_parameters(self):
return self.camera_matrix, self.distortion_coefficient
def __calculate_calibration_parameters(self):
object_points = np.zeros((self.chessboard_x*self.chessboard_y, 3), np.float32)
object_points[:, :2] = np.mgrid[0:self.chessboard_x, 0:self.chessboard_y].T.reshape(-1, 2)
for img_name in self.name_list_of_boards:
image_path = '{}/{}'.format(str(self.source_images_directory), str(img_name))
image_obj = cv2.imread(image_path)
gray_image = cv2.cvtColor(image_obj, cv2.COLOR_BGR2GRAY)
self.image_size = gray_image.shape[::-1]
ret, corners = cv2.findChessboardCorners(gray_image, (self.chessboard_x, self.chessboard_y), None)
if ret:
self.object_points.append(object_points)
self.image_points.append(corners)
image = cv2.drawChessboardCorners(\
image_obj, \
(self.chessboard_y, self.chessboard_x), \
corners, \
ret)
self.logger.save_image(str(self.cornered_output_images), img_name, image)
else:
self.logger.log_error('Can not find all needed corners in {}'.format(str(img_name)))
calibration_parameters = \
cv2.calibrateCamera(self.object_points, \
self.image_points, \
self.image_size, \
None, None)
self.__save_undistorted_images(calibration_parameters[1], calibration_parameters[2])
return calibration_parameters[1], calibration_parameters[2]
def __save_undistorted_images(self, camera_matrix, distortion_coef):
cornered_images_list = os.listdir(str('./results/'+self.cornered_output_images))
for cornered_img in cornered_images_list:
image_path = '{}/{}'.format(str('./results/'+self.cornered_output_images), str(cornered_img))
image_obj = cv2.imread(image_path)
self.logger.save_image( \
str(self.undistorted_output_images), \
cornered_img,
cv2.undistort(image_obj, camera_matrix, distortion_coef, None, camera_matrix)) | true | true |
f72cf0613aadf94eb6e2c98f6e1c046325378d82 | 1,979 | py | Python | dove/utils/bed.py | barslmn/dove | df6344286633422219c0e93e15d4327f9d082041 | [
"MIT"
] | null | null | null | dove/utils/bed.py | barslmn/dove | df6344286633422219c0e93e15d4327f9d082041 | [
"MIT"
] | null | null | null | dove/utils/bed.py | barslmn/dove | df6344286633422219c0e93e15d4327f9d082041 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'bars'
from io import StringIO
import pandas as pd
from collections import defaultdict
class Bed:
    """Minimal reader for BED interval files.

    Parses the first three BED columns (CHR, START, END) into a pandas
    DataFrame, skipping any leading 'track'/'browser' header lines.
    ``bed_file`` is either a filesystem path (``mode='file'``, the default)
    or the raw BED text itself (``mode='str'``).
    """

    def __init__(self, bed_file, mode='file'):
        """Store the BED source and how to interpret it ('file' or 'str')."""
        self.bed_file = bed_file
        self.mode = mode

    def get_header(self):
        """Collect leading 'track'/'browser' header lines.

        Returns:
            tuple: (lines_to_skip, header) where ``lines_to_skip`` is the
            number of header lines and ``header`` maps 'track'/'browser'
            to the matching lines.
        """
        if self.mode == 'str':
            lines = self.bed_file.split('\n')
        else:
            with open(self.bed_file) as f:
                lines = f.read().splitlines()
        lines_to_skip = 0
        header = defaultdict(list)
        for line in lines:
            if line.startswith('track'):
                header['track'].append(line)
                lines_to_skip += 1
            elif line.startswith('browser'):
                header['browser'].append(line)
                lines_to_skip += 1
            else:
                # Header lines only appear at the top of the file.
                break
        return lines_to_skip, header

    def _parse(self, source):
        """Shared pandas parsing for file paths and in-memory buffers."""
        lines_to_skip, _header = self.get_header()
        return pd.read_csv(
            source,
            sep='\t',
            usecols=[0, 1, 2],
            names=['CHR', 'START', 'END'],
            dtype={'START': int, 'END': int},
            skiprows=lines_to_skip
        )

    def from_file(self):
        """Parse the BED file at ``self.bed_file`` into a DataFrame."""
        return self._parse(self.bed_file)

    def from_string(self):
        """Parse the in-memory BED text into a DataFrame."""
        return self._parse(StringIO(self.bed_file))
| 29.984848 | 54 | 0.471956 |
__author__ = 'bars'
from io import StringIO
import pandas as pd
from collections import defaultdict
class Bed:
def __init__(self, bed_file, mode='file'):
self.bed_file = bed_file
self.mode = mode
def get_header(self):
lines_to_skip = 0
header = defaultdict(list)
if self.mode == 'str':
for line in self.bed_file.split('\n'):
if line.startswith('track'):
header['track'].append(line)
lines_to_skip += 1
elif line.startswith('browser'):
header['browser'].append(line)
lines_to_skip += 1
else:
break
else:
with open(self.bed_file) as f:
lines = f.read().splitlines()
for line in lines:
if line.startswith('track'):
header['track'].append(line)
lines_to_skip += 1
elif line.startswith('browser'):
header['browser'].append(line)
lines_to_skip += 1
else:
break
return lines_to_skip, header
def from_file(self):
lines_to_skip, header = self.get_header()
df_bed = pd.read_csv(
self.bed_file,
sep='\t',
usecols=[0, 1, 2],
names=['CHR', 'START', 'END'],
dtype={'START': int, 'END': int},
skiprows=lines_to_skip
)
return df_bed
def from_string(self):
lines_to_skip, header = self.get_header()
df_bed = pd.read_csv(
StringIO(self.bed_file),
sep='\t',
usecols=[0, 1, 2],
names=['CHR', 'START', 'END'],
dtype={'START': int, 'END': int},
skiprows=lines_to_skip
)
return df_bed
| true | true |
f72cf0c8eddcbcd99e6cd753b8ca73ce4cc13dcd | 10,400 | py | Python | nnutils/dibr_kaolin.py | junzhezhang/cmr | f0b2ded813535493f124852ce64b26efa761a35c | [
"MIT"
] | null | null | null | nnutils/dibr_kaolin.py | junzhezhang/cmr | f0b2ded813535493f124852ce64b26efa761a35c | [
"MIT"
] | null | null | null | nnutils/dibr_kaolin.py | junzhezhang/cmr | f0b2ded813535493f124852ce64b26efa761a35c | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.misc
import tqdm
import cv2
import torch
from nnutils import geom_utils
# from kaolin.graphics.dib_renderer.rasterizer import linear_rasterizer
# from kaolin.graphics.dib_renderer.utils import datanormalize
# from kaolin.graphics.dib_renderer.renderer.phongrender import PhongRender
from kaolin.graphics.dib_renderer.renderer.texrender import TexRender
from kaolin.graphics.dib_renderer.utils.perspective import lookatnp, perspectiveprojectionnp
from kaolin.graphics.dib_renderer.utils.mesh import loadobj, face2pfmtx, loadobjtex, savemesh
def quaternion_to_matrix(quaternions):
    """Convert rotations given as quaternions to rotation matrices.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    w, x, y, z = torch.unbind(quaternions, -1)
    # 2 / |q|^2: normalises so non-unit quaternions still yield proper
    # rotation matrices.
    s = 2.0 / (quaternions * quaternions).sum(-1)
    entries = (
        1 - s * (y * y + z * z), s * (x * y - z * w), s * (x * z + y * w),
        s * (x * y + z * w), 1 - s * (x * x + z * z), s * (y * z - x * w),
        s * (x * z - y * w), s * (y * z + x * w), 1 - s * (x * x + y * y),
    )
    flat = torch.stack(entries, -1)
    return flat.reshape(quaternions.shape[:-1] + (3, 3))
class NeuralRenderer(torch.nn.Module):
    """
    Kaolin DIB-R based replacement for the NeuralRenderer in nmr.py.

    Wraps kaolin's TexRender to rasterize textured meshes; keeps the same
    external interface (project_points / forward) as the NMR version.
    """
    # 512 --> 256 TODO
    def __init__(self, img_size=256,uv_sampler=None):
        # img_size: rendered image height and width in pixels.
        # uv_sampler: optional per-face UV coordinates, cloned if provided.
        self.img_size = img_size
        super(NeuralRenderer, self).__init__()
        self.renderer = TexRender(height=img_size,width=img_size)
        # self.renderer = NeuralMeshRenderer(image_size=img_size, camera_mode='look_at',perspective=False,viewing_angle=30,light_intensity_ambient=0.8)
        self.offset_z = 5.
        self.proj_fn = geom_utils.orthographic_proj_withz
        if uv_sampler is not None:
            self.uv_sampler = uv_sampler.clone()
        else:
            print('no uv sampler')
        print('DIB-R...')
    def ambient_light_only(self):
        # Stub kept for interface compatibility with the NMR renderer.
        # Make light only ambient.
        # self.renderer.light_intensity_ambient = 1
        # self.renderer.light_intensity_directional = 0
        print("TODO: ambient_light_only")
        pass
    def set_bgcolor(self, color):
        # Stub kept for interface compatibility with the NMR renderer.
        # self.renderer.background_color = color
        print("TODO: set_bgcolor")
        pass
    def project_points(self, verts, cams):
        # Orthographic projection of vertices; keep only the x/y columns.
        proj = self.proj_fn(verts, cams)
        return proj[:, :, :2]
    def forward(self, vertices, faces, cams, textures=None):
        """Render the batch of meshes and return images as (B, C, H, W).

        cams is sliced as [:, :3] translation and [:, -4:] quaternion
        (real part first, matching quaternion_to_matrix above).
        NOTE(review): side effects -- writes vis/bird.obj and vis/lam*.jpg,
        drops into pdb on save failure, and requires CUDA.
        """
        ### TODO save mesh
        if textures is not None:
            v_np = vertices[0].detach().cpu().numpy()
            f_np = faces[0].detach().cpu().numpy()
            file_name = 'vis/bird.obj'
            try:
                savemesh(v_np, f_np, file_name)
            except:
                import pdb; pdb.set_trace()
        # ours = False
        ours = True
        if ours:
            # Build DIB-R camera params from our 7-dof camera vector.
            translation = cams[:,:3]
            quant = cams[:,-4:]
            tfcamviewmtx_bx3x3 = quaternion_to_matrix(quant)
            tfcamshift_bx3 = - translation
            # camfovy = 45 / 180.0 * np.pi
            camfovy = 90 / 180.0 * np.pi
            camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)
            tfcamproj_3x1 = torch.from_numpy(camprojmtx).cuda()
            tfcameras = [tfcamviewmtx_bx3x3,
                         tfcamshift_bx3,
                         tfcamproj_3x1]
        else:
            tfcameras = self.get_sample_cams(bs=vertices.shape[0])
        # import pdb; pdb.set_trace()
        print('1:',tfcameras[0].shape)
        print('2:',tfcameras[1].shape)
        print('3:',tfcameras[2].shape)
        if textures is None:
            # No texture supplied: render with a constant white texture map.
            tex_flag = False
            # shape = [vertices.shape[0], 1280, 6,6,6,3]
            # textures = torch.ones(vertices.shape[0], 1280, 6,6,6,3).cuda()*256
            textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()
        else:
            # Debug path: overrides the given texture with one loaded from
            # a hard-coded convmesh output image.
            tex_flag = True
            # # TODO try with convmesh output
            imfile = '/mnt/lustre/zhangjunzhe/tm/convmesh/output/pretrained_cub_512x512_class/mesh_0.png'
            # textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32) / 255.0
            textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32)
            dim = (self.img_size, self.img_size)
            resized = cv2.resize(textures_np, dim, interpolation = cv2.INTER_AREA)
            textures = torch.from_numpy(resized).cuda().unsqueeze(0)
            textures = textures.permute([0, 3, 1, 2])
            # print('tex shape:', textures.shape)
            # # import pdb; pdb.set_trace()
            # textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()
        # print(texture)
        # renderer.set_smooth(pfmtx) # TODO for phong renderer
        tfp_bxpx3 = vertices
        tff_fx3 = faces[0] # TODO to verify if fixed topology within a batch
        # tff_fx3 = tff_fx3.type(int64)
        tff_fx3 = tff_fx3.type(torch.long)
        points = [tfp_bxpx3, tff_fx3]
        uvs = self.uv_sampler
        # TODO texture to clone?
        # TODOL ft_fx3
        # ft_fx3??? TODO
        #only keep rgb, no alpha and depth
        print('uv shape:',uvs.shape)
        imgs = self.renderer(points=points,
                             cameras=tfcameras,
                             uv_bxpx2 = uvs,
                             texture_bx3xthxtw=textures,
                             ft_fx3=None)[0]
        if tex_flag:
            # Dump each rendered image for visual inspection.
            for i, img in enumerate(imgs):
                img = img.detach().cpu().numpy()
                cv2.imwrite('./vis/lam'+str(i)+'.jpg',img*255)
            print('saved img')
        print('!!!imgs:',imgs.shape)
        # (B, H, W, C) -> (B, C, H, W) to match the NMR convention.
        imgs = imgs.permute([0,3,1,2])
        print('new shape:',imgs.shape)
        # print(' cam:',cams)
        return imgs
    def get_sample_cams(self,bs):
        """Return a fixed debug camera, repeated ``bs`` times, on CUDA."""
        ##########################################################
        # campos = np.array([0, 0, 1.5], dtype=np.float32) # where camera it is
        # campos = np.array([0, 0, 4], dtype=np.float32)
        # campos = np.array([0, 4, 0], dtype=np.float32)
        campos = np.array([4, 0, 0], dtype=np.float32)
        camcenter = np.array([0, 0, 0], dtype=np.float32) # where camra is looking at
        # camup = np.array([-1, 1, 0], dtype=np.float32) # y axis of camera view
        # camup = np.array([-1, 0, 1], dtype=np.float32)
        # camup = np.array([0, -1, 1], dtype=np.float32)
        # camup = np.array([0, 1, -1], dtype=np.float32)
        # camup = np.array([1, -1, 0], dtype=np.float32)
        # camup = np.array([1, 0, -1], dtype=np.float32)
        # camup = np.array([1, 1, 0], dtype=np.float32)
        # camup = np.array([-1, 0, -1], dtype=np.float32)
        camup = np.array([1, 0, 1], dtype=np.float32)
        camviewmtx, camviewshift = lookatnp(campos.reshape(3, 1), camcenter.reshape(3, 1), camup.reshape(3, 1))
        camviewshift = -np.dot(camviewmtx.transpose(), camviewshift)
        camfovy = 45 / 180.0 * np.pi
        camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)
        #####################################################
        # tfp_px3 = torch.from_numpy(p)
        # tfp_px3.requires_grad = True
        # tff_fx3 = torch.from_numpy(f)
        # tfuv_tx2 = torch.from_numpy(uv)
        # tfuv_tx2.requires_grad = True
        # tfft_fx3 = torch.from_numpy(ft)
        # tftex_thxtwx3 = torch.from_numpy(np.ascontiguousarray(texturenp))
        # tftex_thxtwx3.requires_grad = True
        tfcamviewmtx = torch.from_numpy(camviewmtx)
        tfcamshift = torch.from_numpy(camviewshift)
        tfcamproj = torch.from_numpy(camprojmtx)
        ##########################################################
        # tfp_1xpx3 = torch.unsqueeze(tfp_px3, dim=0)
        # tfuv_1xtx2 = torch.unsqueeze(tfuv_tx2, dim=0)
        # tftex_1xthxtwx3 = torch.unsqueeze(tftex_thxtwx3, dim=0)
        tfcamviewmtx_1x3x3 = torch.unsqueeze(tfcamviewmtx, dim=0)
        tfcamshift_1x3 = tfcamshift.view(-1, 3)
        tfcamproj_3x1 = tfcamproj
        # bs = 4
        # tfp_bxpx3 = tfp_1xpx3.repeat([bs, 1, 1])
        # tfuv_bxtx2 = tfuv_1xtx2.repeat([bs, 1, 1])
        # tftex_bxthxtwx3 = tftex_1xthxtwx3.repeat([bs, 1, 1, 1])
        tfcamviewmtx_bx3x3 = tfcamviewmtx_1x3x3.repeat([bs, 1, 1])
        tfcamshift_bx3 = tfcamshift_1x3.repeat([bs, 1])
        tfcameras = [tfcamviewmtx_bx3x3.cuda(),
                     tfcamshift_bx3.cuda(),
                     tfcamproj_3x1.cuda()]
        return tfcameras
    # def compute_uvsampler(self,verts_t, faces_t, tex_size=2):
    #     """
    #     NOTE: copied from utils/mesh.py
    #     tex_size texture resolution per face default = 6
    #     TODO : merge with backbone
    #     For this mesh, pre-computes the UV coordinates for
    #     F x T x T points.
    #     Returns F x T x T x 2
    #     """
    #     verts = verts_t[0].clone().detach().cpu().numpy()
    #     faces = faces_t[0].clone().detach().cpu().numpy()
    #     # import pdb; pdb.set_trace()
    #     alpha = np.arange(tex_size, dtype=np.float) / (tex_size-1)
    #     beta = np.arange(tex_size, dtype=np.float) / (tex_size-1)
    #     import itertools
    #     # Barycentric coordinate values
    #     coords = np.stack([p for p in itertools.product(*[alpha, beta])])
    #     vs = verts[faces]
    #     # Compute alpha, beta (this is the same order as NMR)
    #     v2 = vs[:, 2]
    #     v0v2 = vs[:, 0] - vs[:, 2]
    #     v1v2 = vs[:, 1] - vs[:, 2]
    #     # F x 3 x T*2
    #     samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1)
    #     # F x T*2 x 3 points on the sphere
    #     samples = np.transpose(samples, (0, 2, 1))
    #     # Now convert these to uv.
    #     uv = get_spherical_coords(samples.reshape(-1, 3))
    #     # uv = uv.reshape(-1, len(coords), 2)
    #     uv = uv.reshape(-1, tex_size, tex_size, 2)
    #     return uv
# return uv | 38.80597 | 151 | 0.559038 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.misc
import tqdm
import cv2
import torch
from nnutils import geom_utils
from kaolin.graphics.dib_renderer.renderer.texrender import TexRender
from kaolin.graphics.dib_renderer.utils.perspective import lookatnp, perspectiveprojectionnp
from kaolin.graphics.dib_renderer.utils.mesh import loadobj, face2pfmtx, loadobjtex, savemesh
def quaternion_to_matrix(quaternions):
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
class NeuralRenderer(torch.nn.Module):
def __init__(self, img_size=256,uv_sampler=None):
self.img_size = img_size
super(NeuralRenderer, self).__init__()
self.renderer = TexRender(height=img_size,width=img_size)
self.offset_z = 5.
self.proj_fn = geom_utils.orthographic_proj_withz
if uv_sampler is not None:
self.uv_sampler = uv_sampler.clone()
else:
print('no uv sampler')
print('DIB-R...')
def ambient_light_only(self):
print("TODO: ambient_light_only")
pass
def set_bgcolor(self, color):
print("TODO: set_bgcolor")
pass
def project_points(self, verts, cams):
proj = self.proj_fn(verts, cams)
return proj[:, :, :2]
def forward(self, vertices, faces, cams, textures=None):
v_np = vertices[0].detach().cpu().numpy()
f_np = faces[0].detach().cpu().numpy()
file_name = 'vis/bird.obj'
try:
savemesh(v_np, f_np, file_name)
except:
import pdb; pdb.set_trace()
ours = True
if ours:
translation = cams[:,:3]
quant = cams[:,-4:]
tfcamviewmtx_bx3x3 = quaternion_to_matrix(quant)
tfcamshift_bx3 = - translation
camfovy = 90 / 180.0 * np.pi
camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)
tfcamproj_3x1 = torch.from_numpy(camprojmtx).cuda()
tfcameras = [tfcamviewmtx_bx3x3,
tfcamshift_bx3,
tfcamproj_3x1]
else:
tfcameras = self.get_sample_cams(bs=vertices.shape[0])
print('1:',tfcameras[0].shape)
print('2:',tfcameras[1].shape)
print('3:',tfcameras[2].shape)
if textures is None:
tex_flag = False
textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()
else:
tex_flag = True
tre/zhangjunzhe/tm/convmesh/output/pretrained_cub_512x512_class/mesh_0.png'
textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32)
dim = (self.img_size, self.img_size)
resized = cv2.resize(textures_np, dim, interpolation = cv2.INTER_AREA)
textures = torch.from_numpy(resized).cuda().unsqueeze(0)
textures = textures.permute([0, 3, 1, 2])
ices
tff_fx3 = faces[0]
tff_fx3 = tff_fx3.type(torch.long)
points = [tfp_bxpx3, tff_fx3]
uvs = self.uv_sampler
print('uv shape:',uvs.shape)
imgs = self.renderer(points=points,
cameras=tfcameras,
uv_bxpx2 = uvs,
texture_bx3xthxtw=textures,
ft_fx3=None)[0]
if tex_flag:
for i, img in enumerate(imgs):
img = img.detach().cpu().numpy()
cv2.imwrite('./vis/lam'+str(i)+'.jpg',img*255)
print('saved img')
print('!!!imgs:',imgs.shape)
imgs = imgs.permute([0,3,1,2])
print('new shape:',imgs.shape)
return imgs
def get_sample_cams(self,bs):
| true | true |
f72cf0ce56facdb68d763f793fbd3901d57d4555 | 4,872 | py | Python | sensors/jira_sensor.py | viveksyngh/stackstorm-jira | d08bc9b78bb5a5cce1c6e84c1f947f1ba3088d26 | [
"Apache-2.0"
] | null | null | null | sensors/jira_sensor.py | viveksyngh/stackstorm-jira | d08bc9b78bb5a5cce1c6e84c1f947f1ba3088d26 | [
"Apache-2.0"
] | null | null | null | sensors/jira_sensor.py | viveksyngh/stackstorm-jira | d08bc9b78bb5a5cce1c6e84c1f947f1ba3088d26 | [
"Apache-2.0"
] | 1 | 2020-01-22T16:35:49.000Z | 2020-01-22T16:35:49.000Z | # See ./requirements.txt for requirements.
import os
from jira.client import JIRA
from st2reactor.sensor.base import PollingSensor
class JIRASensor(PollingSensor):
    '''
    Sensor that polls a JIRA project and emits a trigger instance for
    every newly created issue in that project.
    '''

    def __init__(self, sensor_service, config=None, poll_interval=5):
        super(JIRASensor, self).__init__(sensor_service=sensor_service,
                                         config=config,
                                         poll_interval=poll_interval)
        self._jira_url = None
        # The Consumer Key created while setting up the 'Incoming Authentication' in
        # JIRA for the Application Link.
        self._consumer_key = u''
        self._rsa_key = None
        self._jira_client = None
        self._access_token = u''
        self._access_secret = u''
        self._projects_available = None
        self._poll_interval = 30
        self._project = None
        # Cache of issue key -> issue, used to detect new issues on poll.
        self._issues_in_project = None
        self._jql_query = None
        self._trigger_name = 'issues_tracker'
        self._trigger_pack = 'jira'
        self._trigger_ref = '.'.join([self._trigger_pack, self._trigger_name])

    def _read_cert(self, file_path):
        """Return the contents of the RSA cert file used for OAuth."""
        with open(file_path) as f:
            return f.read()

    def setup(self):
        """Build the JIRA client, validate the project, and seed the issue cache."""
        self._jira_url = self._config['url']
        auth_method = self._config['auth_method']
        options = {'server': self._config['url'],
                   'verify': self._config['verify']}
        # Getting client cert configuration
        cert_file_path = self._config['client_cert_file']
        key_file_path = self._config['client_key_file']
        if cert_file_path and key_file_path:
            options['client_cert'] = (cert_file_path, key_file_path)
        if auth_method == 'oauth':
            rsa_cert_file = self._config['rsa_cert_file']
            if not os.path.exists(rsa_cert_file):
                raise Exception(
                    'Cert file for JIRA OAuth not found at %s.' % rsa_cert_file
                )
            self._rsa_key = self._read_cert(rsa_cert_file)
            self._poll_interval = self._config.get(
                'poll_interval', self._poll_interval)
            oauth_creds = {
                'access_token': self._config['oauth_token'],
                'access_token_secret': self._config['oauth_secret'],
                'consumer_key': self._config['consumer_key'],
                'key_cert': self._rsa_key,
            }
            self._jira_client = JIRA(options=options, oauth=oauth_creds)
        elif auth_method == 'basic':
            basic_creds = (self._config['username'], self._config['password'])
            self._jira_client = JIRA(options=options, basic_auth=basic_creds)
        else:
            # The original built `msg` as a tuple of strings (commas instead
            # of concatenation), yielding an unreadable error; raise with a
            # single plain string instead.
            raise Exception('You must set auth_method to either "oauth" '
                            'or "basic" in your jira.yaml config file.')
        if self._projects_available is None:
            self._projects_available = set()
            for proj in self._jira_client.projects():
                self._projects_available.add(proj.key)
        self._project = self._config.get('project', None)
        if not self._project or self._project not in self._projects_available:
            raise Exception('Invalid project (%s) to track.' % self._project)
        self._jql_query = 'project=%s' % self._project
        all_issues = self._jira_client.search_issues(
            self._jql_query, maxResults=None)
        self._issues_in_project = {issue.key: issue for issue in all_issues}

    def poll(self):
        """Periodic entry point: look for issues created since the last poll."""
        self._detect_new_issues()

    def cleanup(self):
        pass

    def add_trigger(self, trigger):
        # Trigger-management hooks are not needed for this sensor.
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _detect_new_issues(self):
        """Fetch the most recent issues and dispatch a trigger for unseen ones."""
        new_issues = self._jira_client.search_issues(
            self._jql_query, maxResults=50, startAt=0
        )
        for issue in new_issues:
            if issue.key not in self._issues_in_project:
                self._dispatch_issues_trigger(issue)
                self._issues_in_project[issue.key] = issue

    def _dispatch_issues_trigger(self, issue):
        """Build the trigger payload from a JIRA issue and dispatch it."""
        trigger = self._trigger_ref
        payload = {}
        payload['issue_name'] = issue.key
        payload['issue_url'] = issue.self
        payload['issue_browse_url'] = self._jira_url + '/browse/' + issue.key
        payload['project'] = self._project
        payload['created'] = issue.raw['fields']['created']
        payload['assignee'] = issue.raw['fields']['assignee']
        payload['fix_versions'] = issue.raw['fields']['fixVersions']
        payload['issue_type'] = issue.raw['fields']['issuetype']['name']
        self._sensor_service.dispatch(trigger, payload)
| 37.767442 | 84 | 0.610016 |
import os
from jira.client import JIRA
from st2reactor.sensor.base import PollingSensor
class JIRASensor(PollingSensor):
def __init__(self, sensor_service, config=None, poll_interval=5):
super(JIRASensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._jira_url = None
self._consumer_key = u''
self._rsa_key = None
self._jira_client = None
self._access_token = u''
self._access_secret = u''
self._projects_available = None
self._poll_interval = 30
self._project = None
self._issues_in_project = None
self._jql_query = None
self._trigger_name = 'issues_tracker'
self._trigger_pack = 'jira'
self._trigger_ref = '.'.join([self._trigger_pack, self._trigger_name])
def _read_cert(self, file_path):
with open(file_path) as f:
return f.read()
def setup(self):
self._jira_url = self._config['url']
auth_method = self._config['auth_method']
options = {'server': self._config['url'],
'verify': self._config['verify']}
cert_file_path = self._config['client_cert_file']
key_file_path = self._config['client_key_file']
if cert_file_path and key_file_path:
options['client_cert'] = (cert_file_path, key_file_path)
if auth_method == 'oauth':
rsa_cert_file = self._config['rsa_cert_file']
if not os.path.exists(rsa_cert_file):
raise Exception(
'Cert file for JIRA OAuth not found at %s.' % rsa_cert_file
)
self._rsa_key = self._read_cert(rsa_cert_file)
self._poll_interval = self._config.get(
'poll_interval', self._poll_interval)
oauth_creds = {
'access_token': self._config['oauth_token'],
'access_token_secret': self._config['oauth_secret'],
'consumer_key': self._config['consumer_key'],
'key_cert': self._rsa_key,
}
self._jira_client = JIRA(options=options, oauth=oauth_creds)
elif auth_method == 'basic':
basic_creds = (self._config['username'], self._config['password'])
self._jira_client = JIRA(options=options, basic_auth=basic_creds)
else:
msg = ('You must set auth_method to either "oauth"',
'or "basic" your jira.yaml config file.',
)
raise Exception(msg)
if self._projects_available is None:
self._projects_available = set()
for proj in self._jira_client.projects():
self._projects_available.add(proj.key)
self._project = self._config.get('project', None)
if not self._project or self._project not in self._projects_available:
raise Exception('Invalid project (%s) to track.' % self._project)
self._jql_query = 'project=%s' % self._project
all_issues = self._jira_client.search_issues(
self._jql_query, maxResults=None)
self._issues_in_project = {issue.key: issue for issue in all_issues}
def poll(self):
self._detect_new_issues()
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _detect_new_issues(self):
new_issues = self._jira_client.search_issues(
self._jql_query, maxResults=50, startAt=0
)
for issue in new_issues:
if issue.key not in self._issues_in_project:
self._dispatch_issues_trigger(issue)
self._issues_in_project[issue.key] = issue
def _dispatch_issues_trigger(self, issue):
trigger = self._trigger_ref
payload = {}
payload['issue_name'] = issue.key
payload['issue_url'] = issue.self
payload['issue_browse_url'] = self._jira_url + '/browse/' + issue.key
payload['project'] = self._project
payload['created'] = issue.raw['fields']['created']
payload['assignee'] = issue.raw['fields']['assignee']
payload['fix_versions'] = issue.raw['fields']['fixVersions']
payload['issue_type'] = issue.raw['fields']['issuetype']['name']
self._sensor_service.dispatch(trigger, payload)
| true | true |
f72cf166961a193b694eac3676ad404f15972d73 | 6,202 | py | Python | __init__.py | kriswans/logBlizzard | 56ac597b4a499fa331742d441cb42c8c99360e0e | [
"MIT"
] | null | null | null | __init__.py | kriswans/logBlizzard | 56ac597b4a499fa331742d441cb42c8c99360e0e | [
"MIT"
] | null | null | null | __init__.py | kriswans/logBlizzard | 56ac597b4a499fa331742d441cb42c8c99360e0e | [
"MIT"
] | null | null | null | """
Author: Kris Swanson, kriswans@cisco.com
Tested with Python 3.6.1 on WIN10
"""
import socket
import struct
import time
import sys
import multiprocessing
import datetime
import glob
import json
from crypto import Crypto as cryp
from syslog import syslog
from nodemanager import NodeManager as nm
from localsearch import SearchLocal as sl
def logMonitor_Rx(password, params):
    """
    Receive encrypted log messages from the multicast group and persist them.

    Joins the LOGMSG multicast group from ``network_cfg.json``, decrypts each
    datagram, serves quick-search requests via ``SearchLocal.searchMem``, and
    appends messages from *other* nodes (those not carrying this node's
    filter tag) to an in-memory list. That list is flushed to
    ``msglog_temp.json`` every 10 messages and rotated into a timestamped
    ``msglog_<ts>.json`` every 105 messages. Runs forever; intended to be
    started as a daemon process.

    :param password: shared secret used to decrypt incoming messages.
    :param params: local node parameters (node_num, role, cluster_id,
        localnode) used to build the self-message filter tag.
    """
    print("Starting Rx Process...\n")
    with open('network_cfg.json', 'r') as nwc:
        nw = json.load(nwc)
    LOGMSG_GRP = nw['LOGMSG_GRP']
    LOGMSG_PORT = nw['LOGMSG_PORT']
    # Read (and thereby validate presence of) the search-channel settings,
    # even though this receive loop does not use them directly.
    SCH_GRP = nw['SCH_GRP']
    SCH_PORT = nw['SCH_PORT']
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # '' listens to all groups on LOGMSG_PORT; bind to LOGMSG_GRP instead to
    # restrict reception to that single group.
    sock.bind(('', LOGMSG_PORT))
    mreq = struct.pack("4sl", socket.inet_aton(LOGMSG_GRP), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    # Tag identifying messages that originated on this node; those are skipped.
    filter_tag = '%(node_num)s:%(role)s:%(cluster_id)s:%(localnode)s' % params
    print(filter_tag)
    i = 0
    search_list = []
    quick_search_tag = 'LogQSearch:::'
    while True:
        try:
            search = False
            rx_msg = sock.recv(2048)
            dcdmsg = rx_msg.decode("utf-8")
            dcdmsg = bytes(dcdmsg, 'ascii')
            dcdmsg = cryp.DecryptMsg(dcdmsg, password)
            if quick_search_tag in dcdmsg:
                search = True
                print('quick search!')
                sl.searchMem(search_list, dcdmsg, password, 'off')
            if filter_tag not in dcdmsg and search == False:
                jlm = json.loads(dcdmsg)
                search_list.append({"source_time":jlm["source_time"],'sending_node':jlm['sending_node'],'sending_hostname':jlm['sending_hostname'],"cluster":params["cluster_id"],'orig_message':jlm['orig_message'],'orig_addr':jlm['orig_addr']})
                i += 1
                # Bug fix: check rotation *before* the temp flush. The original
                # tested `i % 10` first and `continue`d, so rotation at every
                # even multiple of 105 (210, 420, ...) was silently skipped.
                if i % 105 == 0:
                    ts_start = time.time()
                    log_name = 'msglog_' + str(ts_start) + '.json'
                    with open(log_name, 'w') as log:
                        json.dump(search_list, log)
                    search_list = []
                    continue
                if i % 10 == 0:
                    with open('msglog_temp.json', 'w') as log:
                        json.dump(search_list, log)
                    continue
            else:
                continue
        except Exception:
            # Keep the receive loop alive on malformed or undecryptable
            # datagrams; narrowed from a bare `except:` so that process
            # signals (KeyboardInterrupt/SystemExit) still propagate.
            print('Rx Process Exception')
def logMonitor_Tx(msg, params,password, nw):
    """Spawn the worker processes (heartbeat, tag generator, optional Rx and
    deep-search workers, syslog worker) and then loop forever, encrypting and
    forwarding newly appended ``syslog.log`` lines to the multicast group.

    NOTE(review): the ``msg`` parameter is reassigned before first use inside
    the send loop, so its initial value is ignored.

    :param params: local node parameters (localnode, role, cluster_id, ...).
    :param password: shared secret used to encrypt outgoing messages.
    :param nw: network configuration dict with LOGMSG_GRP / LOGMSG_PORT.
    """
    LOGMSG_GRP = nw['LOGMSG_GRP']
    LOGMSG_PORT = nw['LOGMSG_PORT']
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    print("Starting Tx process...\n")
    localnode=params['localnode']
    role=params['role']
    # NOTE(review): `node` and `cluster` are assigned but never used below.
    node=params['localnode']
    cluster=params['cluster_id']
    hostname=(socket.gethostname())
    jobs=[]
    # Heartbeat worker (nm.infHBeat), started as a daemon child process.
    z = multiprocessing.Process(target=nm.infHBeat,args=(params,nw))
    jobs.append(z)
    z.daemon = True
    z.start()
    # Message-tag generator worker (nm.messageTagGen).
    n = multiprocessing.Process(target=nm.messageTagGen,args=(nw,))
    jobs.append(n)
    n.daemon = True
    n.start()
    if role == 'RxTx':
        # RxTx nodes also receive: start the Rx loop and the deep-search worker.
        p = multiprocessing.Process(target=logMonitor_Rx,args=(password,params,))
        jobs.append(p)
        p.daemon = True
        p.start()
        ds =multiprocessing.Process(target=sl.deepSearch)
        jobs.append(ds)
        ds.daemon = True
        ds.start()
    # Syslog worker (imported `syslog` callable), run for every role.
    q = multiprocessing.Process(target=syslog)
    jobs.append(q)
    q.daemon = True
    q.start()
    # NOTE(review): `lenfr` is never used.
    lenfr=0
    # Seconds to sleep between forwarding passes.
    send_throttle=2
    # lfr[0] = number of lines already forwarded, lfr[1] = current line count.
    lfr=[0,0]
    while True:
        lfr[0]=lfr[1]
        # Truncate syslog.log once more than 100 lines have accumulated and
        # reset the forwarded-line counters.
        if max(lfr) > 100:
            with open ('syslog.log','w') as f:
                f.close()
            lfr=[0,0]
        time.sleep(send_throttle)
        try:
            # Refresh the drop-list tag that is appended to fallback messages.
            with open ('droplist.json','r') as dlj:
                drop_tag=json.load(dlj)
                drop_tag=str(drop_tag)
        except :
            print('possible JSONDecodeError')
            drop_tag='[]'
            pass
        # NOTE(review): this inner loop always ends in `break`, so it performs
        # exactly one pass per outer iteration.
        while True:
            with open('syslog.log','r') as f:
                fr=f.readlines()
            lfr[1]=len(fr)
            if lfr[1] > lfr[0]:
                msg=''
                # Forward only the lines appended since the previous pass.
                for i in fr[lfr[0]:lfr[1]]:
                    msg=i.rstrip()
                    parse_msg=json.loads(msg)
                    ts = time.time()
                    msg={'source_time':ts,'sending_node':localnode,'sending_hostname':hostname,'orig_message':parse_msg['log_message'],'orig_addr':parse_msg['orig_addr'],'drop_tag':drop_tag}
                    msg=json.dumps(msg)
                    msg=bytes(msg, "ascii")
                    msg=cryp.EncryptMsg(msg,password)
                    try:
                        sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
                    except OSError:
                        # On send failure, fall back to a short notice message
                        # instead of the full payload.
                        msg = ("Attempting to send %s log messages from overran Tx buffer" % str(len(fr)))
                        msg=localnode+'@'+hostname+"# "+'"'+msg+'"'+drop_tag
                        msg=bytes(msg, "ascii")
                        msg=cryp.EncryptMsg(msg,password)
                        sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
                        pass
            if lfr[0] == lfr[1]:
                pass
            else:
                pass
            break
    # NOTE(review): unreachable — the outer `while True` never exits.
    sys.exit()
"""
main fn to pull user info and kick off logMonitor_Tx fn. logMonitor_Tx kicks off heartbeat and Rx functions.
"""
def main():
params, nw =nm.localParams()
with open('pwf','r') as p:
password=p.read()
password=password.rstrip()
jobs = []
msg=None
r = multiprocessing.Process(target=logMonitor_Tx(msg,params,password,nw))
jobs.append(r)
r.start()
if __name__ == '__main__':
main()
| 27.442478 | 241 | 0.565946 |
import socket
import struct
import time
import sys
import multiprocessing
import datetime
import glob
import json
from crypto import Crypto as cryp
from syslog import syslog
from nodemanager import NodeManager as nm
from localsearch import SearchLocal as sl
def logMonitor_Rx(password,params):
print("Starting Rx Process...\n")
with open('network_cfg.json','r') as nwc:
nw=json.load(nwc)
LOGMSG_GRP = nw['LOGMSG_GRP']
LOGMSG_PORT = nw['LOGMSG_PORT']
SCH_GRP = nw['SCH_GRP']
SCH_PORT = nw['SCH_PORT']
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', LOGMSG_PORT))
mreq = struct.pack("4sl", socket.inet_aton(LOGMSG_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
filter_tag='%(node_num)s:%(role)s:%(cluster_id)s:%(localnode)s' % params
print(filter_tag)
ts = 0
i=0
dcdmsg=''
search_list=[]
quick_search_tag='LogQSearch:::'
write_mem_tag='!WRITECACHE!'
zero_disk_tag='!DELETEDISKCACHE!'
zero_mem_tag='!DELETEMEMCACHE!'
ts_start=time.time()
log_name='msglog_'+str(ts_start)+'.json'
schjob=[]
while True:
try:
search=False
rx_msg=sock.recv(2048)
dcdmsg=rx_msg.decode("utf-8")
dcdmsg=bytes(dcdmsg,'ascii')
dcdmsg=cryp.DecryptMsg(dcdmsg,password)
if quick_search_tag in dcdmsg:
search=True
print('quick search!')
sl.searchMem(search_list,dcdmsg,password,'off')
if filter_tag not in dcdmsg and search==False:
jlm=json.loads(dcdmsg)
search_list.append({"source_time":jlm["source_time"],'sending_node':jlm['sending_node'],'sending_hostname':jlm['sending_hostname'],"cluster":params["cluster_id"],'orig_message':jlm['orig_message'],'orig_addr':jlm['orig_addr']})
i+=1
if i % 10 == 0:
with open ('msglog_temp.json','w') as log:
json.dump(search_list,log)
continue
if i % 105 == 0:
ts_start=time.time()
log_name='msglog_'+str(ts_start)+'.json'
with open (log_name,'w') as log:
json.dump(search_list,log)
search_list=[]
continue
else:
continue
except:
print('Rx Process Exception')
pass
def logMonitor_Tx(msg, params,password, nw):
LOGMSG_GRP = nw['LOGMSG_GRP']
LOGMSG_PORT = nw['LOGMSG_PORT']
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
print("Starting Tx process...\n")
localnode=params['localnode']
role=params['role']
node=params['localnode']
cluster=params['cluster_id']
hostname=(socket.gethostname())
jobs=[]
z = multiprocessing.Process(target=nm.infHBeat,args=(params,nw))
jobs.append(z)
z.daemon = True
z.start()
n = multiprocessing.Process(target=nm.messageTagGen,args=(nw,))
jobs.append(n)
n.daemon = True
n.start()
if role == 'RxTx':
p = multiprocessing.Process(target=logMonitor_Rx,args=(password,params,))
jobs.append(p)
p.daemon = True
p.start()
ds =multiprocessing.Process(target=sl.deepSearch)
jobs.append(ds)
ds.daemon = True
ds.start()
q = multiprocessing.Process(target=syslog)
jobs.append(q)
q.daemon = True
q.start()
lenfr=0
send_throttle=2
lfr=[0,0]
while True:
lfr[0]=lfr[1]
if max(lfr) > 100:
with open ('syslog.log','w') as f:
f.close()
lfr=[0,0]
time.sleep(send_throttle)
try:
with open ('droplist.json','r') as dlj:
drop_tag=json.load(dlj)
drop_tag=str(drop_tag)
except :
print('possible JSONDecodeError')
drop_tag='[]'
pass
while True:
with open('syslog.log','r') as f:
fr=f.readlines()
lfr[1]=len(fr)
if lfr[1] > lfr[0]:
msg=''
for i in fr[lfr[0]:lfr[1]]:
msg=i.rstrip()
parse_msg=json.loads(msg)
ts = time.time()
msg={'source_time':ts,'sending_node':localnode,'sending_hostname':hostname,'orig_message':parse_msg['log_message'],'orig_addr':parse_msg['orig_addr'],'drop_tag':drop_tag}
msg=json.dumps(msg)
msg=bytes(msg, "ascii")
msg=cryp.EncryptMsg(msg,password)
try:
sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
except OSError:
msg = ("Attempting to send %s log messages from overran Tx buffer" % str(len(fr)))
msg=localnode+'@'+hostname+"# "+'"'+msg+'"'+drop_tag
msg=bytes(msg, "ascii")
msg=cryp.EncryptMsg(msg,password)
sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
pass
if lfr[0] == lfr[1]:
pass
else:
pass
break
sys.exit()
def main():
params, nw =nm.localParams()
with open('pwf','r') as p:
password=p.read()
password=password.rstrip()
jobs = []
msg=None
r = multiprocessing.Process(target=logMonitor_Tx(msg,params,password,nw))
jobs.append(r)
r.start()
if __name__ == '__main__':
main()
| true | true |
f72cf1840ab9da367881e535c5609e8a88e5c71b | 1,349 | py | Python | actor/skills/grass_cutter.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | actor/skills/grass_cutter.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | actor/skills/grass_cutter.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | from constants import *
from data import IMAGE_ID
from random import randint
from actor.skills.base_skill import BaseSkill
from util import dice
class GrassCutter(BaseSkill):
def __init__(self, x=0, y=0, name="grass_cutter"):
super().__init__(
name=name,
image=IMAGE_ID[name],
x=x,
y=y,
)
#attackに渡される属性
self._damage = 5
self.hit_rate = 95
self.attr = "physical"
self.effect = None
self.owner = None
self._level = 1
self.tag = [Tag.item, Tag.equip, Tag.weapon, Tag.skill, Tag.passive]
self.item_weight = 1.1
self.explanatory_text = f"damage: {self.level}D{self.damage}\nhit rate: {self.hit_rate}"
self.icon = IMAGE_ID["grass_cutter_icon"]
@property
def damage(self):
if self.owner:
return dice((self.level / 3 + 1), ((self.owner.fighter.STR+self._damage))/2, (self.level/2))
def update_animation(self, delta_time):
super().update_animation(delta_time)
try:
if self.master.state == state.ATTACK and Tag.weapon in self.tag:
self.item_margin_x = (self.item_position_x + 5) * SPRITE_SCALE
self.angle += 90
else:
self.angle = 0
except:
pass
| 24.089286 | 104 | 0.575982 | from constants import *
from data import IMAGE_ID
from random import randint
from actor.skills.base_skill import BaseSkill
from util import dice
class GrassCutter(BaseSkill):
def __init__(self, x=0, y=0, name="grass_cutter"):
super().__init__(
name=name,
image=IMAGE_ID[name],
x=x,
y=y,
)
self._damage = 5
self.hit_rate = 95
self.attr = "physical"
self.effect = None
self.owner = None
self._level = 1
self.tag = [Tag.item, Tag.equip, Tag.weapon, Tag.skill, Tag.passive]
self.item_weight = 1.1
self.explanatory_text = f"damage: {self.level}D{self.damage}\nhit rate: {self.hit_rate}"
self.icon = IMAGE_ID["grass_cutter_icon"]
@property
def damage(self):
if self.owner:
return dice((self.level / 3 + 1), ((self.owner.fighter.STR+self._damage))/2, (self.level/2))
def update_animation(self, delta_time):
super().update_animation(delta_time)
try:
if self.master.state == state.ATTACK and Tag.weapon in self.tag:
self.item_margin_x = (self.item_position_x + 5) * SPRITE_SCALE
self.angle += 90
else:
self.angle = 0
except:
pass
| true | true |
f72cf2ba17b0645159849d98585fab8cba690efd | 3,280 | py | Python | enroll_certificates.py | marcelojcn/psd2-tpp-enroller | 86a03287e74a38f1ebb0d46886c2fc0ec0345ff2 | [
"MIT"
] | 2 | 2021-03-23T05:07:53.000Z | 2021-07-04T20:42:20.000Z | enroll_certificates.py | marcelojcn/psd2-tpp-enroller | 86a03287e74a38f1ebb0d46886c2fc0ec0345ff2 | [
"MIT"
] | null | null | null | enroll_certificates.py | marcelojcn/psd2-tpp-enroller | 86a03287e74a38f1ebb0d46886c2fc0ec0345ff2 | [
"MIT"
] | 1 | 2021-03-22T05:45:38.000Z | 2021-03-22T05:45:38.000Z | import argparse
from src.openbanking_tpp_proxy.proxy import Proxy
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process parametes for certificate enrollment")
parser.add_argument('--api_url', type=str, required=True,
help='API url needed for certificate integration')
parser.add_argument('--tpp_id', type=str, required=True,
help="ID of the TPP certificate which can be found under 'subject=*'.")
parser.add_argument('--tpp_name', type=str, required=True,
help="Name of TPP used for integration purposes.")
parser.add_argument('--qwac_cert', type=str, required=True,
help="Path QWAC certificate in DER format.")
parser.add_argument('--qwac_key', type=str, required=True,
help="Path QWAC key in PEM format.")
parser.add_argument('--qseal_cert', type=str, required=True,
help="Path QSEAL certificate in DER format.")
parser.add_argument('--qseal_key', type=str, required=True,
help="Path QSEAL key in PEM format.")
parser.add_argument('--intermediate_cert', type=str, required=True,
help="Path intermediate certificate in DER format.")
parser.add_argument('--root_cert', type=str, required=True,
help="Path root certificate in DER format.")
# Optional arguments
parser.add_argument('--qseal_key_file_password',
default=None,
type=str,
required=False,
help="(OPTIONAL) Password to qseal key file.")
parser.add_argument('--http_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTP proxy URI with port.")
parser.add_argument('--https_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTPS proxy URI with port.")
args = parser.parse_args()
# Enrollment process
proxy = Proxy(args.qwac_cert,
args.qwac_key,
args.qseal_cert,
args.qseal_key,
args.qseal_key_file_password,
args.http_proxy,
args.https_proxy)
enrollment_path = args.api_url + "/eidas/1.0/v1/enrollment"
enrollment_response = proxy.enroll_certificates(enrollment_path,
args.intermediate_cert,
args.root_cert,
args.tpp_id,
args.tpp_name)
print(enrollment_response.status_code)
print(enrollment_response.content)
# Perform connection checks
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/consents/health-check")
print(response.status_code)
print(response.content)
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/payments/health-check")
print(response.status_code)
print(response.content)
| 46.857143 | 96 | 0.559451 | import argparse
from src.openbanking_tpp_proxy.proxy import Proxy
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process parametes for certificate enrollment")
parser.add_argument('--api_url', type=str, required=True,
help='API url needed for certificate integration')
parser.add_argument('--tpp_id', type=str, required=True,
help="ID of the TPP certificate which can be found under 'subject=*'.")
parser.add_argument('--tpp_name', type=str, required=True,
help="Name of TPP used for integration purposes.")
parser.add_argument('--qwac_cert', type=str, required=True,
help="Path QWAC certificate in DER format.")
parser.add_argument('--qwac_key', type=str, required=True,
help="Path QWAC key in PEM format.")
parser.add_argument('--qseal_cert', type=str, required=True,
help="Path QSEAL certificate in DER format.")
parser.add_argument('--qseal_key', type=str, required=True,
help="Path QSEAL key in PEM format.")
parser.add_argument('--intermediate_cert', type=str, required=True,
help="Path intermediate certificate in DER format.")
parser.add_argument('--root_cert', type=str, required=True,
help="Path root certificate in DER format.")
parser.add_argument('--qseal_key_file_password',
default=None,
type=str,
required=False,
help="(OPTIONAL) Password to qseal key file.")
parser.add_argument('--http_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTP proxy URI with port.")
parser.add_argument('--https_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTPS proxy URI with port.")
args = parser.parse_args()
proxy = Proxy(args.qwac_cert,
args.qwac_key,
args.qseal_cert,
args.qseal_key,
args.qseal_key_file_password,
args.http_proxy,
args.https_proxy)
enrollment_path = args.api_url + "/eidas/1.0/v1/enrollment"
enrollment_response = proxy.enroll_certificates(enrollment_path,
args.intermediate_cert,
args.root_cert,
args.tpp_id,
args.tpp_name)
print(enrollment_response.status_code)
print(enrollment_response.content)
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/consents/health-check")
print(response.status_code)
print(response.content)
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/payments/health-check")
print(response.status_code)
print(response.content)
| true | true |
f72cf2bd30e710234b5f0ce409d6c011a60b05e1 | 827 | py | Python | tests/test_service.py | GameServerGurus/Nitrado-SDK | d72536be5def0b51a7ac89ccf62e35095f4ea705 | [
"MIT"
] | 1 | 2022-02-01T18:12:00.000Z | 2022-02-01T18:12:00.000Z | tests/test_service.py | GameServerGurus/Nitrado-SDK | d72536be5def0b51a7ac89ccf62e35095f4ea705 | [
"MIT"
] | 1 | 2022-01-31T21:04:53.000Z | 2022-02-01T02:16:52.000Z | tests/test_service.py | GameServerGurus/Nitrado-SDK | d72536be5def0b51a7ac89ccf62e35095f4ea705 | [
"MIT"
] | null | null | null | import os
from nitrado import Service, initialize_client
def set_client():
    """Initialise the Nitrado client with the API base URL and the token
    read from the NITRADO_KEY environment variable."""
    api_url = "https://api.nitrado.net/"
    api_key = os.getenv('NITRADO_KEY')
    initialize_client(api_key, api_url)
def test_services():
    """At least one service must be visible to the authenticated account."""
    set_client()
    assert len(Service.all()) > 0
def test_logs():
    """The logs endpoint of the first service must return a plain list."""
    set_client()
    first_service = Service.all()[0]
    assert type(first_service.logs()) == list
def test_tasks():
    """The tasks endpoint of the first service must return a plain list."""
    set_client()
    first_service = Service.all()[0]
    assert type(first_service.tasks()) == list
def test_notifications():
    """The notifications endpoint of the first service must return a plain list."""
    set_client()
    first_service = Service.all()[0]
    assert type(first_service.notifications()) == list
def tests():
    """Run the full suite, preserving the original execution order."""
    for check in (test_services, test_notifications, test_logs, test_tasks):
        check()
if __name__ == "__main__":
tests()
print("passing")
| 17.229167 | 46 | 0.636034 | import os
from nitrado import Service, initialize_client
def set_client():
url = "https://api.nitrado.net/"
key = os.getenv('NITRADO_KEY')
initialize_client(key, url)
def test_services():
set_client()
services = Service.all()
assert len(services) > 0
def test_logs():
set_client()
service = Service.all()[0]
logs = service.logs()
assert type(logs) == list
def test_tasks():
set_client()
service = Service.all()[0]
tasks = service.tasks()
assert type(tasks) == list
def test_notifications():
set_client()
service = Service.all()[0]
notif = service.notifications()
assert type(notif) == list
def tests():
test_services()
test_notifications()
test_logs()
test_tasks()
if __name__ == "__main__":
tests()
print("passing")
| true | true |
f72cf30f4db51c6f09ee40560b05c6f53cdebd84 | 5,436 | py | Python | sdk/python/pulumi_aws/ec2/vpn_connection_route.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/vpn_connection_route.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/vpn_connection_route.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['VpnConnectionRoute']
class VpnConnectionRoute(pulumi.CustomResource):
    # Auto-generated Pulumi resource wrapper (see the file header warning);
    # behavior must stay in sync with the generator output.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 destination_cidr_block: Optional[pulumi.Input[str]] = None,
                 vpn_connection_id: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Provides a static route between a VPN connection and a customer gateway.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        vpc = aws.ec2.Vpc("vpc", cidr_block="10.0.0.0/16")
        vpn_gateway = aws.ec2.VpnGateway("vpnGateway", vpc_id=vpc.id)
        customer_gateway = aws.ec2.CustomerGateway("customerGateway",
            bgp_asn="65000",
            ip_address="172.0.0.1",
            type="ipsec.1")
        main = aws.ec2.VpnConnection("main",
            vpn_gateway_id=vpn_gateway.id,
            customer_gateway_id=customer_gateway.id,
            type="ipsec.1",
            static_routes_only=True)
        office = aws.ec2.VpnConnectionRoute("office",
            destination_cidr_block="192.168.10.0/24",
            vpn_connection_id=main.id)
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] destination_cidr_block: The CIDR block associated with the local subnet of the customer network.
        :param pulumi.Input[str] vpn_connection_id: The ID of the VPN connection.
        """
        # Support the deprecated __name__/__opts__ aliases from older SDKs.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means a lookup of an existing resource; in that case
        # properties must come via the get() classmethod, not __init__.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if destination_cidr_block is None:
                raise TypeError("Missing required property 'destination_cidr_block'")
            __props__['destination_cidr_block'] = destination_cidr_block
            if vpn_connection_id is None:
                raise TypeError("Missing required property 'vpn_connection_id'")
            __props__['vpn_connection_id'] = vpn_connection_id
        super(VpnConnectionRoute, __self__).__init__(
            'aws:ec2/vpnConnectionRoute:VpnConnectionRoute',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            destination_cidr_block: Optional[pulumi.Input[str]] = None,
            vpn_connection_id: Optional[pulumi.Input[str]] = None) -> 'VpnConnectionRoute':
        """
        Get an existing VpnConnectionRoute resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] destination_cidr_block: The CIDR block associated with the local subnet of the customer network.
        :param pulumi.Input[str] vpn_connection_id: The ID of the VPN connection.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["destination_cidr_block"] = destination_cidr_block
        __props__["vpn_connection_id"] = vpn_connection_id
        return VpnConnectionRoute(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="destinationCidrBlock")
    def destination_cidr_block(self) -> pulumi.Output[str]:
        """
        The CIDR block associated with the local subnet of the customer network.
        """
        return pulumi.get(self, "destination_cidr_block")

    @property
    @pulumi.getter(name="vpnConnectionId")
    def vpn_connection_id(self) -> pulumi.Output[str]:
        """
        The ID of the VPN connection.
        """
        return pulumi.get(self, "vpn_connection_id")

    # Map between the provider's camelCase property names and Python's
    # snake_case attribute names.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 42.46875 | 134 | 0.653054 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['VpnConnectionRoute']
class VpnConnectionRoute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_cidr_block: Optional[pulumi.Input[str]] = None,
vpn_connection_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if destination_cidr_block is None:
raise TypeError("Missing required property 'destination_cidr_block'")
__props__['destination_cidr_block'] = destination_cidr_block
if vpn_connection_id is None:
raise TypeError("Missing required property 'vpn_connection_id'")
__props__['vpn_connection_id'] = vpn_connection_id
super(VpnConnectionRoute, __self__).__init__(
'aws:ec2/vpnConnectionRoute:VpnConnectionRoute',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
destination_cidr_block: Optional[pulumi.Input[str]] = None,
vpn_connection_id: Optional[pulumi.Input[str]] = None) -> 'VpnConnectionRoute':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["destination_cidr_block"] = destination_cidr_block
__props__["vpn_connection_id"] = vpn_connection_id
return VpnConnectionRoute(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationCidrBlock")
def destination_cidr_block(self) -> pulumi.Output[str]:
return pulumi.get(self, "destination_cidr_block")
@property
@pulumi.getter(name="vpnConnectionId")
def vpn_connection_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "vpn_connection_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f72cf3e78bbce35be30e75fc278005c291dd24d7 | 3,977 | py | Python | src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | 1 | 2021-12-22T13:19:14.000Z | 2021-12-22T13:19:14.000Z | src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | null | null | null | src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.legacy.inference.proposer.newtonian_monte_carlo_utils import (
is_valid,
hessian_of_log_prob,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):
    """
    Single-Site Half Space Newtonian Monte Carlo Proposers.

    See sec. 3.2 of [1]

    [1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`
    """

    def __init__(self, node: RVIdentifier):
        super().__init__(node)
        # Cached Gamma proposal; reused only when this node is the sole latent
        # variable in the world (see get_proposal_distribution).
        self._proposal_distribution = None

    def compute_alpha_beta(
        self, world: World
    ) -> Tuple[bool, torch.Tensor, torch.Tensor]:
        """
        Computes alpha and beta of the Gamma proposal given the node.
            alpha = 1 - hessian_diag * x^2
            beta = -1 * x * hessian_diag - first_grad

        Returns a ``(success, alpha, beta)`` triple. ``success`` is False when
        the gradient or Hessian is invalid (NaN/inf); the returned tensors are
        then zero placeholders and must not be used.
        """
        node_val = world[self.node]
        first_gradient, hessian_diag = hessian_of_log_prob(
            world, self.node, node_val, tensorops.halfspace_gradients
        )
        if not is_valid(first_gradient) or not is_valid(hessian_diag):
            LOGGER.warning(
                "Gradient or Hessian is invalid at node {n}.\n".format(n=str(self.node))
            )
            return False, torch.tensor(0.0), torch.tensor(0.0)

        node_val_reshaped = node_val.reshape(-1)
        predicted_alpha = (
            1 - hessian_diag * (node_val_reshaped * node_val_reshaped)
        ).t()
        predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient
        # Fall back to alpha=1 / beta=distribution-mean wherever the
        # second-order estimates are not both positive (invalid Gamma params).
        condition = (predicted_alpha > 0) & (predicted_beta > 0)
        predicted_alpha = torch.where(
            condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)
        )
        node_var = world.get_variable(self.node)
        mean = (
            node_var.distribution.mean.reshape(-1)
            if is_valid(node_var.distribution.mean)
            else torch.ones_like(predicted_beta)
        )
        predicted_beta = torch.where(condition, predicted_beta, mean)
        predicted_alpha = predicted_alpha.reshape(node_val.shape)
        predicted_beta = predicted_beta.reshape(node_val.shape)
        return True, predicted_alpha, predicted_beta

    def get_proposal_distribution(self, world: World) -> dist.Distribution:
        """
        Returns the proposal distribution of the node.

        Args:
            world: the world in which we're proposing a new value for node.

        Returns:
            The proposal distribution.
        """
        # if the number of variables in the world is 1 and proposal distribution
        # has already been computed, we can use the old proposal distribution
        # and skip re-computing the gradient, since there are no other variable
        # in the world that may change the gradient and the old one is still
        # correct.
        if self._proposal_distribution is not None and len(world.latent_nodes) == 1:
            return self._proposal_distribution

        # Renamed the local from `is_valid` to `success`: the original name
        # shadowed the imported is_valid() helper within this method.
        success, alpha, beta = self.compute_alpha_beta(world)
        if not success:
            LOGGER.warning(
                "Node {n} has invalid proposal solution. ".format(n=self.node)
                + "Proposer falls back to SingleSiteAncestralProposer.\n"
            )
            return super().get_proposal_distribution(world)

        self._proposal_distribution = dist.Gamma(alpha, beta)
        return self._proposal_distribution
| 38.61165 | 104 | 0.670355 |
import logging
from typing import Tuple
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.legacy.inference.proposer.newtonian_monte_carlo_utils import (
is_valid,
hessian_of_log_prob,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):
def __init__(self, node: RVIdentifier):
super().__init__(node)
self._proposal_distribution = None
def compute_alpha_beta(
self, world: World
) -> Tuple[bool, torch.Tensor, torch.Tensor]:
node_val = world[self.node]
first_gradient, hessian_diag = hessian_of_log_prob(
world, self.node, node_val, tensorops.halfspace_gradients
)
if not is_valid(first_gradient) or not is_valid(hessian_diag):
LOGGER.warning(
"Gradient or Hessian is invalid at node {n}.\n".format(n=str(self.node))
)
return False, torch.tensor(0.0), torch.tensor(0.0)
node_val_reshaped = node_val.reshape(-1)
predicted_alpha = (
1 - hessian_diag * (node_val_reshaped * node_val_reshaped)
).t()
predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient
condition = (predicted_alpha > 0) & (predicted_beta > 0)
predicted_alpha = torch.where(
condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)
)
node_var = world.get_variable(self.node)
mean = (
node_var.distribution.mean.reshape(-1)
if is_valid(node_var.distribution.mean)
else torch.ones_like(predicted_beta)
)
predicted_beta = torch.where(condition, predicted_beta, mean)
predicted_alpha = predicted_alpha.reshape(node_val.shape)
predicted_beta = predicted_beta.reshape(node_val.shape)
return True, predicted_alpha, predicted_beta
def get_proposal_distribution(self, world: World) -> dist.Distribution:
if self._proposal_distribution is not None and len(world.latent_nodes) == 1:
return self._proposal_distribution
is_valid, alpha, beta = self.compute_alpha_beta(world)
if not is_valid:
LOGGER.warning(
"Node {n} has invalid proposal solution. ".format(n=self.node)
+ "Proposer falls back to SingleSiteAncestralProposer.\n"
)
return super().get_proposal_distribution(world)
self._proposal_distribution = dist.Gamma(alpha, beta)
return self._proposal_distribution
| true | true |
f72cf4a833b1d28314f79d0d0b59dfce9cb476dc | 627 | py | Python | setup.py | peterpaohuang/depablo-box | edf21fa7f2bcd7009136a2e14b802d004b12b406 | [
"MIT"
] | 1 | 2019-08-06T15:45:34.000Z | 2019-08-06T15:45:34.000Z | setup.py | peterpaohuang/depablo-box | edf21fa7f2bcd7009136a2e14b802d004b12b406 | [
"MIT"
] | null | null | null | setup.py | peterpaohuang/depablo-box | edf21fa7f2bcd7009136a2e14b802d004b12b406 | [
"MIT"
] | null | null | null | import subprocess
# Install everything through conda, one step at a time.  Each entry is
# (banner, conda argv, completion message); a None completion message means
# the step prints no per-step confirmation of its own.
_INSTALL_STEPS = [
    ("Installing required packages...",
     ['conda', 'install', '--yes', '--file', 'requirements.txt'],
     "Installed requirements"),
    ("Installing openbabel...",
     ['conda', 'install', '--yes', '-c', 'openbabel', 'openbabel'],
     "Installed openbabel"),
    ("Installing beautifulsoup4...",
     ['conda', 'install', '--yes', '-c', 'anaconda', 'beautifulsoup4'],
     "Installed beautifulsoup4"),
    ("Installing requests...",
     ['conda', 'install', '--yes', '-c', 'anaconda', 'requests'],
     None),
]
for banner, argv, done_msg in _INSTALL_STEPS:
    print(banner)
    # check_output raises CalledProcessError if conda exits non-zero.
    subprocess.check_output(argv)
    if done_msg is not None:
        print(done_msg)
print("Finished install all packages")
print("Installing required packages...")
subprocess.check_output(['conda','install','--yes','--file','requirements.txt'])
print("Installed requirements")
print("Installing openbabel...")
subprocess.check_output(['conda','install','--yes','-c','openbabel', 'openbabel'])
print("Installed openbabel")
print("Installing beautifulsoup4...")
subprocess.check_output(['conda','install','--yes','-c','anaconda', 'beautifulsoup4'])
print("Installed beautifulsoup4")
print("Installing requests...")
subprocess.check_output(['conda','install','--yes','-c','anaconda', 'requests'])
print("Finished install all packages") | true | true |
f72cf697921980b575ca1e6593cdff20ac9b8c90 | 2,031 | py | Python | v3/htmlexample_module.py | ambadhan/OnlinePythonTutor | 857bab941fbde20f1f020b05b7723094ddead62a | [
"MIT"
] | 17 | 2021-12-09T11:31:44.000Z | 2021-12-29T03:07:14.000Z | v3/htmlexample_module.py | heysachin/OnlinePythonTutor | 0dcdacc7ff5be504dd6a47236ebc69dc0069f991 | [
"MIT"
] | 22 | 2017-09-17T03:59:16.000Z | 2017-11-14T17:33:57.000Z | v3/htmlexample_module.py | heysachin/OnlinePythonTutor | 0dcdacc7ff5be504dd6a47236ebc69dc0069f991 | [
"MIT"
] | 12 | 2021-12-09T11:31:46.000Z | 2022-01-07T03:14:46.000Z | # Example module for Online Python Tutor
# Philip Guo
# 2013-08-03
# To get the Online Python Tutor backend to import this custom module,
# add its filename ('htmlexample_module') to the CUSTOM_MODULE_IMPORTS
# tuple in pg_logger.py
# To see an example of this module at work, write the following code in
# http://pythontutor.com/visualize.html
'''
from htmlexample_module import ColorTable
t = ColorTable(3, 4)
t.set_color(0, 0, 'red')
t.render_HTML()
t.set_color(1, 1, 'green')
t.render_HTML()
t.set_color(2, 2, 'blue')
t.render_HTML()
for i in range(3):
for j in range(4):
t.set_color(i, j, 'gray')
t.render_HTML()
'''
# defines a simple table where you can set colors for individual rows and columns
class ColorTable:
    """A grid of colorable cells rendered as an HTML table.

    Cells start uncolored (rendered white); color individual cells with
    set_color() and push the whole grid to the visualizer with render_HTML().
    """
    def __init__(self, num_rows, num_columns):
        """Create a num_rows x num_columns table with all cells uncolored."""
        self.num_rows = num_rows
        self.num_columns = num_columns
        # create a 2D matrix of empty strings; '' means "no color set yet"
        self.table = []
        for i in range(self.num_rows):
            new_lst = ['' for e in range(self.num_columns)]
            self.table.append(new_lst)
    # color must be a legal HTML color string (e.g. 'red' or '#ff0000')
    def set_color(self, row, column, color):
        """Set the color of the cell at (row, column); indices are 0-based."""
        assert 0 <= row < self.num_rows
        assert 0 <= column < self.num_columns
        self.table[row][column] = color
    # call this function whenever you want to render this table in HTML
    def render_HTML(self):
        """Build the HTML for the current grid and display it via setHTML.

        setHTML is not defined in this file -- it is a magic global provided
        by the Online Python Tutor backend (see the module header comments).
        """
        # incrementally build up an HTML table string
        html_string = '<table>'
        for i in range(self.num_rows):
            html_string += '<tr>'
            for j in range(self.num_columns):
                color = self.table[i][j]
                if not color:
                    color = "white"
                html_string += '''<td style="width: 30px; height: 30px; border: 1px solid black;
                                 background-color: %s;"></td>''' % color
            html_string += '</tr>'
        html_string += '</table>'
        # then call the magic setHTML function
        setHTML(html_string)
| 28.208333 | 96 | 0.613983 |
class ColorTable:
def __init__(self, num_rows, num_columns):
self.num_rows = num_rows
self.num_columns = num_columns
self.table = []
for i in range(self.num_rows):
new_lst = ['' for e in range(self.num_columns)]
self.table.append(new_lst)
def set_color(self, row, column, color):
assert 0 <= row < self.num_rows
assert 0 <= column < self.num_columns
self.table[row][column] = color
def render_HTML(self):
html_string = '<table>'
for i in range(self.num_rows):
html_string += '<tr>'
for j in range(self.num_columns):
color = self.table[i][j]
if not color:
color = "white"
html_string += '''<td style="width: 30px; height: 30px; border: 1px solid black;
background-color: %s;"></td>''' % color
html_string += '</tr>'
html_string += '</table>'
setHTML(html_string)
| true | true |
f72cf6de2f57109ea400d2d7cfa8931507537fd0 | 3,018 | py | Python | examples/epd_2in13_test.py | philipempl/ResearchyPi | 74cf9cc78ace39d1d843b2b64fe39704aaafe778 | [
"MIT"
] | null | null | null | examples/epd_2in13_test.py | philipempl/ResearchyPi | 74cf9cc78ace39d1d843b2b64fe39704aaafe778 | [
"MIT"
] | null | null | null | examples/epd_2in13_test.py | philipempl/ResearchyPi | 74cf9cc78ace39d1d843b2b64fe39704aaafe778 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import os
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
if os.path.exists(libdir):
sys.path.append(libdir)
import logging
from waveshare_epd import epd2in13
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
logging.basicConfig(level=logging.DEBUG)
try:
logging.info("epd2in13 Demo")
epd = epd2in13.EPD()
logging.info("init and Clear")
epd.init(epd.lut_full_update)
epd.Clear(0xFF)
# Drawing on the image
font15 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 15)
font24 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
logging.info("1.Drawing on the image...")
image = Image.new('1', (epd.height, epd.width), 255) # 255: clear the frame
draw = ImageDraw.Draw(image)
draw.rectangle([(0,0),(50,50)],outline = 0)
draw.rectangle([(55,0),(100,50)],fill = 0)
draw.line([(0,0),(50,50)], fill = 0,width = 1)
draw.line([(0,50),(50,0)], fill = 0,width = 1)
draw.chord((10, 60, 50, 100), 0, 360, fill = 0)
draw.ellipse((55, 60, 95, 100), outline = 0)
draw.pieslice((55, 60, 95, 100), 90, 180, outline = 0)
draw.pieslice((55, 60, 95, 100), 270, 360, fill = 0)
draw.polygon([(110,0),(110,50),(150,25)],outline = 0)
draw.polygon([(190,0),(190,50),(150,25)],fill = 0)
draw.text((120, 60), 'e-Paper demo', font = font15, fill = 0)
draw.text((110, 90), u'微雪电子', font = font24, fill = 0)
epd.display(epd.getbuffer(image))
time.sleep(2)
# read bmp file
logging.info("2.read bmp file...")
image = Image.open(os.path.join(picdir, '2in13.bmp'))
epd.display(epd.getbuffer(image))
time.sleep(2)
# read bmp file on window
logging.info("3.read bmp file on window...")
# epd.Clear(0xFF)
image1 = Image.new('1', (epd.height, epd.width), 255) # 255: clear the frame
bmp = Image.open(os.path.join(picdir, '100x100.bmp'))
image1.paste(bmp, (2,2))
epd.display(epd.getbuffer(image1))
time.sleep(2)
# # partial update
logging.info("4.show time...")
epd.init(epd.lut_partial_update)
epd.Clear(0xFF)
time_image = Image.new('1', (epd.height, epd.width), 255)
time_draw = ImageDraw.Draw(time_image)
num = 0
while (True):
time_draw.rectangle((120, 80, 220, 105), fill = 255)
time_draw.text((120, 80), time.strftime('%H:%M:%S'), font = font24, fill = 0)
epd.display(epd.getbuffer(time_image))
num = num + 1
if(num == 10):
break
logging.info("Clear...")
epd.init(epd.lut_full_update)
epd.Clear(0xFF)
logging.info("Goto Sleep...")
epd.sleep()
except IOError as e:
logging.info(e)
except KeyboardInterrupt:
logging.info("ctrl + c:")
epd2in13.epdconfig.module_exit()
exit()
| 32.106383 | 90 | 0.611995 |
import sys
import os
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
if os.path.exists(libdir):
sys.path.append(libdir)
import logging
from waveshare_epd import epd2in13
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
logging.basicConfig(level=logging.DEBUG)
try:
logging.info("epd2in13 Demo")
epd = epd2in13.EPD()
logging.info("init and Clear")
epd.init(epd.lut_full_update)
epd.Clear(0xFF)
font15 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 15)
font24 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
logging.info("1.Drawing on the image...")
image = Image.new('1', (epd.height, epd.width), 255)
draw = ImageDraw.Draw(image)
draw.rectangle([(0,0),(50,50)],outline = 0)
draw.rectangle([(55,0),(100,50)],fill = 0)
draw.line([(0,0),(50,50)], fill = 0,width = 1)
draw.line([(0,50),(50,0)], fill = 0,width = 1)
draw.chord((10, 60, 50, 100), 0, 360, fill = 0)
draw.ellipse((55, 60, 95, 100), outline = 0)
draw.pieslice((55, 60, 95, 100), 90, 180, outline = 0)
draw.pieslice((55, 60, 95, 100), 270, 360, fill = 0)
draw.polygon([(110,0),(110,50),(150,25)],outline = 0)
draw.polygon([(190,0),(190,50),(150,25)],fill = 0)
draw.text((120, 60), 'e-Paper demo', font = font15, fill = 0)
draw.text((110, 90), u'微雪电子', font = font24, fill = 0)
epd.display(epd.getbuffer(image))
time.sleep(2)
logging.info("2.read bmp file...")
image = Image.open(os.path.join(picdir, '2in13.bmp'))
epd.display(epd.getbuffer(image))
time.sleep(2)
logging.info("3.read bmp file on window...")
image1 = Image.new('1', (epd.height, epd.width), 255)
bmp = Image.open(os.path.join(picdir, '100x100.bmp'))
image1.paste(bmp, (2,2))
epd.display(epd.getbuffer(image1))
time.sleep(2)
o("4.show time...")
epd.init(epd.lut_partial_update)
epd.Clear(0xFF)
time_image = Image.new('1', (epd.height, epd.width), 255)
time_draw = ImageDraw.Draw(time_image)
num = 0
while (True):
time_draw.rectangle((120, 80, 220, 105), fill = 255)
time_draw.text((120, 80), time.strftime('%H:%M:%S'), font = font24, fill = 0)
epd.display(epd.getbuffer(time_image))
num = num + 1
if(num == 10):
break
logging.info("Clear...")
epd.init(epd.lut_full_update)
epd.Clear(0xFF)
logging.info("Goto Sleep...")
epd.sleep()
except IOError as e:
logging.info(e)
except KeyboardInterrupt:
logging.info("ctrl + c:")
epd2in13.epdconfig.module_exit()
exit()
| true | true |
f72cf7000c24d4d32e896071d41604079d19da89 | 3,666 | py | Python | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 7 | 2019-03-27T17:25:41.000Z | 2022-03-31T03:55:29.000Z | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 2 | 2019-05-09T16:09:45.000Z | 2021-01-04T03:55:09.000Z | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | null | null | null | """
Binomial distribution
---------------------
"""
import mpmath
from ..fun import logbinomial
__all__ = ['pmf', 'logpmf', 'cdf', 'sf', 'mean', 'var']
def _validate_np(n, p):
    """Raise ValueError when (n, p) is outside the binomial parameter space.

    Mirrors the checks shared by every public function in this module:
    p must lie in [0, 1] and n must be nonnegative.
    """
    p_out_of_range = p < 0 or p > 1
    if p_out_of_range:
        raise ValueError('p must be in the range [0, 1]')
    if n < 0:
        raise ValueError('n must be a nonnegative integer.')
def pmf(k, n, p):
    """
    Probability mass function of the binomial distribution.

    Evaluates C(n, k) * p**k * (1 - p)**(n - k) with 5 extra decimal
    places of working precision.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        prob = mpmath.mpf(p)
        n_choose_k = mpmath.binomial(n, k)
        success_factor = mpmath.power(prob, k)
        failure_factor = mpmath.power(1 - prob, n - k)
        return n_choose_k * success_factor * failure_factor
def logpmf(k, n, p):
    """
    Natural log of the probability mass function of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        log_choose = logbinomial(n, k)
        log_success = k * mpmath.log(p)
        # fsum([n, -k]) forms n - k; log1p(-p) keeps precision for small p.
        log_failure = mpmath.fsum([n, -k]) * mpmath.log1p(-p)
        return log_choose + log_success + log_failure
def cdf(k, n, p, method='incbeta'):
    """
    Cumulative distribution function of the binomial distribution.

    `method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
    the CDF is computed with a simple sum of the PMF values. When `method`
    is "incbeta", the incomplete beta function is used. This method is
    generally faster than the "sumpmf" method, but for large values of k
    or n, the incomplete beta function of mpmath might fail.

    Raises
    ------
    ValueError
        If p is not in [0, 1], n is negative, or `method` is not one of
        "sumpmf" or "incbeta".
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # Bug fix: the message previously suggested "sum", which the check
        # above does not accept; the valid choices are "sumpmf"/"incbeta".
        raise ValueError('method must be "sumpmf" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # XXX For large values of k and/or n, betainc fails. The failure
            # occurs in one of the hypergeometric functions.
            return mpmath.betainc(n - k, k + 1, x1=0, x2=1 - p,
                                  regularized=True)
    else:
        # method is "sumpmf"
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1)])
        return c
def sf(k, n, p, method='incbeta'):
    """
    Survival function of the binomial distribution.

    `method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
    the survival function is computed with a simple sum of the PMF values.
    When `method` is "incbeta", the incomplete beta function is used. This
    method is generally faster than the "sumpmf" method, but for large values
    of k or n, the incomplete beta function of mpmath might fail.

    Raises
    ------
    ValueError
        If p is not in [0, 1], n is negative, or `method` is not one of
        "sumpmf" or "incbeta".
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # Bug fix: the message previously suggested "sum", which the check
        # above does not accept; the valid choices are "sumpmf"/"incbeta".
        raise ValueError('method must be "sumpmf" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # XXX For large values of k and/or n, betainc fails. The failure
            # occurs in one of the hypergeometric functions.
            return mpmath.betainc(n - k, k + 1, x1=1-p, x2=1,
                                  regularized=True)
    else:
        # method is "sumpmf"
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1, n + 1)])
        return c
def mean(n, p):
    """
    Mean (n*p) of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        return mpmath.mpf(n) * mpmath.mpf(p)
def var(n, p):
    """
    Variance n*p*(1 - p) of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        trials = mpmath.mpf(n)
        prob = mpmath.mpf(p)
        return trials * prob * (1 - prob)
| 30.297521 | 78 | 0.563011 |
import mpmath
from ..fun import logbinomial
__all__ = ['pmf', 'logpmf', 'cdf', 'sf', 'mean', 'var']
def _validate_np(n, p):
if p < 0 or p > 1:
raise ValueError('p must be in the range [0, 1]')
if n < 0:
raise ValueError('n must be a nonnegative integer.')
return
def pmf(k, n, p):
_validate_np(n, p)
with mpmath.extradps(5):
p = mpmath.mpf(p)
return (mpmath.binomial(n, k) *
mpmath.power(p, k) *
mpmath.power(1 - p, n - k))
def logpmf(k, n, p):
_validate_np(n, p)
with mpmath.extradps(5):
return (logbinomial(n, k)
+ k*mpmath.log(p)
+ mpmath.fsum([n, -k])*mpmath.log1p(-p))
def cdf(k, n, p, method='incbeta'):
_validate_np(n, p)
if method not in ['sumpmf', 'incbeta']:
raise ValueError('method must be "sum" or "incbeta"')
if method == 'incbeta':
with mpmath.extradps(5):
p = mpmath.mpf(p)
return mpmath.betainc(n - k, k + 1, x1=0, x2=1 - p,
regularized=True)
else:
with mpmath.extradps(5):
c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
for t in range(k + 1)])
return c
def sf(k, n, p, method='incbeta'):
_validate_np(n, p)
if method not in ['sumpmf', 'incbeta']:
raise ValueError('method must be "sum" or "incbeta"')
if method == 'incbeta':
with mpmath.extradps(5):
p = mpmath.mpf(p)
return mpmath.betainc(n - k, k + 1, x1=1-p, x2=1,
regularized=True)
else:
with mpmath.extradps(5):
c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
for t in range(k + 1, n + 1)])
return c
def mean(n, p):
_validate_np(n, p)
with mpmath.extradps(5):
n = mpmath.mpf(n)
p = mpmath.mpf(p)
return n*p
def var(n, p):
_validate_np(n, p)
with mpmath.extradps(5):
n = mpmath.mpf(n)
p = mpmath.mpf(p)
return n * p * (1 - p)
| true | true |
f72cf726678d2cd74a55d44450a33b0c8d9c834f | 792 | py | Python | pytezos/rpc/errors.py | arvidj/pytezos | a8545d9408f086eba91b4af7e12c488672144ff6 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | pytezos/rpc/errors.py | arvidj/pytezos | a8545d9408f086eba91b4af7e12c488672144ff6 | [
"MIT"
] | null | null | null | pytezos/rpc/errors.py | arvidj/pytezos | a8545d9408f086eba91b4af7e12c488672144ff6 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from pytezos.rpc.node import RpcError
class MichelsonBadContractParameter(RpcError, error_id='michelson_v1.bad_contract_parameter'):
""" Either no parameter was supplied to a contract with a non-unit parameter type, a non-unit parameter was passed
to an account, or a parameter was supplied of the wrong type
"""
class MichelsonBadReturn(RpcError, error_id='michelson_v1.bad_return'):
""" Unexpected stack at the end of a lambda or script
"""
class MichelsonRuntimeError(RpcError, error_id='michelson_v1'):
""" Catch all michelson_v1 errors
"""
class TezArithmeticError(RpcError, error_id='tez'):
""" Catch all tez errors
"""
class MichelsonScriptRejected(RpcError, error_id='script_rejected'):
""" A FAILWITH instruction was reached
"""
| 28.285714 | 118 | 0.736111 | from pytezos.rpc.node import RpcError
class MichelsonBadContractParameter(RpcError, error_id='michelson_v1.bad_contract_parameter'):
class MichelsonBadReturn(RpcError, error_id='michelson_v1.bad_return'):
class MichelsonRuntimeError(RpcError, error_id='michelson_v1'):
class TezArithmeticError(RpcError, error_id='tez'):
class MichelsonScriptRejected(RpcError, error_id='script_rejected'):
| true | true |
f72cf8174f5a2872015dadbef99938a1a8e72272 | 1,116 | py | Python | thingsboard_gateway/storage/event_storage_reader_pointer.py | xinge-ok/thingsboard-gateway | 6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14 | [
"Apache-2.0"
] | 1 | 2020-02-24T09:08:35.000Z | 2020-02-24T09:08:35.000Z | thingsboard_gateway/storage/event_storage_reader_pointer.py | xinge-ok/thingsboard-gateway | 6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14 | [
"Apache-2.0"
] | null | null | null | thingsboard_gateway/storage/event_storage_reader_pointer.py | xinge-ok/thingsboard-gateway | 6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2019. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EventStorageReaderPointer:
    """Position of the event-storage reader: a data file plus a line offset.

    Instances compare equal when both the file and the line match, and hash
    consistently with that equality, so pointers can be used as dict keys or
    set members.
    """
    def __init__(self, file, line):
        self.file = file
        self.line = line

    def __eq__(self, other):
        # Bug fix: comparing against a foreign type (e.g. None or a str)
        # used to raise AttributeError on the missing .file/.line; return
        # NotImplemented so Python falls back to its default handling.
        if not isinstance(other, EventStorageReaderPointer):
            return NotImplemented
        return self.file == other.file and self.line == other.line

    def __hash__(self):
        return hash((self.file, self.line))

    def get_file(self):
        """Return the tracked file."""
        return self.file

    def get_line(self):
        """Return the tracked line offset within the file."""
        return self.line

    def set_file(self, file):
        """Point the reader at a different file."""
        self.file = file

    def set_line(self, line):
        """Move the reader to a different line offset."""
        self.line = line
| 28.615385 | 78 | 0.662186 |
class EventStorageReaderPointer:
def __init__(self, file, line):
self.file = file
self.line = line
def __eq__(self, other):
return self.file == other.file and self.line == other.line
def __hash__(self):
return hash((self.file, self.line))
def get_file(self):
return self.file
def get_line(self):
return self.line
def set_file(self, file):
self.file = file
def set_line(self, line):
self.line = line
| true | true |
f72cf8cdaafe9d7009fc1fbe038716d9fe13a281 | 81,150 | py | Python | pandas/tests/frame/test_analytics.py | dhimmel/pandas | 776fed3ab63d74ddef6e5af1a702b10c2a30bbb6 | [
"BSD-3-Clause"
] | 1 | 2018-08-01T05:39:55.000Z | 2018-08-01T05:39:55.000Z | pandas/tests/frame/test_analytics.py | dhimmel/pandas | 776fed3ab63d74ddef6e5af1a702b10c2a30bbb6 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/frame/test_analytics.py | dhimmel/pandas | 776fed3ab63d74ddef6e5af1a702b10c2a30bbb6 | [
"BSD-3-Clause"
] | 1 | 2019-05-26T12:57:52.000Z | 2019-05-26T12:57:52.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import warnings
from datetime import timedelta
import operator
import pytest
from string import ascii_lowercase
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
_np_version_under1p12,
to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(TestData):
    """Tests for DataFrame statistics: correlation, covariance, describe().

    The self.frame / self.tsframe / self.mixed_frame fixtures come from the
    TestData base class.
    """
    # ---------------------------------------------------------------------
    # Correlation and covariance
    @td.skip_if_no_scipy
    def test_corr_pearson(self):
        # NaNs injected into the fixture must not break corr
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        self._check_method('pearson')
    @td.skip_if_no_scipy
    def test_corr_kendall(self):
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        self._check_method('kendall')
    @td.skip_if_no_scipy
    def test_corr_spearman(self):
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        self._check_method('spearman')
    def _check_method(self, method='pearson', check_minp=False):
        """Compare DataFrame.corr(method=...) with the pairwise Series.corr;
        with check_minp=True, verify that min_periods NaN-masks column
        pairs that have too few overlapping observations.
        """
        if not check_minp:
            correls = self.frame.corr(method=method)
            exp = self.frame['A'].corr(self.frame['C'], method=method)
            tm.assert_almost_equal(correls['A']['C'], exp)
        else:
            result = self.frame.corr(min_periods=len(self.frame) - 8)
            expected = self.frame.corr()
            expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
            tm.assert_frame_equal(result, expected)
    @td.skip_if_no_scipy
    def test_corr_non_numeric(self):
        self.frame['A'][:5] = nan
        self.frame['B'][5:10] = nan
        # exclude non-numeric types
        result = self.mixed_frame.corr()
        expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
        tm.assert_frame_equal(result, expected)
    @td.skip_if_no_scipy
    @pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
    def test_corr_nooverlap(self, meth):
        # nothing in common: A and B never observe the same rows, C is
        # all-NaN -- off-diagonal entries must come back NaN
        df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
                        'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
                        'C': [np.nan, np.nan, np.nan, np.nan,
                              np.nan, np.nan]})
        rs = df.corr(meth)
        assert isna(rs.loc['A', 'B'])
        assert isna(rs.loc['B', 'A'])
        assert rs.loc['A', 'A'] == 1
        assert rs.loc['B', 'B'] == 1
        assert isna(rs.loc['C', 'C'])
    @td.skip_if_no_scipy
    @pytest.mark.parametrize('meth', ['pearson', 'spearman'])
    def test_corr_constant(self, meth):
        # constant --> all NA
        df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
                        'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
        rs = df.corr(meth)
        assert isna(rs.values).all()
    def test_corr_int(self):
        # dtypes other than float64 #1761
        # smoke test: cov/corr must not raise on integer columns
        df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
        df3.cov()
        df3.corr()
    @td.skip_if_no_scipy
    def test_corr_int_and_boolean(self):
        # when dtypes of pandas series are different
        # then ndarray will have dtype=object,
        # so it need to be properly handled
        df = DataFrame({"a": [True, False], "b": [1, 0]})
        expected = DataFrame(np.ones((2, 2)), index=[
                             'a', 'b'], columns=['a', 'b'])
        for meth in ['pearson', 'kendall', 'spearman']:
            # RuntimeWarning
            with warnings.catch_warnings(record=True):
                result = df.corr(meth)
            tm.assert_frame_equal(result, expected)
    def test_corr_cov_independent_index_column(self):
        # GH 14617: corr/cov must not return the very same object for
        # index and columns, only equal ones
        df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
                          columns=list("abcd"))
        for method in ['cov', 'corr']:
            result = getattr(df, method)()
            assert result.index is not result.columns
            assert result.index.equals(result.columns)
    def test_cov(self):
        # min_periods no NAs (corner case)
        expected = self.frame.cov()
        result = self.frame.cov(min_periods=len(self.frame))
        tm.assert_frame_equal(expected, result)
        result = self.frame.cov(min_periods=len(self.frame) + 1)
        assert isna(result.values).all()
        # with NAs
        # NOTE(review): this section computes result/expected but never
        # asserts on them, and the NaNs are written into a *copy* while
        # cov(min_periods=...) is taken on the unmodified frame -- the
        # min_periods-with-NAs case is effectively unverified here.
        frame = self.frame.copy()
        frame['A'][:5] = nan
        frame['B'][5:10] = nan
        result = self.frame.cov(min_periods=len(self.frame) - 8)
        expected = self.frame.cov()
        expected.loc['A', 'B'] = np.nan
        expected.loc['B', 'A'] = np.nan
        # regular
        self.frame['A'][:5] = nan
        self.frame['B'][:10] = nan
        cov = self.frame.cov()
        tm.assert_almost_equal(cov['A']['C'],
                               self.frame['A'].cov(self.frame['C']))
        # exclude non-numeric types
        result = self.mixed_frame.cov()
        expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
        tm.assert_frame_equal(result, expected)
        # Single column frame
        df = DataFrame(np.linspace(0.0, 1.0, 10))
        result = df.cov()
        expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
                             index=df.columns, columns=df.columns)
        tm.assert_frame_equal(result, expected)
        df.loc[0] = np.nan
        result = df.cov()
        expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
                             index=df.columns, columns=df.columns)
        tm.assert_frame_equal(result, expected)
    def test_corrwith(self):
        a = self.tsframe
        noise = Series(randn(len(a)), index=a.index)
        b = self.tsframe.add(noise, axis=0)
        # make sure order does not matter
        b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
        del b['B']
        colcorr = a.corrwith(b, axis=0)
        tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
        rowcorr = a.corrwith(b, axis=1)
        tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
        # drop=True removes labels missing from the other frame
        dropped = a.corrwith(b, axis=0, drop=True)
        tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
        assert 'B' not in dropped
        dropped = a.corrwith(b, axis=1, drop=True)
        assert a.index[-1] not in dropped.index
        # non time-series data
        index = ['a', 'b', 'c', 'd', 'e']
        columns = ['one', 'two', 'three', 'four']
        df1 = DataFrame(randn(5, 4), index=index, columns=columns)
        df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
        correls = df1.corrwith(df2, axis=1)
        for row in index[:4]:
            tm.assert_almost_equal(correls[row],
                                   df1.loc[row].corr(df2.loc[row]))
    def test_corrwith_with_objects(self):
        # object-dtype columns must be excluded from the correlation
        df1 = tm.makeTimeDataFrame()
        df2 = tm.makeTimeDataFrame()
        cols = ['A', 'B', 'C', 'D']
        df1['obj'] = 'foo'
        df2['obj'] = 'bar'
        result = df1.corrwith(df2)
        expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
        tm.assert_series_equal(result, expected)
        result = df1.corrwith(df2, axis=1)
        expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
        tm.assert_series_equal(result, expected)
    def test_corrwith_series(self):
        # corrwith against a Series == applying Series.corr column-wise
        result = self.tsframe.corrwith(self.tsframe['A'])
        expected = self.tsframe.apply(self.tsframe['A'].corr)
        tm.assert_series_equal(result, expected)
    def test_corrwith_matches_corrcoef(self):
        df1 = DataFrame(np.arange(10000), columns=['a'])
        df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
        c1 = df1.corrwith(df2)['a']
        c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
        tm.assert_almost_equal(c1, c2)
        assert c1 < 1
    def test_corrwith_mixed_dtypes(self):
        # GH 18570: non-numeric columns are silently dropped
        df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
                           'c': ['a', 'b', 'c', 'd']})
        s = pd.Series([0, 6, 7, 3])
        result = df.corrwith(s)
        corrs = [df['a'].corr(s), df['b'].corr(s)]
        expected = pd.Series(data=corrs, index=['a', 'b'])
        tm.assert_series_equal(result, expected)
    def test_bool_describe_in_mixed_frame(self):
        df = DataFrame({
            'string_data': ['a', 'b', 'c', 'd', 'e'],
            'bool_data': [True, True, False, False, False],
            'int_data': [10, 20, 30, 40, 50],
        })
        # Integer data are included in .describe() output,
        # Boolean and string data are not.
        result = df.describe()
        expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
                                           10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        tm.assert_frame_equal(result, expected)
        # Top value is a boolean value that is False
        result = df.describe(include=['bool'])
        expected = DataFrame({'bool_data': [5, 2, False, 3]},
                             index=['count', 'unique', 'top', 'freq'])
        tm.assert_frame_equal(result, expected)
    def test_describe_bool_frame(self):
        # GH 13891
        # all-bool frame: categorical-style describe (count/unique/top/freq)
        df = pd.DataFrame({
            'bool_data_1': [False, False, True, True],
            'bool_data_2': [False, True, True, True]
        })
        result = df.describe()
        expected = DataFrame({'bool_data_1': [4, 2, True, 2],
                              'bool_data_2': [4, 2, True, 3]},
                             index=['count', 'unique', 'top', 'freq'])
        tm.assert_frame_equal(result, expected)
        # mixed bool/int: only the numeric column is described
        df = pd.DataFrame({
            'bool_data': [False, False, True, True, False],
            'int_data': [0, 1, 2, 3, 4]
        })
        result = df.describe()
        expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
                                           2, 3, 4]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        tm.assert_frame_equal(result, expected)
        # mixed bool/str: both get the categorical-style summary
        df = pd.DataFrame({
            'bool_data': [False, False, True, True],
            'str_data': ['a', 'b', 'c', 'a']
        })
        result = df.describe()
        expected = DataFrame({'bool_data': [4, 2, True, 2],
                              'str_data': [4, 3, 'a', 2]},
                             index=['count', 'unique', 'top', 'freq'])
        tm.assert_frame_equal(result, expected)
    def test_describe_categorical(self):
        df = DataFrame({'value': np.random.randint(0, 10000, 100)})
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
        cat_labels = Categorical(labels, labels)
        df = df.sort_values(by=['value'], ascending=True)
        df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
                                   right=False, labels=cat_labels)
        cat = df
        # Categoricals should not show up together with numerical columns
        result = cat.describe()
        assert len(result.columns) == 1
        # In a frame, describe() for the cat should be the same as for string
        # arrays (count, unique, top, freq)
        cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
                          ordered=True)
        s = Series(cat)
        result = s.describe()
        expected = Series([4, 2, "b", 3],
                          index=['count', 'unique', 'top', 'freq'])
        tm.assert_series_equal(result, expected)
        # categorical and equivalent string column must describe identically
        cat = Series(Categorical(["a", "b", "c", "c"]))
        df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
        res = df3.describe()
        tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
    def test_describe_categorical_columns(self):
        # GH 11558
        # describe() must preserve a CategoricalIndex on the columns
        columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
                                      ordered=True, name='XXX')
        df = DataFrame({'int1': [10, 20, 30, 40, 50],
                        'int2': [10, 20, 30, 40, 50],
                        'obj': ['A', 0, None, 'X', 1]},
                       columns=columns)
        result = df.describe()
        exp_columns = pd.CategoricalIndex(['int1', 'int2'],
                                          categories=['int1', 'int2', 'obj'],
                                          ordered=True, name='XXX')
        expected = DataFrame({'int1': [5, 30, df.int1.std(),
                                       10, 20, 30, 40, 50],
                              'int2': [5, 30, df.int2.std(),
                                       10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'],
                             columns=exp_columns)
        tm.assert_frame_equal(result, expected)
        tm.assert_categorical_equal(result.columns.values,
                                    expected.columns.values)
    def test_describe_datetime_columns(self):
        """describe() preserves DatetimeIndex column metadata (freq and tz)."""
        columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
                                   freq='MS', tz='US/Eastern', name='XXX')
        df = DataFrame({0: [10, 20, 30, 40, 50],
                        1: [10, 20, 30, 40, 50],
                        2: ['A', 0, None, 'X', 1]})
        df.columns = columns
        result = df.describe()
        # non-numeric column dropped; freq/tz/name of the column index kept
        exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
                                       freq='MS', tz='US/Eastern', name='XXX')
        expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
                                  10, 20, 30, 40, 50],
                              1: [5, 30, df.iloc[:, 1].std(),
                                  10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        expected.columns = exp_columns
        tm.assert_frame_equal(result, expected)
        assert result.columns.freq == 'MS'
        assert result.columns.tz == expected.columns.tz
    def test_describe_timedelta_values(self):
        """describe() on timedelta columns yields Timedelta stats and a stable repr (GH 6145)."""
        # GH 6145
        t1 = pd.timedelta_range('1 days', freq='D', periods=5)
        t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
        df = pd.DataFrame({'t1': t1, 't2': t2})
        expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
                                     df.iloc[:, 0].std(),
                                     pd.Timedelta('1 days'),
                                     pd.Timedelta('2 days'),
                                     pd.Timedelta('3 days'),
                                     pd.Timedelta('4 days'),
                                     pd.Timedelta('5 days')],
                              't2': [5, pd.Timedelta('3 hours'),
                                     df.iloc[:, 1].std(),
                                     pd.Timedelta('1 hours'),
                                     pd.Timedelta('2 hours'),
                                     pd.Timedelta('3 hours'),
                                     pd.Timedelta('4 hours'),
                                     pd.Timedelta('5 hours')]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        res = df.describe()
        tm.assert_frame_equal(res, expected)
        # also pin the formatted repr of the describe output
        exp_repr = ("                           t1                      t2\n"
                    "count                       5                       5\n"
                    "mean          3 days 00:00:00         0 days 03:00:00\n"
                    "std    1 days 13:56:50.394919  0 days 01:34:52.099788\n"
                    "min           1 days 00:00:00         0 days 01:00:00\n"
                    "25%           2 days 00:00:00         0 days 02:00:00\n"
                    "50%           3 days 00:00:00         0 days 03:00:00\n"
                    "75%           4 days 00:00:00         0 days 04:00:00\n"
                    "max           5 days 00:00:00         0 days 05:00:00")
        assert repr(res) == exp_repr
    def test_describe_tz_values(self, tz_naive_fixture):
        """describe(include='all') mixes numeric and tz-aware datetime stats (GH 21332)."""
        # GH 21332
        tz = tz_naive_fixture
        s1 = Series(range(5))
        start = Timestamp(2018, 1, 1)
        end = Timestamp(2018, 1, 5)
        s2 = Series(date_range(start, end, tz=tz))
        df = pd.DataFrame({'s1': s1, 's2': s2})
        # numeric column has NaN in the object-ish rows, and vice versa
        expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
                                     2, 1.581139, 0, 1, 2, 3, 4],
                              's2': [5, 5, s2.value_counts().index[0], 1,
                                     start.tz_localize(tz),
                                     end.tz_localize(tz), np.nan, np.nan,
                                     np.nan, np.nan, np.nan, np.nan, np.nan]},
                             index=['count', 'unique', 'top', 'freq', 'first',
                                    'last', 'mean', 'std', 'min', '25%', '50%',
                                    '75%', 'max']
                             )
        res = df.describe(include='all')
        tm.assert_frame_equal(res, expected)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
    def test_count(self):
        """count() matches a notna-sum reference and handles empty frames."""
        f = lambda s: notna(s).sum()
        self._check_stat_op('count', f,
                            has_skipna=False,
                            has_numeric_only=True,
                            check_dtype=False,
                            check_dates=True)
        # corner case: counting on a fully empty frame returns empty Series
        frame = DataFrame()
        ct1 = frame.count(1)
        assert isinstance(ct1, Series)
        ct2 = frame.count(0)
        assert isinstance(ct2, Series)
        # GH #423: frames with an index but no columns (and vice versa)
        df = DataFrame(index=lrange(10))
        result = df.count(1)
        expected = Series(0, index=df.index)
        tm.assert_series_equal(result, expected)
        df = DataFrame(columns=lrange(10))
        result = df.count(0)
        expected = Series(0, index=df.columns)
        tm.assert_series_equal(result, expected)
        df = DataFrame()
        result = df.count()
        expected = Series(0, index=[])
        tm.assert_series_equal(result, expected)
    def test_nunique(self):
        """nunique() counts distinct values per axis, with/without NaN via dropna."""
        f = lambda s: len(algorithms.unique1d(s.dropna()))
        self._check_stat_op('nunique', f, has_skipna=False,
                            check_dtype=False, check_dates=True)
        df = DataFrame({'A': [1, 1, 1],
                        'B': [1, 2, 3],
                        'C': [1, np.nan, 3]})
        tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
        # dropna=False counts NaN as its own value
        tm.assert_series_equal(df.nunique(dropna=False),
                               Series({'A': 1, 'B': 3, 'C': 3}))
        tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
        tm.assert_series_equal(df.nunique(axis=1, dropna=False),
                               Series({0: 1, 1: 3, 2: 2}))
    def test_sum(self):
        """sum() matches np.sum / np.nansum, including float32 upcasting."""
        self._check_stat_op('sum', np.sum, has_numeric_only=True,
                            skipna_alternative=np.nansum)
        # mixed types (with upcasting happening)
        self._check_stat_op('sum', np.sum,
                            frame=self.mixed_float.astype('float32'),
                            has_numeric_only=True, check_dtype=False,
                            check_less_precise=True)
    @pytest.mark.parametrize(
        "method", ['sum', 'mean', 'prod', 'var',
                   'std', 'skew', 'min', 'max'])
    def test_stat_operators_attempt_obj_array(self, method):
        """Reductions on object-dtype frames coerce to float where possible (GH #676)."""
        # GH #676
        data = {
            'a': [-0.00049987540199591344, -0.0016467257772919831,
                  0.00067695870775883013],
            'b': [-0, -0, 0.0],
            'c': [0.00031111847529610595, 0.0014902627951905339,
                  -0.00094099200035979691]
        }
        df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
                        dtype='O')
        df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
                         2: [np.nan, 4]}, dtype=object)
        for df in [df1, df2]:
            assert df.values.dtype == np.object_
            result = getattr(df, method)(1)
            expected = getattr(df.astype('f8'), method)(1)
            # only sum/prod results are compared exactly here
            if method in ['sum', 'prod']:
                tm.assert_series_equal(result, expected)
    def test_mean(self):
        """mean() matches np.mean, including datetime columns."""
        self._check_stat_op('mean', np.mean, check_dates=True)
    def test_product(self):
        """product() matches np.prod."""
        self._check_stat_op('product', np.prod)
    def test_median(self):
        """median() matches np.median (NaN-propagating reference)."""
        def wrapper(x):
            # reference returns NaN if any value is missing (skipna handled
            # separately by _check_stat_op)
            if isna(x).any():
                return np.nan
            return np.median(x)
        self._check_stat_op('median', wrapper, check_dates=True)
    def test_min(self):
        """min() matches np.min on float and integer frames."""
        with warnings.catch_warnings(record=True):
            self._check_stat_op('min', np.min, check_dates=True)
        self._check_stat_op('min', np.min, frame=self.intframe)
    def test_cummin(self):
        """cummin() matches a per-Series reference along both axes with NaNs present."""
        # seed some NaN runs into the shared fixture
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cummin = self.tsframe.cummin()
        expected = self.tsframe.apply(Series.cummin)
        tm.assert_frame_equal(cummin, expected)
        # axis = 1
        cummin = self.tsframe.cummin(axis=1)
        expected = self.tsframe.apply(Series.cummin, axis=1)
        tm.assert_frame_equal(cummin, expected)
        # it works
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummin()  # noqa
        # fix issue: shape must be preserved on axis=1
        cummin_xs = self.tsframe.cummin(axis=1)
        assert np.shape(cummin_xs) == np.shape(self.tsframe)
    def test_cummax(self):
        """cummax() matches a per-Series reference along both axes with NaNs present."""
        # seed some NaN runs into the shared fixture
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cummax = self.tsframe.cummax()
        expected = self.tsframe.apply(Series.cummax)
        tm.assert_frame_equal(cummax, expected)
        # axis = 1
        cummax = self.tsframe.cummax(axis=1)
        expected = self.tsframe.apply(Series.cummax, axis=1)
        tm.assert_frame_equal(cummax, expected)
        # it works
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummax()  # noqa
        # fix issue: shape must be preserved on axis=1
        cummax_xs = self.tsframe.cummax(axis=1)
        assert np.shape(cummax_xs) == np.shape(self.tsframe)
    def test_max(self):
        """max() matches np.max on float and integer frames."""
        with warnings.catch_warnings(record=True):
            self._check_stat_op('max', np.max, check_dates=True)
        self._check_stat_op('max', np.max, frame=self.intframe)
    def test_mad(self):
        """mad() matches the mean-absolute-deviation reference."""
        f = lambda x: np.abs(x - x.mean()).mean()
        self._check_stat_op('mad', f)
    def test_var_std(self):
        """var()/std() match ddof=1 NumPy references; custom ddof works; nanvar is non-negative."""
        alt = lambda x: np.var(x, ddof=1)
        self._check_stat_op('var', alt)
        alt = lambda x: np.std(x, ddof=1)
        self._check_stat_op('std', alt)
        result = self.tsframe.std(ddof=4)
        expected = self.tsframe.apply(lambda x: x.std(ddof=4))
        tm.assert_almost_equal(result, expected)
        result = self.tsframe.var(ddof=4)
        expected = self.tsframe.apply(lambda x: x.var(ddof=4))
        tm.assert_almost_equal(result, expected)
        # identical rows: variance must not go (numerically) negative
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nanvar(arr, axis=0)
        assert not (result < 0).any()
        # same check with bottleneck disabled
        with pd.option_context('use_bottleneck', False):
            result = nanops.nanvar(arr, axis=0)
            assert not (result < 0).any()
    @pytest.mark.parametrize(
        "meth", ['sem', 'var', 'std'])
    def test_numeric_only_flag(self, meth):
        """numeric_only=True excludes non-numeric columns; =False raises on them (GH #9201)."""
        # GH #9201
        df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a number in str format
        df1.loc[0, 'foo'] = '100'
        df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a non-number str
        df2.loc[0, 'foo'] = 'a'
        result = getattr(df1, meth)(axis=1, numeric_only=True)
        expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
        tm.assert_series_equal(expected, result)
        result = getattr(df2, meth)(axis=1, numeric_only=True)
        expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
        tm.assert_series_equal(expected, result)
        # df1 has all numbers, df2 has a letter inside
        pytest.raises(TypeError, lambda: getattr(df1, meth)(
            axis=1, numeric_only=False))
        pytest.raises(TypeError, lambda: getattr(df2, meth)(
            axis=1, numeric_only=False))
    @pytest.mark.parametrize('op', ['mean', 'std', 'var',
                                    'skew', 'kurt', 'sem'])
    def test_mixed_ops(self, op):
        """Reductions silently drop the string column on a mixed frame (GH 16116)."""
        # GH 16116
        df = DataFrame({'int': [1, 2, 3, 4],
                        'float': [1., 2., 3., 4.],
                        'str': ['a', 'b', 'c', 'd']})
        result = getattr(df, op)()
        assert len(result) == 2
        # same outcome with bottleneck disabled
        with pd.option_context('use_bottleneck', False):
            result = getattr(df, op)()
            assert len(result) == 2
    def test_cumsum(self):
        """cumsum() matches a per-Series reference along both axes with NaNs present."""
        # seed some NaN runs into the shared fixture
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cumsum = self.tsframe.cumsum()
        expected = self.tsframe.apply(Series.cumsum)
        tm.assert_frame_equal(cumsum, expected)
        # axis = 1
        cumsum = self.tsframe.cumsum(axis=1)
        expected = self.tsframe.apply(Series.cumsum, axis=1)
        tm.assert_frame_equal(cumsum, expected)
        # works
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cumsum()  # noqa
        # fix issue: shape must be preserved on axis=1
        cumsum_xs = self.tsframe.cumsum(axis=1)
        assert np.shape(cumsum_xs) == np.shape(self.tsframe)
    def test_cumprod(self):
        """cumprod() matches a per-Series reference and works on int dtypes."""
        # seed some NaN runs into the shared fixture
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cumprod = self.tsframe.cumprod()
        expected = self.tsframe.apply(Series.cumprod)
        tm.assert_frame_equal(cumprod, expected)
        # axis = 1
        cumprod = self.tsframe.cumprod(axis=1)
        expected = self.tsframe.apply(Series.cumprod, axis=1)
        tm.assert_frame_equal(cumprod, expected)
        # fix issue: shape must be preserved on axis=1
        cumprod_xs = self.tsframe.cumprod(axis=1)
        assert np.shape(cumprod_xs) == np.shape(self.tsframe)
        # ints: just check it runs
        df = self.tsframe.fillna(0).astype(int)
        df.cumprod(0)
        df.cumprod(1)
        # ints32
        df = self.tsframe.fillna(0).astype(np.int32)
        df.cumprod(0)
        df.cumprod(1)
    def test_sem(self):
        """sem() matches std/sqrt(n); nansem stays non-negative on identical rows."""
        alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
        self._check_stat_op('sem', alt)
        result = self.tsframe.sem(ddof=4)
        expected = self.tsframe.apply(
            lambda x: x.std(ddof=4) / np.sqrt(len(x)))
        tm.assert_almost_equal(result, expected)
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nansem(arr, axis=0)
        assert not (result < 0).any()
        # same check with bottleneck disabled
        with pd.option_context('use_bottleneck', False):
            result = nanops.nansem(arr, axis=0)
            assert not (result < 0).any()
    @td.skip_if_no_scipy
    def test_skew(self):
        """skew() matches scipy.stats.skew with bias correction."""
        from scipy.stats import skew
        def alt(x):
            # skew is undefined for fewer than 3 observations
            if len(x) < 3:
                return np.nan
            return skew(x, bias=False)
        self._check_stat_op('skew', alt)
    @td.skip_if_no_scipy
    def test_kurt(self):
        """kurt() matches scipy kurtosis; level-based kurt agrees with the flat result."""
        from scipy.stats import kurtosis
        def alt(x):
            # kurtosis is undefined for fewer than 4 observations
            if len(x) < 4:
                return np.nan
            return kurtosis(x, bias=False)
        self._check_stat_op('kurt', alt)
        # MultiIndex: kurt(level=0) restricted to one level equals plain kurt
        index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 0, 0, 0, 0, 0],
                                   [0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
        df = DataFrame(np.random.randn(6, 3), index=index)
        kurt = df.kurt()
        kurt2 = df.kurt(level=0).xs('bar')
        tm.assert_series_equal(kurt, kurt2, check_names=False)
        assert kurt.name is None
        assert kurt2.name == 'bar'
    def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
                       has_numeric_only=False, check_dtype=True,
                       check_dates=False, check_less_precise=False,
                       skipna_alternative=None):
        """Exercise the DataFrame reduction ``name`` against a reference.

        Parameters
        ----------
        name : str
            Reduction method to call (e.g. 'sum', 'mean').
        alternative : callable
            Reference implementation applied column-/row-wise via apply.
        frame : DataFrame, optional
            Frame under test; defaults to ``self.frame``. NOTE(review): the
            frame is mutated in place (NaNs injected) — callers share this
            side effect.
        has_skipna : bool
            Whether the method supports the ``skipna`` keyword.
        has_numeric_only : bool
            Whether the method supports the ``numeric_only`` keyword.
        check_dtype, check_dates, check_less_precise : bool
            Comparison strictness / extra datetime checks.
        skipna_alternative : callable, optional
            NaN-aware reference (e.g. ``np.nansum``) for the skipna path.
        """
        if frame is None:
            frame = self.frame
        # set some NAs
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan
        f = getattr(frame, name)
        if check_dates:
            # reduction must return a Series even for datetime-only frames
            df = DataFrame({'b': date_range('1/1/2001', periods=2)})
            _f = getattr(df, name)
            result = _f()
            assert isinstance(result, Series)
            df['a'] = lrange(len(df))
            result = getattr(df, name)()
            assert isinstance(result, Series)
            assert len(result)
        if has_skipna:
            def wrapper(x):
                return alternative(x.values)
            skipna_wrapper = tm._make_skipna_wrapper(alternative,
                                                     skipna_alternative)
            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            tm.assert_series_equal(result0, frame.apply(wrapper),
                                   check_dtype=check_dtype,
                                   check_less_precise=check_less_precise)
            # HACK: win32
            tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                   check_dtype=False,
                                   check_less_precise=check_less_precise)
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        result0 = f(axis=0)
        result1 = f(axis=1)
        tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
                               check_dtype=check_dtype,
                               check_less_precise=check_less_precise)
        if name in ['sum', 'prod']:
            exp = frame.apply(skipna_wrapper, axis=1)
            tm.assert_series_equal(result1, exp, check_dtype=False,
                                   check_less_precise=check_less_precise)
        # check dtypes
        if check_dtype:
            lcd_dtype = frame.values.dtype
            assert lcd_dtype == result0.dtype
            assert lcd_dtype == result1.dtype
        # result = f(axis=1)
        # comp = frame.apply(alternative, axis=1).reindex(result.index)
        # assert_series_equal(result, comp)
        # bad axis
        tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
        # make sure works on mixed-type frame
        getattr(self.mixed_frame, name)(axis=0)
        getattr(self.mixed_frame, name)(axis=1)
        if has_numeric_only:
            getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
            getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
            getattr(self.frame, name)(axis=0, numeric_only=False)
            getattr(self.frame, name)(axis=1, numeric_only=False)
        # all NA case
        if has_skipna:
            all_na = self.frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if name in ['sum', 'prod']:
                # all-NaN reduces to the operation's identity element
                unit = int(name == 'prod')
                expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
                tm.assert_series_equal(r0, expected)
                expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
                tm.assert_series_equal(r1, expected)
    @pytest.mark.parametrize("dropna, expected", [
        (True, {'A': [12],
                'B': [10.0],
                'C': [1.0],
                'D': ['a'],
                'E': Categorical(['a'], categories=['a']),
                'F': to_datetime(['2000-1-2']),
                'G': to_timedelta(['1 days'])}),
        (False, {'A': [12],
                 'B': [10.0],
                 'C': [np.nan],
                 'D': np.array([np.nan], dtype=object),
                 'E': Categorical([np.nan], categories=['a']),
                 'F': [pd.NaT],
                 'G': to_timedelta([pd.NaT])}),
        (True, {'H': [8, 9, np.nan, np.nan],
                'I': [8, 9, np.nan, np.nan],
                'J': [1, np.nan, np.nan, np.nan],
                'K': Categorical(['a', np.nan, np.nan, np.nan],
                                 categories=['a']),
                'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
                'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
                'N': [0, 1, 2, 3]}),
        (False, {'H': [8, 9, np.nan, np.nan],
                 'I': [8, 9, np.nan, np.nan],
                 'J': [1, np.nan, np.nan, np.nan],
                 'K': Categorical([np.nan, 'a', np.nan, np.nan],
                                  categories=['a']),
                 'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
                 'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
                 'N': [0, 1, 2, 3]})
    ])
    def test_mode_dropna(self, dropna, expected):
        """mode(dropna=...) handles NaN across numeric, object, categorical,
        datetime and timedelta columns."""
        df = DataFrame({"A": [12, 12, 19, 11],
                        "B": [10, 10, np.nan, 3],
                        "C": [1, np.nan, np.nan, np.nan],
                        "D": [np.nan, np.nan, 'a', np.nan],
                        "E": Categorical([np.nan, np.nan, 'a', np.nan]),
                        "F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
                        "G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
                        "H": [8, 8, 9, 9],
                        "I": [9, 9, 8, 8],
                        "J": [1, 1, np.nan, np.nan],
                        "K": Categorical(['a', np.nan, 'a', np.nan]),
                        "L": to_datetime(['2000-1-2', '2000-1-2',
                                          'NaT', 'NaT']),
                        "M": to_timedelta(['1 days', 'nan',
                                           '1 days', 'nan']),
                        "N": np.arange(4, dtype='int64')})
        result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
        expected = DataFrame(expected)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.skipif(not compat.PY3, reason="only PY3")
    def test_mode_sortwarning(self):
        """mode(dropna=False) warns when mixed NaN/str results cannot be sorted."""
        # Check for the warning that is raised when the mode
        # results cannot be sorted
        df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
        expected = DataFrame({'A': ['a', np.nan]})
        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
            result = df.mode(dropna=False)
            result = result.sort_values(by='A').reset_index(drop=True)
        tm.assert_frame_equal(result, expected)
    def test_operators_timedelta64(self):
        """min/max/abs over timedelta64 columns, including mixed-dtype frames."""
        from datetime import timedelta
        df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
                            B=date_range('2012-1-2', periods=3, freq='D'),
                            C=Timestamp('20120101') -
                            timedelta(minutes=5, seconds=5)))
        diffs = DataFrame(dict(A=df['A'] - df['C'],
                               B=df['A'] - df['B']))
        # min
        result = diffs.min()
        assert result[0] == diffs.loc[0, 'A']
        assert result[1] == diffs.loc[0, 'B']
        result = diffs.min(axis=1)
        assert (result == diffs.loc[0, 'B']).all()
        # max
        result = diffs.max()
        assert result[0] == diffs.loc[2, 'A']
        assert result[1] == diffs.loc[2, 'B']
        result = diffs.max(axis=1)
        assert (result == diffs['A']).all()
        # abs
        result = diffs.abs()
        result2 = abs(diffs)
        expected = DataFrame(dict(A=df['A'] - df['C'],
                                  B=df['B'] - df['A']))
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)
        # mixed frame
        mixed = diffs.copy()
        mixed['C'] = 'foo'
        mixed['D'] = 1
        mixed['E'] = 1.
        mixed['F'] = Timestamp('20130101')
        # results in an object array
        from pandas.core.tools.timedeltas import (
            _coerce_scalar_to_timedelta_type as _coerce)
        result = mixed.min()
        expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
                           _coerce(timedelta(days=-1)),
                           'foo', 1, 1.0,
                           Timestamp('20130101')],
                          index=mixed.columns)
        tm.assert_series_equal(result, expected)
        # excludes numeric
        result = mixed.min(axis=1)
        expected = Series([1, 1, 1.], index=[0, 1, 2])
        tm.assert_series_equal(result, expected)
        # works when only those columns are selected
        result = mixed[['A', 'B']].min(1)
        expected = Series([timedelta(days=-1)] * 3)
        tm.assert_series_equal(result, expected)
        result = mixed[['A', 'B']].min()
        expected = Series([timedelta(seconds=5 * 60 + 5),
                           timedelta(days=-1)], index=['A', 'B'])
        tm.assert_series_equal(result, expected)
        # GH 3106: timedelta columns keep their dtype through consolidation
        df = DataFrame({'time': date_range('20130102', periods=5),
                        'time2': date_range('20130105', periods=5)})
        df['off1'] = df['time2'] - df['time']
        assert df['off1'].dtype == 'timedelta64[ns]'
        df['off2'] = df['time'] - df['time2']
        df._consolidate_inplace()
        assert df['off1'].dtype == 'timedelta64[ns]'
        assert df['off2'].dtype == 'timedelta64[ns]'
    def test_sum_corner(self):
        """sum() on an empty frame returns empty Series on both axes."""
        axis0 = self.empty.sum(0)
        axis1 = self.empty.sum(1)
        assert isinstance(axis0, Series)
        assert isinstance(axis1, Series)
        assert len(axis0) == 0
        assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
    def test_sum_nanops_timedelta(self):
        """sum() with min_count on timedelta columns (prod is undefined for them)."""
        # prod isn't defined on timedeltas
        idx = ['a', 'b', 'c']
        df = pd.DataFrame({"a": [0, 0],
                           "b": [0, np.nan],
                           "c": [np.nan, np.nan]})
        df2 = df.apply(pd.to_timedelta)
        # 0 by default
        result = df2.sum()
        expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
        tm.assert_series_equal(result, expected)
        # min_count=0
        result = df2.sum(min_count=0)
        tm.assert_series_equal(result, expected)
        # min_count=1: the all-NaT column becomes NaT
        result = df2.sum(min_count=1)
        expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
        tm.assert_series_equal(result, expected)
    def test_sum_object(self):
        """sum() runs without error on a frame of timedelta objects."""
        values = self.frame.values.astype(int)
        frame = DataFrame(values, index=self.frame.index,
                          columns=self.frame.columns)
        deltas = frame * timedelta(1)
        # smoke test — no assertion on the value
        deltas.sum()
    def test_sum_bool(self):
        """sum() runs on boolean frames along both axes (smoke test, bug report)."""
        # ensure this works, bug report
        bools = np.isnan(self.frame)
        bools.sum(1)
        bools.sum(0)
    def test_mean_corner(self):
        """mean() on mixed frames drops non-numeric columns; bool columns are averaged."""
        # unit test when have object data
        the_mean = self.mixed_frame.mean(axis=0)
        the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
        tm.assert_index_equal(the_sum.index, the_mean.index)
        assert len(the_mean.index) < len(self.mixed_frame.columns)
        # xs sum mixed type, just want to know it works...
        the_mean = self.mixed_frame.mean(axis=1)
        the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
        tm.assert_index_equal(the_sum.index, the_mean.index)
        # take mean of boolean column
        self.frame['bool'] = self.frame['A'] > 0
        means = self.frame.mean(0)
        assert means['bool'] == self.frame['bool'].values.mean()
    def test_stats_mixed_type(self):
        """Smoke test: row-wise stats on a mixed-dtype frame don't blow up."""
        # don't blow up
        self.mixed_frame.std(1)
        self.mixed_frame.var(1)
        self.mixed_frame.mean(1)
        self.mixed_frame.skew(1)
    def test_median_corner(self):
        """median() on an integer frame matches the NaN-propagating np.median reference."""
        def wrapper(x):
            if isna(x).any():
                return np.nan
            return np.median(x)
        self._check_stat_op('median', wrapper, frame=self.intframe,
                            check_dtype=False, check_dates=True)
# Miscellanea
    def test_count_objects(self):
        """count() agrees between two frames built from the same Series dict."""
        dm = DataFrame(self.mixed_frame._series)
        df = DataFrame(self.mixed_frame._series)
        tm.assert_series_equal(dm.count(), df.count())
        tm.assert_series_equal(dm.count(1), df.count(1))
    def test_cumsum_corner(self):
        """Smoke test: cumsum() on a small integer frame does not raise."""
        dm = DataFrame(np.arange(20).reshape(4, 5),
                       index=lrange(4), columns=lrange(5))
        # ?(wesm)
        result = dm.cumsum()  # noqa
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# Index of max / min
    def test_idxmin(self):
        """idxmin() matches the per-Series reference for all axis/skipna combos."""
        frame = self.frame
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan
        for skipna in [True, False]:
            for axis in [0, 1]:
                for df in [frame, self.intframe]:
                    result = df.idxmin(axis=axis, skipna=skipna)
                    expected = df.apply(Series.idxmin, axis=axis,
                                        skipna=skipna)
                    tm.assert_series_equal(result, expected)
        # invalid axis must raise
        pytest.raises(ValueError, frame.idxmin, axis=2)
    def test_idxmax(self):
        """idxmax() matches the per-Series reference for all axis/skipna combos."""
        frame = self.frame
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan
        for skipna in [True, False]:
            for axis in [0, 1]:
                for df in [frame, self.intframe]:
                    result = df.idxmax(axis=axis, skipna=skipna)
                    expected = df.apply(Series.idxmax, axis=axis,
                                        skipna=skipna)
                    tm.assert_series_equal(result, expected)
        # invalid axis must raise
        pytest.raises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
    def test_any_all(self):
        """any()/all() match the NumPy references via the shared bool-op checker."""
        self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
        self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
    def test_any_all_extra(self):
        """any()/all() row-wise, with bool_only, and with axis=None scalar reduction."""
        df = DataFrame({
            'A': [True, False, False],
            'B': [True, True, False],
            'C': [True, True, True],
        }, index=['a', 'b', 'c'])
        result = df[['A', 'B']].any(1)
        expected = Series([True, True, False], index=['a', 'b', 'c'])
        tm.assert_series_equal(result, expected)
        result = df[['A', 'B']].any(1, bool_only=True)
        tm.assert_series_equal(result, expected)
        result = df.all(1)
        expected = Series([True, False, False], index=['a', 'b', 'c'])
        tm.assert_series_equal(result, expected)
        result = df.all(1, bool_only=True)
        tm.assert_series_equal(result, expected)
        # Axis is None: reduce the whole frame to a scalar
        result = df.all(axis=None).item()
        assert result is False
        result = df.any(axis=None).item()
        assert result is True
        result = df[['C']].all(axis=None).item()
        assert result is True
        # skip pathological failure cases
        # class CantNonzero(object):
        #     def __nonzero__(self):
        #         raise ValueError
        # df[4] = CantNonzero()
        # it works!
        # df.any(1)
        # df.all(1)
        # df.any(1, bool_only=True)
        # df.all(1, bool_only=True)
        # df[4][4] = np.nan
        # df.any(1)
        # df.all(1)
        # df.any(1, bool_only=True)
        # df.all(1, bool_only=True)
    @pytest.mark.parametrize('func, data, expected', [
        (np.any, {}, False),
        (np.all, {}, True),
        (np.any, {'A': []}, False),
        (np.all, {'A': []}, True),
        (np.any, {'A': [False, False]}, False),
        (np.all, {'A': [False, False]}, False),
        (np.any, {'A': [True, False]}, True),
        (np.all, {'A': [True, False]}, False),
        (np.any, {'A': [True, True]}, True),
        (np.all, {'A': [True, True]}, True),
        (np.any, {'A': [False], 'B': [False]}, False),
        (np.all, {'A': [False], 'B': [False]}, False),
        (np.any, {'A': [False, False], 'B': [False, True]}, True),
        (np.all, {'A': [False, False], 'B': [False, True]}, False),
        # other types
        (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
        (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
        (np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
        (np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
        pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        (np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
        (np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
        (np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
        (np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
        # # Mix
        # GH-21484
        # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
        #           'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
    ])
    def test_any_all_np_func(self, func, data, expected):
        """np.any/np.all dispatched to a DataFrame return a scalar np.bool_ (GH 19976)."""
        # https://github.com/pandas-dev/pandas/issues/19976
        data = DataFrame(data)
        result = func(data)
        assert isinstance(result, np.bool_)
        assert result.item() is expected
        # method version
        result = getattr(DataFrame(data), func.__name__)(axis=None)
        assert isinstance(result, np.bool_)
        assert result.item() is expected
    def test_any_all_object(self):
        """np.all/np.any on an empty object frame reduce to True/False (GH 19976)."""
        # https://github.com/pandas-dev/pandas/issues/19976
        result = np.all(DataFrame(columns=['a', 'b'])).item()
        assert result is True
        result = np.any(DataFrame(columns=['a', 'b'])).item()
        assert result is False
    @pytest.mark.parametrize('method', ['any', 'all'])
    def test_any_all_level_axis_none_raises(self, method):
        """any/all with level= but axis=None must raise a clear ValueError."""
        df = DataFrame(
            {"A": 1},
            index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
                                          names=['out', 'in'])
        )
        xpr = "Must specify 'axis' when aggregating by level."
        with tm.assert_raises_regex(ValueError, xpr):
            getattr(df, method)(axis=None, level='out')
    def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
                       has_bool_only=False):
        """Exercise the boolean reduction ``name`` against a reference.

        Parameters
        ----------
        name : str
            Boolean reduction to call ('any' or 'all').
        alternative : callable
            Reference implementation applied column-/row-wise.
        frame : DataFrame, optional
            Frame under test; defaults to ``self.frame > 0`` cast to object
            so NaNs can be injected.
        has_skipna : bool
            Whether the method supports ``skipna``.
        has_bool_only : bool
            Whether the method supports ``bool_only``.
        """
        if frame is None:
            frame = self.frame > 0
            # set some NAs (cast to object so NaN can be stored)
            frame = DataFrame(frame.values.astype(object), frame.index,
                              frame.columns)
            frame.loc[5:10] = np.nan
            frame.loc[15:20, -2:] = np.nan
        f = getattr(frame, name)
        if has_skipna:
            def skipna_wrapper(x):
                nona = x.dropna().values
                return alternative(nona)
            def wrapper(x):
                return alternative(x.values)
            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            tm.assert_series_equal(result0, frame.apply(wrapper))
            tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                   check_dtype=False)  # HACK: win32
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        result0 = f(axis=0)
        result1 = f(axis=1)
        tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
        tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                               check_dtype=False)
        # result = f(axis=1)
        # comp = frame.apply(alternative, axis=1).reindex(result.index)
        # assert_series_equal(result, comp)
        # bad axis
        pytest.raises(ValueError, f, axis=2)
        # make sure works on mixed-type frame
        mixed = self.mixed_frame
        mixed['_bool_'] = np.random.randn(len(mixed)) > 0
        getattr(mixed, name)(axis=0)
        getattr(mixed, name)(axis=1)
        class NonzeroFail(object):
            def __nonzero__(self):
                raise ValueError
        mixed['_nonzero_fail_'] = NonzeroFail()
        if has_bool_only:
            # bool_only must skip the poisoned object column
            getattr(mixed, name)(axis=0, bool_only=True)
            getattr(mixed, name)(axis=1, bool_only=True)
            getattr(frame, name)(axis=0, bool_only=False)
            getattr(frame, name)(axis=1, bool_only=False)
        # all NA case
        if has_skipna:
            all_na = frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if name == 'any':
                assert not r0.any()
                assert not r1.any()
            else:
                assert r0.all()
                assert r1.all()
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("empty", [[], Series(), np.array([])])
    def test_isin_empty(self, empty):
        """isin against any empty collection is all-False (gh-16991)."""
        # see gh-16991
        df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
        expected = DataFrame(False, df.index, df.columns)
        result = df.isin(empty)
        tm.assert_frame_equal(result, expected)
    def test_isin_dict(self):
        """isin with a dict restricts matching to the named columns, even when duplicated."""
        df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
        d = {'A': ['a']}
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, 'A'] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)
        # non unique columns
        df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
        df.columns = ['A', 'A']
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, 'A'] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)
    def test_isin_with_string_scalar(self):
        """isin with a bare string (not an iterable of values) raises TypeError (GH4763)."""
        # GH4763
        df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                        'ids2': ['a', 'n', 'c', 'n']},
                       index=['foo', 'bar', 'baz', 'qux'])
        with pytest.raises(TypeError):
            df.isin('a')
        with pytest.raises(TypeError):
            df.isin('aaa')
    def test_isin_df(self):
        """isin against another DataFrame aligns on both index and column labels."""
        df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
        df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
        expected = DataFrame(False, df1.index, df1.columns)
        result = df1.isin(df2)
        expected['A'].loc[[1, 3]] = True
        expected['B'].loc[[0, 2]] = True
        tm.assert_frame_equal(result, expected)
        # partial overlapping columns: non-shared columns are all-False
        df2.columns = ['A', 'C']
        result = df1.isin(df2)
        expected['B'] = False
        tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
    def test_isin_df_dupe_values(self):
        """isin against a frame with duplicated labels (columns and/or index) raises."""
        df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
        # just cols duped
        df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                        columns=['B', 'B'])
        with pytest.raises(ValueError):
            df1.isin(df2)
        # just index duped
        df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                        columns=['A', 'B'], index=[0, 0, 1, 1])
        with pytest.raises(ValueError):
            df1.isin(df2)
        # cols and index:
        df2.columns = ['B', 'B']
        with pytest.raises(ValueError):
            df1.isin(df2)
    def test_isin_dupe_self(self):
        """isin works when the calling frame itself has duplicated column labels."""
        other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
        df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
        result = df.isin(other)
        expected = DataFrame(False, index=df.index, columns=df.columns)
        expected.loc[0] = True
        expected.iloc[1, 1] = True
        tm.assert_frame_equal(result, expected)
    def test_isin_against_series(self):
        """isin against a Series aligns on the index before matching values."""
        df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
                          index=['a', 'b', 'c', 'd'])
        s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
        expected = DataFrame(False, index=df.index, columns=df.columns)
        expected['A'].loc['a'] = True
        expected.loc['d'] = True
        result = df.isin(s)
        tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
with pytest.raises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
tm.assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
tm.assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
    def test_numpy_round(self):
        """np.round dispatches to DataFrame.round; the ``out`` kwarg is
        rejected with ValueError (gh-12600)."""
        # See gh-12600
        df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
        out = np.round(df, decimals=0)
        expected = DataFrame([[2., 1.], [0., 7.]])
        tm.assert_frame_equal(out, expected)
        msg = "the 'out' parameter is not supported"
        with tm.assert_raises_regex(ValueError, msg):
            np.round(df, decimals=0, out=df)
    def test_round_mixed_type(self):
        """round on a mixed-dtype frame only touches numeric columns;
        string and datetime columns pass through unchanged (GH11885)."""
        # GH11885
        df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
                        'col2': ['1', 'a', 'c', 'f'],
                        'col3': date_range('20111111', periods=4)})
        round_0 = DataFrame({'col1': [1., 2., 3., 4.],
                             'col2': ['1', 'a', 'c', 'f'],
                             'col3': date_range('20111111', periods=4)})
        tm.assert_frame_equal(df.round(), round_0)
        tm.assert_frame_equal(df.round(1), df)
        tm.assert_frame_equal(df.round({'col1': 1}), df)
        tm.assert_frame_equal(df.round({'col1': 0}), round_0)
        tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
        # rounding a non-numeric column alone is a no-op
        tm.assert_frame_equal(df.round({'col3': 1}), df)
    def test_round_issue(self):
        """round preserves a duplicated index; Series ``decimals`` with
        duplicate labels raises ValueError (GH11611)."""
        # GH11611
        df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
                          index=['first', 'second', 'third'])
        dfs = pd.concat((df, df), axis=1)
        rounded = dfs.round()
        tm.assert_index_equal(rounded.index, dfs.index)
        # duplicate label 'A' in the decimals Series is ambiguous
        decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
        pytest.raises(ValueError, df.round, decimals)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip("build in round cannot be overridden "
"prior to Python 3")
# GH11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(round(df), expected_rounded)
    def test_pct_change(self):
        """pct_change with fill_method='pad' equals ffill/shift arithmetic
        along both axes (GH 11150)."""
        # GH 11150
        pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
            0, 40, 10)]).astype(np.float64)
        pnl.iat[1, 0] = np.nan
        pnl.iat[1, 1] = np.nan
        pnl.iat[2, 3] = 60
        for axis in range(2):
            # manual equivalent: forward-fill, then ratio to shifted values
            expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
                axis=axis) - 1
            result = pnl.pct_change(axis=axis, fill_method='pad')
            tm.assert_frame_equal(result, expected)
    # Clip
    def test_clip(self):
        """clip_upper / clip_lower / clip bound the values and leave the
        original frame untouched."""
        median = self.frame.median().median()
        original = self.frame.copy()
        capped = self.frame.clip_upper(median)
        assert not (capped.values > median).any()
        floored = self.frame.clip_lower(median)
        assert not (floored.values < median).any()
        # clipping to [median, median] collapses every value to the median
        double = self.frame.clip(upper=median, lower=median)
        assert not (double.values != median).any()
        # Verify that self.frame was not changed inplace
        assert (self.frame.values == original.values).all()
    def test_inplace_clip(self):
        """clip_upper / clip_lower / clip honor inplace=True (GH #15388)."""
        # GH #15388
        median = self.frame.median().median()
        frame_copy = self.frame.copy()
        frame_copy.clip_upper(median, inplace=True)
        assert not (frame_copy.values > median).any()
        frame_copy = self.frame.copy()
        frame_copy.clip_lower(median, inplace=True)
        assert not (frame_copy.values < median).any()
        frame_copy = self.frame.copy()
        frame_copy.clip(upper=median, lower=median, inplace=True)
        assert not (frame_copy.values != median).any()
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
assert (clipped_df.values[lb_mask] == lb).all()
assert (clipped_df.values[ub_mask] == ub).all()
assert (clipped_df.values[mask] == df.values[mask]).all()
    def test_clip_mixed_numeric(self):
        """clip on mixed int/float columns with integer bounds coerces to
        float; NaN entries are left alone."""
        # TODO(jreback)
        # clip on mixed integer or floats
        # with integer clippers coerces to float
        df = DataFrame({'A': [1, 2, 3],
                        'B': [1., np.nan, 3.]})
        result = df.clip(1, 2)
        expected = DataFrame({'A': [1, 2, 2.],
                              'B': [1., np.nan, 2.]})
        tm.assert_frame_equal(result, expected, check_like=True)
    @pytest.mark.parametrize("inplace", [True, False])
    def test_clip_against_series(self, inplace):
        """clip with Series bounds along axis=0 clips row-wise, with and
        without inplace (GH #6966)."""
        # GH #6966
        df = DataFrame(np.random.randn(1000, 2))
        lb = Series(np.random.randn(1000))
        ub = lb + 1
        original = df.copy()
        clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
        if inplace:
            # inplace clip returns None; the mutated frame is the result
            clipped_df = df
        for i in range(2):
            lb_mask = original.iloc[:, i] <= lb
            ub_mask = original.iloc[:, i] >= ub
            mask = ~lb_mask & ~ub_mask
            result = clipped_df.loc[lb_mask, i]
            tm.assert_series_equal(result, lb[lb_mask], check_names=False)
            assert result.name == i
            result = clipped_df.loc[ub_mask, i]
            tm.assert_series_equal(result, ub[ub_mask], check_names=False)
            assert result.name == i
            tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
    @pytest.mark.parametrize("axis,res", [
        (0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
        (1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
    ])
    def test_clip_against_list_like(self, inplace, lower, axis, res):
        """clip accepts list/ndarray bounds broadcast along either axis
        (GH #15390)."""
        # GH #15390
        original = self.simple.copy(deep=True)
        result = original.clip(lower=lower, upper=[5, 6, 7],
                               axis=axis, inplace=inplace)
        expected = pd.DataFrame(res,
                                columns=original.columns,
                                index=original.index)
        if inplace:
            # inplace clip returns None; compare the mutated frame
            result = original
        tm.assert_frame_equal(result, expected, check_exact=True)
    @pytest.mark.parametrize("axis", [0, 1, None])
    def test_clip_against_frame(self, axis):
        """clip with DataFrame bounds clips elementwise for any axis."""
        df = DataFrame(np.random.randn(1000, 2))
        lb = DataFrame(np.random.randn(1000, 2))
        ub = lb + 1
        clipped_df = df.clip(lb, ub, axis=axis)
        lb_mask = df <= lb
        ub_mask = df >= ub
        mask = ~lb_mask & ~ub_mask
        tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
        tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
        tm.assert_frame_equal(clipped_df[mask], df[mask])
    def test_clip_with_na_args(self):
        """Scalar np.nan bounds are treated as "no bound" (GH 17276);
        list-like bounds may contain NaN, which propagates (GH 19992)."""
        # GH # 17276
        tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
        tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
                              self.frame)
        # GH #19992
        df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
                        'col_2': [7, 8, 9]})
        result = df.clip(lower=[4, 5, np.nan], axis=0)
        expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],
                              'col_2': [7, 8, np.nan]})
        tm.assert_frame_equal(result, expected)
        result = df.clip(lower=[4, 5, np.nan], axis=1)
        expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],
                              'col_2': [np.nan, np.nan, np.nan]})
        tm.assert_frame_equal(result, expected)
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
exp = a.dot(a.iloc[0])
tm.assert_series_equal(result, exp)
with tm.assert_raises_regex(ValueError,
'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
df.dot(df2)
    @pytest.mark.skipif(not PY35,
                        reason='matmul supported for Python>=3.5')
    @pytest.mark.xfail(
        _np_version_under1p12,
        reason="unpredictable return types under numpy < 1.12")
    def test_matmul(self):
        """The @ operator (operator.matmul, GH #10259) across DataFrame,
        Series, ndarray, and nested-list operands, including mixed dtypes
        and the unaligned-label error."""
        # matmul test is for GH #10259
        a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                      columns=['p', 'q', 'r', 's'])
        b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                      columns=['one', 'two'])
        # DataFrame @ DataFrame
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)
        # DataFrame @ Series
        result = operator.matmul(a, b.one)
        expected = Series(np.dot(a.values, b.one.values),
                          index=['a', 'b', 'c'])
        tm.assert_series_equal(result, expected)
        # np.array @ DataFrame
        result = operator.matmul(a.values, b)
        expected = np.dot(a.values, b.values)
        tm.assert_almost_equal(result, expected)
        # nested list @ DataFrame (__rmatmul__)
        result = operator.matmul(a.values.tolist(), b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_almost_equal(result.values, expected.values)
        # mixed dtype DataFrame @ DataFrame
        a['q'] = a.q.round().astype(int)
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)
        # different dtypes DataFrame @ DataFrame
        a = a.astype(int)
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)
        # unaligned
        df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
        df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
        with tm.assert_raises_regex(ValueError, 'aligned'):
            operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
    """Frame with duplicate index labels and tied values in 'a', used by
    the nlargest/nsmallest duplicate-index tests."""
    data = {'a': [1, 2, 3, 4, 4],
            'b': [1, 1, 1, 1, 1],
            'c': [0, 1, 2, 5, 4]}
    return pd.DataFrame(data, index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
    """Frame with int ('a'), string ('b'), and float ('c') columns for
    nlargest/nsmallest dtype-handling tests."""
    return pd.DataFrame({'a': np.random.permutation(10),
                         'b': list(ascii_lowercase[:10]),
                         'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
    """Frame with one column per major dtype (int, float, string,
    categorical, datetime, tz-aware datetime, timedelta) for
    nlargest/nsmallest dtype-validation tests."""
    return pd.DataFrame(
        {'group': [1, 1, 2],
         'int': [1, 2, 3],
         'float': [4., 5., 6.],
         'string': list('abc'),
         'category_string': pd.Series(list('abc')).astype('category'),
         'category_int': [7, 8, 9],
         'datetime': pd.date_range('20130101', periods=3),
         'datetimetz': pd.date_range('20130101',
                                     periods=3,
                                     tz='US/Eastern'),
         'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
        columns=['group', 'int', 'float', 'string',
                 'category_string', 'category_int',
                 'datetime', 'datetimetz',
                 'timedelta'])
class TestNLargestNSmallest(object):
dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
"use method {method!r} with this dtype")
# ----------------------------------------------------------------------
# Top / bottom
    @pytest.mark.parametrize('order', [
        ['a'],
        ['c'],
        ['a', 'b'],
        ['a', 'c'],
        ['b', 'a'],
        ['b', 'c'],
        ['a', 'b', 'c'],
        ['c', 'a', 'b'],
        ['c', 'b', 'a'],
        ['b', 'c', 'a'],
        ['b', 'a', 'c'],
        # dups!
        ['b', 'c', 'c']])
    @pytest.mark.parametrize('n', range(1, 11))
    def test_n(self, df_strings, nselect_method, n, order):
        """nlargest/nsmallest equal sort_values().head(n) for numeric
        columns; ordering by an object column raises TypeError (GH10393)."""
        # GH10393
        df = df_strings
        if 'b' in order:
            # 'b' is an object column, which nselect cannot order by
            error_msg = self.dtype_error_msg_template.format(
                column='b', method=nselect_method, dtype='object')
            with tm.assert_raises_regex(TypeError, error_msg):
                getattr(df, nselect_method)(n, order)
        else:
            ascending = nselect_method == 'nsmallest'
            result = getattr(df, nselect_method)(n, order)
            expected = df.sort_values(order, ascending=ascending).head(n)
            tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize('columns', [
        ('group', 'category_string'), ('group', 'string')])
    def test_n_error(self, df_main_dtypes, nselect_method, columns):
        """nlargest/nsmallest raise TypeError when ordering by categorical
        or object columns."""
        df = df_main_dtypes
        col = columns[1]
        error_msg = self.dtype_error_msg_template.format(
            column=col, method=nselect_method, dtype=df[col].dtype)
        # escape some characters that may be in the repr
        error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
                     .replace("[", "\\[").replace("]", "\\]"))
        with tm.assert_raises_regex(TypeError, error_msg):
            getattr(df, nselect_method)(2, columns)
    def test_n_all_dtypes(self, df_main_dtypes):
        """Smoke test: nsmallest/nlargest accept every column dtype except
        the categorical/object ones, which are excluded here."""
        df = df_main_dtypes
        df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
        df.nlargest(2, list(set(df) - {'category_string', 'string'}))
def test_n_identical_values(self):
# GH15297
df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})
result = df.nlargest(3, 'a')
expected = pd.DataFrame(
{'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
)
tm.assert_frame_equal(result, expected)
result = df.nsmallest(3, 'a')
expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize('order', [
        ['a', 'b', 'c'],
        ['c', 'b', 'a'],
        ['a'],
        ['b'],
        ['a', 'b'],
        ['c', 'b']])
    @pytest.mark.parametrize('n', range(1, 6))
    def test_n_duplicate_index(self, df_duplicates, n, order):
        """nsmallest/nlargest agree with sort_values().head(n) even when
        the index contains duplicate labels (GH 13412)."""
        # GH 13412
        df = df_duplicates
        result = df.nsmallest(n, order)
        expected = df.sort_values(order).head(n)
        tm.assert_frame_equal(result, expected)
        result = df.nlargest(n, order)
        expected = df.sort_values(order, ascending=False).head(n)
        tm.assert_frame_equal(result, expected)
    def test_duplicate_keep_all_ties(self):
        """keep='all' retains every row tied with the last selected value,
        so the result can exceed n rows (gh-16818)."""
        # see gh-16818
        df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
                           'b': [10, 9, 8, 7, 5, 50, 10, 20]})
        result = df.nlargest(4, 'a', keep='all')
        expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
                                       5: 3, 6: 3, 7: 3},
                                 'b': {0: 10, 1: 9, 2: 8, 4: 5,
                                       5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)
        result = df.nsmallest(2, 'a', keep='all')
        expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
                                 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)
    def test_series_broadcasting(self):
        """Smoke test: broadcasting a NaN-containing Series against a frame
        in clip_lower and comparisons must not emit numpy warnings
        (GH 16378, GH 16306)."""
        # smoke test for numpy warnings
        # GH 16378, GH 16306
        df = DataFrame([1.0, 1.0, 1.0])
        df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
        s = Series([1, 1, 1])
        s_nan = Series([np.nan, np.nan, 1])
        with tm.assert_produces_warning(None):
            df_nan.clip_lower(s, axis=0)
            for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
                getattr(df, op)(s_nan, axis=0)
    def test_series_nat_conversion(self):
        """rank() must not mutate the DataFrame it is called on (GH 18521).

        NOTE(review): the method name suggests NaT conversion, but the body
        only checks rank's non-mutation — presumably a leftover name from
        the originating issue; confirm against GH 18521 before renaming.
        """
        # GH 18521
        # Check rank does not mutate DataFrame
        df = DataFrame(np.random.randn(10, 3), dtype='float64')
        expected = df.copy()
        df.rank()
        result = df
        tm.assert_frame_equal(result, expected)
| 37.656613 | 79 | 0.509698 |
from __future__ import print_function
import warnings
from datetime import timedelta
import operator
import pytest
from string import ascii_lowercase
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
_np_version_under1p12,
to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(TestData):
@td.skip_if_no_scipy
def test_corr_pearson(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
result = self.mixed_frame.corr()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_cov(self):
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
tm.assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
assert isna(result.values).all()
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
tm.assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
result = self.mixed_frame.cov()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self):
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe.add(noise, axis=0)
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
result = cat.describe()
assert len(result.columns) == 1
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_describe_categorical_columns(self):
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
res = df.describe()
tm.assert_frame_equal(res, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(res) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
res = df.describe(include='all')
tm.assert_frame_equal(res, expected)
def test_reduce_mixed_frame(self):
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_nunique(self):
f = lambda s: len(algorithms.unique1d(s.dropna()))
self._check_stat_op('nunique', f, has_skipna=False,
check_dtype=False, check_dates=True)
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
    def test_sum(self):
        """sum reduction on the float fixture and on mixed float dtypes."""
        self._check_stat_op('sum', np.sum, has_numeric_only=True,
                            skipna_alternative=np.nansum)
        # float32 input: compare with reduced precision
        self._check_stat_op('sum', np.sum,
                            frame=self.mixed_float.astype('float32'),
                            has_numeric_only=True, check_dtype=False,
                            check_less_precise=True)
    @pytest.mark.parametrize(
        "method", ['sum', 'mean', 'prod', 'var',
                   'std', 'skew', 'min', 'max'])
    def test_stat_operators_attempt_obj_array(self, method):
        """Stat ops on object-dtype frames must not raise; sum/prod must
        match the float-coerced result exactly."""
        data = {
            'a': [-0.00049987540199591344, -0.0016467257772919831,
                  0.00067695870775883013],
            'b': [-0, -0, 0.0],
            'c': [0.00031111847529610595, 0.0014902627951905339,
                  -0.00094099200035979691]
        }
        df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
                        dtype='O')
        df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
                         2: [np.nan, 4]}, dtype=object)
        for df in [df1, df2]:
            assert df.values.dtype == np.object_
            result = getattr(df, method)(1)
            expected = getattr(df.astype('f8'), method)(1)
            # only sum/prod are compared for equality here; the other
            # methods are exercised as smoke checks
            if method in ['sum', 'prod']:
                tm.assert_series_equal(result, expected)
    def test_mean(self):
        """mean reduction matches np.mean (datetime columns smoke-tested)."""
        self._check_stat_op('mean', np.mean, check_dates=True)
    def test_product(self):
        """product reduction matches np.prod."""
        self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
    def test_min(self):
        """min reduction on the float and int fixtures."""
        # record=True swallows whatever np.min warns about on this data
        with warnings.catch_warnings(record=True):
            self._check_stat_op('min', np.min, check_dates=True)
        self._check_stat_op('min', np.min, frame=self.intframe)
    def test_cummin(self):
        """cummin along both axes matches a per-Series apply."""
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cummin = self.tsframe.cummin()
        expected = self.tsframe.apply(Series.cummin)
        tm.assert_frame_equal(cummin, expected)
        # axis = 1
        cummin = self.tsframe.cummin(axis=1)
        expected = self.tsframe.apply(Series.cummin, axis=1)
        tm.assert_frame_equal(cummin, expected)
        # works on an int-dtype frame (smoke check; result deliberately unused)
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummin()
        # shape must be preserved along axis=1
        cummin_xs = self.tsframe.cummin(axis=1)
        assert np.shape(cummin_xs) == np.shape(self.tsframe)
    def test_cummax(self):
        """cummax along both axes matches a per-Series apply."""
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cummax = self.tsframe.cummax()
        expected = self.tsframe.apply(Series.cummax)
        tm.assert_frame_equal(cummax, expected)
        # axis = 1
        cummax = self.tsframe.cummax(axis=1)
        expected = self.tsframe.apply(Series.cummax, axis=1)
        tm.assert_frame_equal(cummax, expected)
        # works on an int-dtype frame (smoke check; result deliberately unused)
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cummax()
        # shape must be preserved along axis=1
        cummax_xs = self.tsframe.cummax(axis=1)
        assert np.shape(cummax_xs) == np.shape(self.tsframe)
    def test_max(self):
        """max reduction on the float and int fixtures."""
        # record=True swallows whatever np.max warns about on this data
        with warnings.catch_warnings(record=True):
            self._check_stat_op('max', np.max, check_dates=True)
        self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
    def test_var_std(self):
        """var/std with default and custom ddof; nanvar must stay >= 0."""
        alt = lambda x: np.var(x, ddof=1)
        self._check_stat_op('var', alt)
        alt = lambda x: np.std(x, ddof=1)
        self._check_stat_op('std', alt)
        # non-default ddof propagates to each column
        result = self.tsframe.std(ddof=4)
        expected = self.tsframe.apply(lambda x: x.std(ddof=4))
        tm.assert_almost_equal(result, expected)
        result = self.tsframe.var(ddof=4)
        expected = self.tsframe.apply(lambda x: x.var(ddof=4))
        tm.assert_almost_equal(result, expected)
        # near-constant columns must not produce negative variance,
        # with or without bottleneck
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nanvar(arr, axis=0)
        assert not (result < 0).any()
        with pd.option_context('use_bottleneck', False):
            result = nanops.nanvar(arr, axis=0)
            assert not (result < 0).any()
    @pytest.mark.parametrize(
        "meth", ['sem', 'var', 'std'])
    def test_numeric_only_flag(self, meth):
        """numeric_only=True skips non-numeric data; False raises on it."""
        df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # a numeric-looking string
        df1.loc[0, 'foo'] = '100'
        df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # a genuinely non-numeric string
        df2.loc[0, 'foo'] = 'a'
        result = getattr(df1, meth)(axis=1, numeric_only=True)
        expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
        tm.assert_series_equal(expected, result)
        result = getattr(df2, meth)(axis=1, numeric_only=True)
        expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
        tm.assert_series_equal(expected, result)
        # numeric_only=False must raise for both frames
        pytest.raises(TypeError, lambda: getattr(df1, meth)(
            axis=1, numeric_only=False))
        pytest.raises(TypeError, lambda: getattr(df2, meth)(
            axis=1, numeric_only=False))
    @pytest.mark.parametrize('op', ['mean', 'std', 'var',
                                    'skew', 'kurt', 'sem'])
    def test_mixed_ops(self, op):
        """Reductions on a mixed frame cover only the two numeric columns."""
        df = DataFrame({'int': [1, 2, 3, 4],
                        'float': [1., 2., 3., 4.],
                        'str': ['a', 'b', 'c', 'd']})
        result = getattr(df, op)()
        assert len(result) == 2
        # same outcome with bottleneck disabled
        with pd.option_context('use_bottleneck', False):
            result = getattr(df, op)()
            assert len(result) == 2
    def test_cumsum(self):
        """cumsum along both axes matches a per-Series apply."""
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cumsum = self.tsframe.cumsum()
        expected = self.tsframe.apply(Series.cumsum)
        tm.assert_frame_equal(cumsum, expected)
        # axis = 1
        cumsum = self.tsframe.cumsum(axis=1)
        expected = self.tsframe.apply(Series.cumsum, axis=1)
        tm.assert_frame_equal(cumsum, expected)
        # works on an int-dtype frame (smoke check; result deliberately unused)
        df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
        result = df.cumsum()
        # shape must be preserved along axis=1
        cumsum_xs = self.tsframe.cumsum(axis=1)
        assert np.shape(cumsum_xs) == np.shape(self.tsframe)
    def test_cumprod(self):
        """cumprod along both axes; int dtypes smoke-tested as well."""
        self.tsframe.loc[5:10, 0] = nan
        self.tsframe.loc[10:15, 1] = nan
        self.tsframe.loc[15:, 2] = nan
        # axis = 0
        cumprod = self.tsframe.cumprod()
        expected = self.tsframe.apply(Series.cumprod)
        tm.assert_frame_equal(cumprod, expected)
        # axis = 1
        cumprod = self.tsframe.cumprod(axis=1)
        expected = self.tsframe.apply(Series.cumprod, axis=1)
        tm.assert_frame_equal(cumprod, expected)
        # shape must be preserved along axis=1
        cumprod_xs = self.tsframe.cumprod(axis=1)
        assert np.shape(cumprod_xs) == np.shape(self.tsframe)
        # int frames must not raise (results deliberately unchecked)
        df = self.tsframe.fillna(0).astype(int)
        df.cumprod(0)
        df.cumprod(1)
        df = self.tsframe.fillna(0).astype(np.int32)
        df.cumprod(0)
        df.cumprod(1)
    def test_sem(self):
        """Standard error of the mean: std(ddof)/sqrt(n); never negative."""
        alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
        self._check_stat_op('sem', alt)
        result = self.tsframe.sem(ddof=4)
        expected = self.tsframe.apply(
            lambda x: x.std(ddof=4) / np.sqrt(len(x)))
        tm.assert_almost_equal(result, expected)
        # near-constant columns must not produce a negative sem,
        # with or without bottleneck
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nansem(arr, axis=0)
        assert not (result < 0).any()
        with pd.option_context('use_bottleneck', False):
            result = nanops.nansem(arr, axis=0)
            assert not (result < 0).any()
    @td.skip_if_no_scipy
    def test_skew(self):
        """skew matches scipy's bias-corrected skew; NaN below 3 points."""
        from scipy.stats import skew
        def alt(x):
            # bias-corrected skew is undefined for fewer than 3 observations
            if len(x) < 3:
                return np.nan
            return skew(x, bias=False)
        self._check_stat_op('skew', alt)
    @td.skip_if_no_scipy
    def test_kurt(self):
        """kurt matches scipy's kurtosis; also level-based kurt on MultiIndex."""
        from scipy.stats import kurtosis
        def alt(x):
            # bias-corrected kurtosis is undefined for fewer than 4 observations
            if len(x) < 4:
                return np.nan
            return kurtosis(x, bias=False)
        self._check_stat_op('kurt', alt)
        index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 0, 0, 0, 0, 0],
                                   [0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
        df = DataFrame(np.random.randn(6, 3), index=index)
        kurt = df.kurt()
        # a single-value outer level: level-based kurt reduces to plain kurt
        kurt2 = df.kurt(level=0).xs('bar')
        tm.assert_series_equal(kurt, kurt2, check_names=False)
        assert kurt.name is None
        assert kurt2.name == 'bar'
    def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
                       has_numeric_only=False, check_dtype=True,
                       check_dates=False, check_less_precise=False,
                       skipna_alternative=None):
        """Generic harness: compare frame.<name>() with *alternative*.

        Parameters
        ----------
        name : str
            Reduction method looked up on the frame.
        alternative : callable
            Reference implementation, applied column-/row-wise via apply.
        frame : DataFrame, optional
            Fixture to exercise; defaults to self.frame with NaNs injected.
        has_skipna : bool
            Also test skipna=False against *alternative* directly, and the
            all-NaN frame case.
        has_numeric_only : bool
            Exercise the numeric_only keyword on the mixed fixture.
        check_dtype, check_less_precise : bool
            Passed through to tm.assert_series_equal.
        check_dates : bool
            Smoke-test the op on a frame with a datetime column first.
        skipna_alternative : callable, optional
            NaN-aware reference (e.g. np.nansum) for the skipna path.
        """
        if frame is None:
            frame = self.frame
            # inject some NaNs into the shared fixture
            frame.loc[5:10] = np.nan
            frame.loc[15:20, -2:] = np.nan
        f = getattr(frame, name)
        if check_dates:
            # datetime-only, then datetime + int: both must return a Series
            df = DataFrame({'b': date_range('1/1/2001', periods=2)})
            _f = getattr(df, name)
            result = _f()
            assert isinstance(result, Series)
            df['a'] = lrange(len(df))
            result = getattr(df, name)()
            assert isinstance(result, Series)
            assert len(result)
        if has_skipna:
            def wrapper(x):
                return alternative(x.values)
            skipna_wrapper = tm._make_skipna_wrapper(alternative,
                                                     skipna_alternative)
            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            tm.assert_series_equal(result0, frame.apply(wrapper),
                                   check_dtype=check_dtype,
                                   check_less_precise=check_less_precise)
            tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                   check_dtype=False,
                                   check_less_precise=check_less_precise)
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        # default (skipna=True) path
        result0 = f(axis=0)
        result1 = f(axis=1)
        tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
                               check_dtype=check_dtype,
                               check_less_precise=check_less_precise)
        if name in ['sum', 'prod']:
            exp = frame.apply(skipna_wrapper, axis=1)
            tm.assert_series_equal(result1, exp, check_dtype=False,
                                   check_less_precise=check_less_precise)
        # result dtype should match the frame's lowest-common dtype
        if check_dtype:
            lcd_dtype = frame.values.dtype
            assert lcd_dtype == result0.dtype
            assert lcd_dtype == result1.dtype
        # invalid axis
        tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
        # mixed-type frame must not raise
        getattr(self.mixed_frame, name)(axis=0)
        getattr(self.mixed_frame, name)(axis=1)
        if has_numeric_only:
            getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
            getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
            getattr(self.frame, name)(axis=0, numeric_only=False)
            getattr(self.frame, name)(axis=1, numeric_only=False)
        if has_skipna:
            # all-NaN frame: sum/prod reduce to their identity element
            all_na = self.frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if name in ['sum', 'prod']:
                unit = int(name == 'prod')
                expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
                tm.assert_series_equal(r0, expected)
                expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
                tm.assert_series_equal(r1, expected)
    @pytest.mark.parametrize("dropna, expected", [
        (True, {'A': [12],
                'B': [10.0],
                'C': [1.0],
                'D': ['a'],
                'E': Categorical(['a'], categories=['a']),
                'F': to_datetime(['2000-1-2']),
                'G': to_timedelta(['1 days'])}),
        (False, {'A': [12],
                 'B': [10.0],
                 'C': [np.nan],
                 'D': np.array([np.nan], dtype=object),
                 'E': Categorical([np.nan], categories=['a']),
                 'F': [pd.NaT],
                 'G': to_timedelta([pd.NaT])}),
        (True, {'H': [8, 9, np.nan, np.nan],
                'I': [8, 9, np.nan, np.nan],
                'J': [1, np.nan, np.nan, np.nan],
                'K': Categorical(['a', np.nan, np.nan, np.nan],
                                 categories=['a']),
                'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
                'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
                'N': [0, 1, 2, 3]}),
        (False, {'H': [8, 9, np.nan, np.nan],
                 'I': [8, 9, np.nan, np.nan],
                 'J': [1, np.nan, np.nan, np.nan],
                 'K': Categorical([np.nan, 'a', np.nan, np.nan],
                                  categories=['a']),
                 'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
                 'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
                 'N': [0, 1, 2, 3]})
    ])
    def test_mode_dropna(self, dropna, expected):
        """DataFrame.mode honours the dropna flag across dtypes
        (numeric, object, Categorical, datetime, timedelta)."""
        df = DataFrame({"A": [12, 12, 19, 11],
                        "B": [10, 10, np.nan, 3],
                        "C": [1, np.nan, np.nan, np.nan],
                        "D": [np.nan, np.nan, 'a', np.nan],
                        "E": Categorical([np.nan, np.nan, 'a', np.nan]),
                        "F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
                        "G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
                        "H": [8, 8, 9, 9],
                        "I": [9, 9, 8, 8],
                        "J": [1, 1, np.nan, np.nan],
                        "K": Categorical(['a', np.nan, 'a', np.nan]),
                        "L": to_datetime(['2000-1-2', '2000-1-2',
                                          'NaT', 'NaT']),
                        "M": to_timedelta(['1 days', 'nan',
                                           '1 days', 'nan']),
                        "N": np.arange(4, dtype='int64')})
        # only the columns named in this parametrization are compared
        result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
        expected = DataFrame(expected)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.skipif(not compat.PY3, reason="only PY3")
    def test_mode_sortwarning(self):
        """mode(dropna=False) emits a UserWarning but still returns values."""
        df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
        expected = DataFrame({'A': ['a', np.nan]})
        with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
            result = df.mode(dropna=False)
            # sort here since the warned-about path leaves order unspecified
            result = result.sort_values(by='A').reset_index(drop=True)
        tm.assert_frame_equal(result, expected)
    def test_operators_timedelta64(self):
        """min/max/abs on timedelta64 frames, including mixed-dtype frames."""
        from datetime import timedelta
        df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
                            B=date_range('2012-1-2', periods=3, freq='D'),
                            C=Timestamp('20120101') -
                            timedelta(minutes=5, seconds=5)))
        diffs = DataFrame(dict(A=df['A'] - df['C'],
                               B=df['A'] - df['B']))
        # min picks the first (smallest) deltas
        result = diffs.min()
        assert result[0] == diffs.loc[0, 'A']
        assert result[1] == diffs.loc[0, 'B']
        result = diffs.min(axis=1)
        assert (result == diffs.loc[0, 'B']).all()
        # max picks the last (largest) deltas
        result = diffs.max()
        assert result[0] == diffs.loc[2, 'A']
        assert result[1] == diffs.loc[2, 'B']
        result = diffs.max(axis=1)
        assert (result == diffs['A']).all()
        # abs() and the builtin abs agree
        result = diffs.abs()
        result2 = abs(diffs)
        expected = DataFrame(dict(A=df['A'] - df['C'],
                                  B=df['B'] - df['A']))
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)
        # mixed frame: timedeltas plus str/int/float/Timestamp columns
        mixed = diffs.copy()
        mixed['C'] = 'foo'
        mixed['D'] = 1
        mixed['E'] = 1.
        mixed['F'] = Timestamp('20130101')
        from pandas.core.tools.timedeltas import (
            _coerce_scalar_to_timedelta_type as _coerce)
        result = mixed.min()
        expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
                           _coerce(timedelta(days=-1)),
                           'foo', 1, 1.0,
                           Timestamp('20130101')],
                          index=mixed.columns)
        tm.assert_series_equal(result, expected)
        # row-wise min over the mixed frame keeps only comparable values
        result = mixed.min(axis=1)
        expected = Series([1, 1, 1.], index=[0, 1, 2])
        tm.assert_series_equal(result, expected)
        # restricting to the timedelta columns works row- and column-wise
        result = mixed[['A', 'B']].min(1)
        expected = Series([timedelta(days=-1)] * 3)
        tm.assert_series_equal(result, expected)
        result = mixed[['A', 'B']].min()
        expected = Series([timedelta(seconds=5 * 60 + 5),
                           timedelta(days=-1)], index=['A', 'B'])
        tm.assert_series_equal(result, expected)
        # datetime subtraction yields timedelta64[ns], also after consolidation
        df = DataFrame({'time': date_range('20130102', periods=5),
                        'time2': date_range('20130105', periods=5)})
        df['off1'] = df['time2'] - df['time']
        assert df['off1'].dtype == 'timedelta64[ns]'
        df['off2'] = df['time'] - df['time2']
        df._consolidate_inplace()
        assert df['off1'].dtype == 'timedelta64[ns]'
        assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
result = getattr(df, method)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
    def test_sum_nanops_timedelta(self):
        """min_count semantics when summing timedelta64 columns."""
        idx = ['a', 'b', 'c']
        df = pd.DataFrame({"a": [0, 0],
                           "b": [0, np.nan],
                           "c": [np.nan, np.nan]})
        df2 = df.apply(pd.to_timedelta)
        # 0 by default
        result = df2.sum()
        expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
        tm.assert_series_equal(result, expected)
        # min_count=0
        result = df2.sum(min_count=0)
        tm.assert_series_equal(result, expected)
        # min_count=1
        result = df2.sum(min_count=1)
        expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
        tm.assert_series_equal(result, expected)
    def test_sum_object(self):
        """Summing a frame of timedelta objects must not raise (smoke)."""
        values = self.frame.values.astype(int)
        frame = DataFrame(values, index=self.frame.index,
                          columns=self.frame.columns)
        deltas = frame * timedelta(1)
        deltas.sum()
    def test_sum_bool(self):
        """Summing a boolean frame along either axis must not raise."""
        # ensure this works, bug report
        bools = np.isnan(self.frame)
        bools.sum(1)
        bools.sum(0)
    def test_mean_corner(self):
        """mean/sum corners: mixed object data and a boolean column."""
        # unit test when have object data
        the_mean = self.mixed_frame.mean(axis=0)
        the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
        tm.assert_index_equal(the_sum.index, the_mean.index)
        # non-numeric columns are excluded from the result
        assert len(the_mean.index) < len(self.mixed_frame.columns)
        # xs sum mixed type, just want to know it works...
        the_mean = self.mixed_frame.mean(axis=1)
        the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
        tm.assert_index_equal(the_sum.index, the_mean.index)
        # take mean of boolean column
        self.frame['bool'] = self.frame['A'] > 0
        means = self.frame.mean(0)
        assert means['bool'] == self.frame['bool'].values.mean()
def test_stats_mixed_type(self):
# don't blow up
self.mixed_frame.std(1)
self.mixed_frame.var(1)
self.mixed_frame.mean(1)
self.mixed_frame.skew(1)
    def test_median_corner(self):
        """median on the int fixture via the generic harness."""
        def wrapper(x):
            # any NaN poisons the reference median
            if isna(x).any():
                return np.nan
            return np.median(x)
        self._check_stat_op('median', wrapper, frame=self.intframe,
                            check_dtype=False, check_dates=True)
    def test_count_objects(self):
        """count on two identically-built object frames agrees on both axes."""
        dm = DataFrame(self.mixed_frame._series)
        df = DataFrame(self.mixed_frame._series)
        tm.assert_series_equal(dm.count(), df.count())
        tm.assert_series_equal(dm.count(1), df.count(1))
    def test_cumsum_corner(self):
        """cumsum on an int frame must not raise (smoke; result unchecked)."""
        dm = DataFrame(np.arange(20).reshape(4, 5),
                       index=lrange(4), columns=lrange(5))
        result = dm.cumsum()  # value deliberately unused
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
def test_idxmin(self):
frame = self.frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, frame.idxmin, axis=2)
    def test_idxmax(self):
        """idxmax agrees with Series.idxmax applied per axis/skipna/frame."""
        frame = self.frame
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan
        for skipna in [True, False]:
            for axis in [0, 1]:
                for df in [frame, self.intframe]:
                    result = df.idxmax(axis=axis, skipna=skipna)
                    expected = df.apply(Series.idxmax, axis=axis,
                                        skipna=skipna)
                    tm.assert_series_equal(result, expected)
        # invalid axis numbers raise
        pytest.raises(ValueError, frame.idxmax, axis=2)
    def test_any_all(self):
        """any/all through the generic boolean-reduction harness."""
        self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
        self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
    def test_any_all_extra(self):
        """any/all with bool_only and with axis=None scalar reduction."""
        df = DataFrame({
            'A': [True, False, False],
            'B': [True, True, False],
            'C': [True, True, True],
        }, index=['a', 'b', 'c'])
        result = df[['A', 'B']].any(1)
        expected = Series([True, True, False], index=['a', 'b', 'c'])
        tm.assert_series_equal(result, expected)
        result = df[['A', 'B']].any(1, bool_only=True)
        tm.assert_series_equal(result, expected)
        result = df.all(1)
        expected = Series([True, False, False], index=['a', 'b', 'c'])
        tm.assert_series_equal(result, expected)
        result = df.all(1, bool_only=True)
        tm.assert_series_equal(result, expected)
        # axis=None reduces the whole frame to one scalar
        result = df.all(axis=None).item()
        assert result is False
        result = df.any(axis=None).item()
        assert result is True
        result = df[['C']].all(axis=None).item()
        assert result is True
    @pytest.mark.parametrize('func, data, expected', [
        (np.any, {}, False),
        (np.all, {}, True),
        (np.any, {'A': []}, False),
        (np.all, {'A': []}, True),
        (np.any, {'A': [False, False]}, False),
        (np.all, {'A': [False, False]}, False),
        (np.any, {'A': [True, False]}, True),
        (np.all, {'A': [True, False]}, False),
        (np.any, {'A': [True, True]}, True),
        (np.all, {'A': [True, True]}, True),
        (np.any, {'A': [False], 'B': [False]}, False),
        (np.all, {'A': [False], 'B': [False]}, False),
        (np.any, {'A': [False, False], 'B': [False, True]}, True),
        (np.all, {'A': [False, False], 'B': [False, True]}, False),
        (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
        (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
        (np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
        (np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
        pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                     marks=[td.skip_if_np_lt_115]),
        (np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
        (np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
        (np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
        (np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
    ])
    def test_any_all_np_func(self, func, data, expected):
        """np.any/np.all over whole frames return a scalar np.bool_,
        both via the numpy function and via the axis=None method call."""
        data = DataFrame(data)
        result = func(data)
        assert isinstance(result, np.bool_)
        assert result.item() is expected
        # method call with axis=None must agree with the numpy function
        result = getattr(DataFrame(data), func.__name__)(axis=None)
        assert isinstance(result, np.bool_)
        assert result.item() is expected
def test_any_all_object(self):
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
    @pytest.mark.parametrize('method', ['any', 'all'])
    def test_any_all_level_axis_none_raises(self, method):
        """axis=None cannot be combined with level-based reduction."""
        df = DataFrame(
            {"A": 1},
            index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
                                          names=['out', 'in'])
        )
        xpr = "Must specify 'axis' when aggregating by level."
        with tm.assert_raises_regex(ValueError, xpr):
            getattr(df, method)(axis=None, level='out')
    def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
                       has_bool_only=False):
        """Generic harness for boolean reductions (any/all).

        Parameters
        ----------
        name : str
            Boolean reduction method looked up on the frame.
        alternative : callable
            Reference implementation (np.any / np.all) applied via apply.
        frame : DataFrame, optional
            Fixture to exercise; defaults to an object-dtype boolean frame
            derived from self.frame with NaNs injected.
        has_skipna : bool
            Also test skipna=False and the all-NaN frame case.
        has_bool_only : bool
            Exercise the bool_only keyword on the mixed fixture.
        """
        if frame is None:
            frame = self.frame > 0
            # object dtype so injected NaNs survive
            frame = DataFrame(frame.values.astype(object), frame.index,
                              frame.columns)
            frame.loc[5:10] = np.nan
            frame.loc[15:20, -2:] = np.nan
        f = getattr(frame, name)
        if has_skipna:
            def skipna_wrapper(x):
                nona = x.dropna().values
                return alternative(nona)
            def wrapper(x):
                return alternative(x.values)
            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            tm.assert_series_equal(result0, frame.apply(wrapper))
            tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                   check_dtype=False)
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        # default (skipna=True) path
        result0 = f(axis=0)
        result1 = f(axis=1)
        tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
        tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                               check_dtype=False)
        # invalid axis
        pytest.raises(ValueError, f, axis=2)
        # mixed-type frame must not raise
        mixed = self.mixed_frame
        mixed['_bool_'] = np.random.randn(len(mixed)) > 0
        getattr(mixed, name)(axis=0)
        getattr(mixed, name)(axis=1)
        class NonzeroFail(object):
            # objects whose truthiness raises must be tolerated by bool_only
            def __nonzero__(self):
                raise ValueError
        mixed['_nonzero_fail_'] = NonzeroFail()
        if has_bool_only:
            getattr(mixed, name)(axis=0, bool_only=True)
            getattr(mixed, name)(axis=1, bool_only=True)
            getattr(frame, name)(axis=0, bool_only=False)
            getattr(frame, name)(axis=1, bool_only=False)
        if has_skipna:
            # all-NaN frame: any() is False, all() is True after skipping NaN
            all_na = frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if name == 'any':
                assert not r0.any()
                assert not r1.any()
            else:
                assert r0.all()
                assert r1.all()
def test_isin(self):
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("empty", [[], Series(), np.array([])])
    def test_isin_empty(self, empty):
        """isin against any flavour of empty collection is all-False."""
        df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
        expected = DataFrame(False, df.index, df.columns)
        result = df.isin(empty)
        tm.assert_frame_equal(result, expected)
    def test_isin_dict(self):
        """isin with a dict restricts matching to the keyed columns."""
        df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
        d = {'A': ['a']}
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, 'A'] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)
        # duplicate column labels: both 'A' columns are matched
        df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
        df.columns = ['A', 'A']
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, 'A'] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
    def test_isin_df(self):
        """isin(DataFrame) matches only where both label and position align."""
        df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
        df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
        expected = DataFrame(False, df1.index, df1.columns)
        result = df1.isin(df2)
        expected['A'].loc[[1, 3]] = True
        expected['B'].loc[[0, 2]] = True
        tm.assert_frame_equal(result, expected)
        # partially overlapping columns: non-shared columns go all-False
        df2.columns = ['A', 'C']
        result = df1.isin(df2)
        expected['B'] = False
        tm.assert_frame_equal(result, expected)
    def test_isin_tuples(self):
        """Series.isin works with tuple-valued entries."""
        df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
        df['C'] = list(zip(df['A'], df['B']))
        result = df['C'].isin([(1, 'a')])
        tm.assert_series_equal(result,
                               Series([True, False, False], name="C"))
    def test_isin_df_dupe_values(self):
        """isin(DataFrame) raises on duplicate column or index labels."""
        df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
        # duplicated columns only
        df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                        columns=['B', 'B'])
        with pytest.raises(ValueError):
            df1.isin(df2)
        # duplicated index only
        df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                        columns=['A', 'B'], index=[0, 0, 1, 1])
        with pytest.raises(ValueError):
            df1.isin(df2)
        # both duplicated
        df2.columns = ['B', 'B']
        with pytest.raises(ValueError):
            df1.isin(df2)
    def test_isin_dupe_self(self):
        """isin on a frame that itself carries duplicate column labels."""
        other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
        df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
        result = df.isin(other)
        expected = DataFrame(False, index=df.index, columns=df.columns)
        expected.loc[0] = True
        expected.iloc[1, 1] = True
        tm.assert_frame_equal(result, expected)
    def test_isin_against_series(self):
        """isin(Series) matches values element-wise on the shared index."""
        df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
                          index=['a', 'b', 'c', 'd'])
        s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
        expected = DataFrame(False, index=df.index, columns=df.columns)
        expected['A'].loc['a'] = True
        expected.loc['d'] = True
        result = df.isin(s)
        tm.assert_frame_equal(result, expected)
    def test_isin_multiIndex(self):
        """isin on a MultiIndexed frame, against mismatched and matching frames."""
        idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
                                      (0, 'b', 'bar'), (0, 'b', 'baz'),
                                      (2, 'a', 'foo'), (2, 'a', 'bar'),
                                      (2, 'c', 'bar'), (2, 'c', 'baz'),
                                      (1, 'b', 'foo'), (1, 'b', 'bar'),
                                      (1, 'c', 'bar'), (1, 'c', 'baz')])
        df1 = DataFrame({'A': np.ones(12),
                         'B': np.zeros(12)}, index=idx)
        df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                         'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
        # default index vs MultiIndex: nothing aligns, everything False
        expected = DataFrame(False, index=df1.index, columns=df1.columns)
        result = df1.isin(df2)
        tm.assert_frame_equal(result, expected)
        # same index: A (all ones) matches where df2.A is truthy,
        # B (all zeros) matches where df2.B is falsy -- hence the inversion
        df2.index = idx
        expected = df2.values.astype(np.bool)
        expected[:, 1] = ~expected[:, 1]
        expected = DataFrame(expected, columns=['A', 'B'], index=idx)
        result = df1.isin(df2)
        tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
def test_round(self):
df = DataFrame()
tm.assert_frame_equal(df, df.round())
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
nan_round_Series = Series({'col1': nan, 'col2': 1})
expected_nan_round = DataFrame({
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
with pytest.raises(TypeError):
df.round(nan_round_Series)
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
tm.assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
tm.assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
    def test_numpy_round(self):
        """np.round dispatches to DataFrame.round; 'out' is unsupported."""
        # See gh-12600
        df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
        out = np.round(df, decimals=0)
        expected = DataFrame([[2., 1.], [0., 7.]])
        tm.assert_frame_equal(out, expected)
        msg = "the 'out' parameter is not supported"
        with tm.assert_raises_regex(ValueError, msg):
            np.round(df, decimals=0, out=df)
    def test_round_mixed_type(self):
        """round leaves non-numeric columns untouched in a mixed frame."""
        # GH11885
        df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
                        'col2': ['1', 'a', 'c', 'f'],
                        'col3': date_range('20111111', periods=4)})
        round_0 = DataFrame({'col1': [1., 2., 3., 4.],
                             'col2': ['1', 'a', 'c', 'f'],
                             'col3': date_range('20111111', periods=4)})
        tm.assert_frame_equal(df.round(), round_0)
        tm.assert_frame_equal(df.round(1), df)
        tm.assert_frame_equal(df.round({'col1': 1}), df)
        tm.assert_frame_equal(df.round({'col1': 0}), round_0)
        tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
        # dict targeting only a non-numeric column is a no-op
        tm.assert_frame_equal(df.round({'col3': 1}), df)
    def test_round_issue(self):
        """round on duplicated columns keeps the index; duplicate-labelled
        decimals Series raises."""
        # GH11611
        df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
                          index=['first', 'second', 'third'])
        dfs = pd.concat((df, df), axis=1)
        rounded = dfs.round()
        tm.assert_index_equal(rounded.index, dfs.index)
        decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
        pytest.raises(ValueError, df.round, decimals)
    def test_built_in_round(self):
        """The builtin round() delegates to DataFrame rounding on Python 3."""
        if not compat.PY3:
            pytest.skip("build in round cannot be overridden "
                        "prior to Python 3")
        # GH11763
        # Here's the test frame we'll be working with
        df = DataFrame(
            {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
        # Default round to integer (i.e. decimals=0)
        expected_rounded = DataFrame(
            {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
        tm.assert_frame_equal(round(df), expected_rounded)
    def test_pct_change(self):
        """pct_change(fill_method='pad') matches an explicit ffill/shift ratio."""
        # GH 11150
        pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
            0, 40, 10)]).astype(np.float64)
        pnl.iat[1, 0] = np.nan
        pnl.iat[1, 1] = np.nan
        pnl.iat[2, 3] = 60
        for axis in range(2):
            expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
                axis=axis) - 1
            result = pnl.pct_change(axis=axis, fill_method='pad')
            tm.assert_frame_equal(result, expected)
# Clip
    def test_clip(self):
        """clip/clip_upper/clip_lower bound values at the median without
        mutating the original frame."""
        median = self.frame.median().median()
        original = self.frame.copy()
        capped = self.frame.clip_upper(median)
        assert not (capped.values > median).any()
        floored = self.frame.clip_lower(median)
        assert not (floored.values < median).any()
        double = self.frame.clip(upper=median, lower=median)
        assert not (double.values != median).any()
        # Verify that self.frame was not changed inplace
        assert (self.frame.values == original.values).all()
    def test_inplace_clip(self):
        """clip/clip_upper/clip_lower with inplace=True modify the frame
        itself and produce the same bounds as the copying variants."""
        # GH #15388
        median = self.frame.median().median()
        frame_copy = self.frame.copy()
        frame_copy.clip_upper(median, inplace=True)
        assert not (frame_copy.values > median).any()
        frame_copy = self.frame.copy()
        frame_copy.clip_lower(median, inplace=True)
        assert not (frame_copy.values < median).any()
        frame_copy = self.frame.copy()
        frame_copy.clip(upper=median, lower=median, inplace=True)
        assert not (frame_copy.values != median).any()
    def test_dataframe_clip(self):
        """Scalar clip bounds values element-wise; works regardless of the
        order in which lower/upper are passed."""
        # GH #2747
        df = DataFrame(np.random.randn(1000, 2))
        for lb, ub in [(-1, 1), (1, -1)]:
            clipped_df = df.clip(lb, ub)
            # normalize so lb <= ub before checking the masks
            lb, ub = min(lb, ub), max(ub, lb)
            lb_mask = df.values <= lb
            ub_mask = df.values >= ub
            mask = ~lb_mask & ~ub_mask
            assert (clipped_df.values[lb_mask] == lb).all()
            assert (clipped_df.values[ub_mask] == ub).all()
            assert (clipped_df.values[mask] == df.values[mask]).all()
    def test_clip_mixed_numeric(self):
        """clip on a mixed int/float frame; NaNs are preserved."""
        # TODO(jreback)
        # clip on mixed integer or floats
        # with integer clippers coerces to float
        df = DataFrame({'A': [1, 2, 3],
                        'B': [1., np.nan, 3.]})
        result = df.clip(1, 2)
        expected = DataFrame({'A': [1, 2, 2.],
                              'B': [1., np.nan, 2.]})
        tm.assert_frame_equal(result, expected, check_like=True)
    @pytest.mark.parametrize("inplace", [True, False])
    def test_clip_against_series(self, inplace):
        """Series bounds align row-wise with axis=0; inplace and copying
        variants give identical results."""
        # GH #6966
        df = DataFrame(np.random.randn(1000, 2))
        lb = Series(np.random.randn(1000))
        ub = lb + 1
        original = df.copy()
        clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
        if inplace:
            clipped_df = df
        for i in range(2):
            lb_mask = original.iloc[:, i] <= lb
            ub_mask = original.iloc[:, i] >= ub
            mask = ~lb_mask & ~ub_mask
            result = clipped_df.loc[lb_mask, i]
            tm.assert_series_equal(result, lb[lb_mask], check_names=False)
            assert result.name == i
            result = clipped_df.loc[ub_mask, i]
            tm.assert_series_equal(result, ub[ub_mask], check_names=False)
            assert result.name == i
            tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
    @pytest.mark.parametrize("axis,res", [
        (0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
        (1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
    ])
    def test_clip_against_list_like(self, inplace, lower, axis, res):
        """List/array bounds broadcast along the requested axis."""
        # GH #15390
        original = self.simple.copy(deep=True)
        result = original.clip(lower=lower, upper=[5, 6, 7],
                               axis=axis, inplace=inplace)
        expected = pd.DataFrame(res,
                                columns=original.columns,
                                index=original.index)
        if inplace:
            result = original
        tm.assert_frame_equal(result, expected, check_exact=True)
    @pytest.mark.parametrize("axis", [0, 1, None])
    def test_clip_against_frame(self, axis):
        """Frame bounds clip element-wise; axis argument is irrelevant
        when the shapes already match."""
        df = DataFrame(np.random.randn(1000, 2))
        lb = DataFrame(np.random.randn(1000, 2))
        ub = lb + 1
        clipped_df = df.clip(lb, ub, axis=axis)
        lb_mask = df <= lb
        ub_mask = df >= ub
        mask = ~lb_mask & ~ub_mask
        tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
        tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
        tm.assert_frame_equal(clipped_df[mask], df[mask])
    def test_clip_with_na_args(self):
        """NaN bounds: an all-NaN scalar bound is a no-op; NaN entries in
        a list-like bound leave the corresponding slice unclipped."""
        # GH # 17276
        tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
        tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
                              self.frame)
        # GH #19992
        df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
                        'col_2': [7, 8, 9]})
        result = df.clip(lower=[4, 5, np.nan], axis=0)
        expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],
                              'col_2': [7, 8, np.nan]})
        tm.assert_frame_equal(result, expected)
        result = df.clip(lower=[4, 5, np.nan], axis=1)
        expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],
                              'col_2': [np.nan, np.nan, np.nan]})
        tm.assert_frame_equal(result, expected)
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
exp = a.dot(a.iloc[0])
tm.assert_series_equal(result, exp)
with tm.assert_raises_regex(ValueError,
'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
df.dot(df2)
    @pytest.mark.skipif(not PY35,
                        reason='matmul supported for Python>=3.5')
    @pytest.mark.xfail(
        _np_version_under1p12,
        reason="unpredictable return types under numpy < 1.12")
    def test_matmul(self):
        """operator.matmul (@) between DataFrame, Series and ndarray
        operands, including __rmatmul__, mixed dtypes and unaligned
        frames."""
        # matmul test is for GH #10259
        a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                      columns=['p', 'q', 'r', 's'])
        b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                      columns=['one', 'two'])
        # DataFrame @ DataFrame
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)
        # DataFrame @ Series
        result = operator.matmul(a, b.one)
        expected = Series(np.dot(a.values, b.one.values),
                          index=['a', 'b', 'c'])
        tm.assert_series_equal(result, expected)
        # np.array @ DataFrame
        result = operator.matmul(a.values, b)
        expected = np.dot(a.values, b.values)
        tm.assert_almost_equal(result, expected)
        # nested list @ DataFrame (__rmatmul__)
        result = operator.matmul(a.values.tolist(), b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_almost_equal(result.values, expected.values)
        # mixed dtype DataFrame @ DataFrame
        a['q'] = a.q.round().astype(int)
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)
        # different dtypes DataFrame @ DataFrame
        a = a.astype(int)
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)
        # unaligned frames (disjoint labels) must raise
        df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
        df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
        with tm.assert_raises_regex(ValueError, 'aligned'):
            operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
    """Frame with duplicate index labels and duplicate values in 'a'."""
    return pd.DataFrame({'a': [1, 2, 3, 4, 4],
                         'b': [1, 1, 1, 1, 1],
                         'c': [0, 1, 2, 5, 4]},
                        index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
    """Frame with int, object (string) and float columns; numeric columns
    are random permutations of 0..9."""
    return pd.DataFrame({'a': np.random.permutation(10),
                         'b': list(ascii_lowercase[:10]),
                         'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
    """Frame with one column per major dtype (int, float, object,
    categorical, datetime, tz-aware datetime, timedelta)."""
    return pd.DataFrame(
        {'group': [1, 1, 2],
         'int': [1, 2, 3],
         'float': [4., 5., 6.],
         'string': list('abc'),
         'category_string': pd.Series(list('abc')).astype('category'),
         'category_int': [7, 8, 9],
         'datetime': pd.date_range('20130101', periods=3),
         'datetimetz': pd.date_range('20130101',
                                     periods=3,
                                     tz='US/Eastern'),
         'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
        columns=['group', 'int', 'float', 'string',
                 'category_string', 'category_int',
                 'datetime', 'datetimetz',
                 'timedelta'])
class TestNLargestNSmallest(object):
    """Tests for DataFrame.nlargest / DataFrame.nsmallest."""
    # error-message template for columns whose dtype the methods reject
    dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
                                "use method {method!r} with this dtype")
    # ----------------------------------------------------------------------
    # Top / bottom
    @pytest.mark.parametrize('order', [
        ['a'],
        ['c'],
        ['a', 'b'],
        ['a', 'c'],
        ['b', 'a'],
        ['b', 'c'],
        ['a', 'b', 'c'],
        ['c', 'a', 'b'],
        ['c', 'b', 'a'],
        ['b', 'c', 'a'],
        ['b', 'a', 'c'],
        # dups!
        ['b', 'c', 'c']])
    @pytest.mark.parametrize('n', range(1, 11))
    def test_n(self, df_strings, nselect_method, n, order):
        """Result equals sort_values(order).head(n); ordering on the
        object-dtype column 'b' raises TypeError."""
        # GH10393
        df = df_strings
        if 'b' in order:
            error_msg = self.dtype_error_msg_template.format(
                column='b', method=nselect_method, dtype='object')
            with tm.assert_raises_regex(TypeError, error_msg):
                getattr(df, nselect_method)(n, order)
        else:
            ascending = nselect_method == 'nsmallest'
            result = getattr(df, nselect_method)(n, order)
            expected = df.sort_values(order, ascending=ascending).head(n)
            tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize('columns', [
        ('group', 'category_string'), ('group', 'string')])
    def test_n_error(self, df_main_dtypes, nselect_method, columns):
        """Unsupported dtypes raise TypeError with an informative message."""
        df = df_main_dtypes
        col = columns[1]
        error_msg = self.dtype_error_msg_template.format(
            column=col, method=nselect_method, dtype=df[col].dtype)
        # escape some characters that may be in the repr
        error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
                     .replace("[", "\\[").replace("]", "\\]"))
        with tm.assert_raises_regex(TypeError, error_msg):
            getattr(df, nselect_method)(2, columns)
    def test_n_all_dtypes(self, df_main_dtypes):
        """All dtypes except object and categorical-of-string work."""
        df = df_main_dtypes
        df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
        df.nlargest(2, list(set(df) - {'category_string', 'string'}))
    def test_n_identical_values(self):
        """Ties in the ranking column are kept in positional order."""
        # GH15297
        df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})
        result = df.nlargest(3, 'a')
        expected = pd.DataFrame(
            {'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
        )
        tm.assert_frame_equal(result, expected)
        result = df.nsmallest(3, 'a')
        expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize('order', [
        ['a', 'b', 'c'],
        ['c', 'b', 'a'],
        ['a'],
        ['b'],
        ['a', 'b'],
        ['c', 'b']])
    @pytest.mark.parametrize('n', range(1, 6))
    def test_n_duplicate_index(self, df_duplicates, n, order):
        """Duplicate index labels do not break nlargest/nsmallest."""
        # GH 13412
        df = df_duplicates
        result = df.nsmallest(n, order)
        expected = df.sort_values(order).head(n)
        tm.assert_frame_equal(result, expected)
        result = df.nlargest(n, order)
        expected = df.sort_values(order, ascending=False).head(n)
        tm.assert_frame_equal(result, expected)
    def test_duplicate_keep_all_ties(self):
        """keep='all' returns every row tied with the cut-off value."""
        # see gh-16818
        df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
                           'b': [10, 9, 8, 7, 5, 50, 10, 20]})
        result = df.nlargest(4, 'a', keep='all')
        expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
                                       5: 3, 6: 3, 7: 3},
                                 'b': {0: 10, 1: 9, 2: 8, 4: 5,
                                       5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)
        result = df.nsmallest(2, 'a', keep='all')
        expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
                                 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)
    def test_series_broadcasting(self):
        """Series-vs-frame comparisons involving NaN must not emit
        numpy warnings."""
        # smoke test for numpy warnings
        # GH 16378, GH 16306
        df = DataFrame([1.0, 1.0, 1.0])
        df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
        s = Series([1, 1, 1])
        s_nan = Series([np.nan, np.nan, 1])
        with tm.assert_produces_warning(None):
            df_nan.clip_lower(s, axis=0)
            for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
                getattr(df, op)(s_nan, axis=0)
    def test_series_nat_conversion(self):
        """DataFrame.rank must not mutate the frame it is called on."""
        # GH 18521
        # Check rank does not mutate DataFrame
        df = DataFrame(np.random.randn(10, 3), dtype='float64')
        expected = df.copy()
        df.rank()
        result = df
        tm.assert_frame_equal(result, expected)
| true | true |
f72cf916fbd8ea467321863ca89fb57083e4ec13 | 36,218 | py | Python | pydca/meanfield_dca/meanfield_dca.py | MehariBZ/pydca | 034e0707a13e6e43da1343630047d47caeca896e | [
"MIT"
] | 1 | 2021-03-28T01:57:38.000Z | 2021-03-28T01:57:38.000Z | pydca/meanfield_dca/meanfield_dca.py | MehariBZ/pydca | 034e0707a13e6e43da1343630047d47caeca896e | [
"MIT"
] | null | null | null | pydca/meanfield_dca/meanfield_dca.py | MehariBZ/pydca | 034e0707a13e6e43da1343630047d47caeca896e | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division
from . import msa_numerics
from pydca.fasta_reader import fasta_reader
import logging
import numpy as np
"""This module implements Direct Coupling Analysis (DCA) of residue coevolution
for protein and RNA sequences using the mean-field algorithm. The final
coevolution score is computed from the direct probability. The general steps
carried out are outlined as follows
For a detailed information about Direct Coupling Analysis, one can refer to the
following articles:
a) Identification of direct residue contacts in protein-protein interaction
by message-passing
Martin Weigt, Robert A White, Hendrik Szurmant, James A Hoch, Terence Hwa
Journal: Proceedings of the National Academy of Sciences
Volume: 106
Issue: 1
Pages: 67-72
b) Direct-coupling analysis of residue coevolution captures native contacts
across many protein families
Faruck Morcos, Andrea Pagnani, Bryan Lunt, Arianna Bertolino,
Debora S Marks, Chris Sander, Riccardo Zecchina, Jose N Onuchic,
Terence Hwa, Martin Weigt
Journal: Proceedings of the National Academy of Sciences
Volume: 108
Issue: 49
Pages: E1293-E1301
Author(s) Mehari B. Zerihun, Alexander Schug
"""
logger = logging.getLogger(__name__)
class MeanFieldDCAException(Exception):
    """Exception raised for mean-field DCA specific errors."""
class MeanFieldDCA:
"""MeanFieldDCA class. Instances of this class are used to carry out Direct
Coupling Analysis (DCA) of residue coevolution using the mean-field DCA
algorithm.
"""
    def __init__(self, msa_file_name, biomolecule, pseudocount=None, seqid=None):
        """Initializes a MeanFieldDCA instance: validates the parameters,
        reads the alignment and computes per-sequence weights.

        Parameters
        ----------
        msa_file_name : str
            Name of the FASTA formatted file containing the alignment.
        biomolecule : str
            Type of biomolecule; must be 'protein' or 'rna' (any case).
        pseudocount : float, optional
            Relative pseudocount used to regularize frequency data.
            Must be in [0, 1); defaults to 0.5.
        seqid : float, optional
            Sequence-identity cut-off in (0, 1]; sequences more similar
            than this are down-weighted. Defaults to 0.8.

        Raises
        ------
        ValueError
            If pseudocount, seqid or biomolecule is invalid.
        """
        self.__pseudocount = pseudocount if pseudocount is not None else 0.5
        self.__seqid = seqid if seqid is not None else 0.8
        # validate the pseudocount in case the user provided an invalid one
        if self.__pseudocount >= 1.0 or self.__pseudocount < 0:
            logger.error('\n\tValue of relative pseudo-count must be'
                ' between 0 and 1.0. Typical value is 0.5')
            raise ValueError
        # validate the sequence identity cut-off
        if self.__seqid > 1.0 or self.__seqid <= 0.0:
            logger.error('\n\tValue of sequence-identity must'
                ' not exceed 1 nor less than 0. Typical values are 0.7, 0.8., 0.9')
            raise ValueError
        biomolecule = biomolecule.strip().upper()
        self.__msa_file_name = msa_file_name
        # biomolecule type fixes the site alphabet size (incl. gap state)
        if biomolecule=='RNA':
            self.__num_site_states = 5
        elif biomolecule=='PROTEIN':
            self.__num_site_states = 21
        else:
            logger.error(
                '\n\tUnknown biomolecule ... must be protein (PROTEIN) or rna (RNA)',
            )
            raise ValueError
        self.__sequences = fasta_reader.get_alignment_int_form(
            self.__msa_file_name,
            biomolecule=biomolecule,
        )
        self.__num_sequences = len(self.__sequences)
        self.__sequences_len = len(self.__sequences[0])
        self.__biomolecule = biomolecule
        if self.__seqid < 1.0:
            self.__sequences_weight = self.compute_sequences_weight()
        else :
            # assign each sequence a weight of one
            self.__sequences_weight = np.ones((self.__num_sequences,), dtype = np.float64)
        self.__effective_num_sequences = np.sum(self.__sequences_weight)
        # summarize the constructed object in the log
        mf_dca_info = """\n\tCreated a MeanFieldDCA object with the following attributes
        \tbiomolecule: {}
        \ttotal states at sites: {}
        \tpseudocount: {}
        \tsequence identity: {}
        \talignment length: {}
        \ttotal number of unique sequences (excluding redundant sequences with 100 percent similarity): {}
        \teffective number of sequences (with sequence identity {}): {}
        """.format(
            biomolecule,
            self.__num_site_states,
            self.__pseudocount,
            self.__seqid,
            self.__sequences_len,
            self.__num_sequences,
            self.__seqid,
            self.__effective_num_sequences,
        )
        logger.info(mf_dca_info)
        return None
def __str__(self):
"""Describes the MeanFieldDCA object.
Parameters
----------
self: MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
description : str
A representation about objects created from
the MeanFieldDCA class.
"""
description = '<instance of MeanFieldDCA>'
return description
def __call__(self, pseudocount = 0.5 , seqid = 0.8):
"""Resets the value of pseudo count and sequence identity through
the instance.
Parameters
----------
self : MeanFieldDCA
MeanFieldDCA instance.
pseudocount : float
The value of the raltive pseudo count. It must be between
0 and 1. Default value is 0.5.
seqid : float
Threshold sequence similarity for computing sequences weight.
This parameter must be between 0 and 1. Typical values are
0.7, 0.8, 0.9 or something in between these numbers.
Returns
-------
None : None
"""
#warn the user that paramertes are being reset
self.__pseudocount = pseudocount
self.__seqid = seqid
logger.warning('\n\tYou have changed one of the parameters (pseudo count or sequence identity)'
'\n\tfrom their default values'
'\n\tpseudocount: {} \n\tsequence_identity: {}'.format(
self.__pseudocount, self.__seqid,
)
)
return None
@property
def alignment(self):
"""Alignment data getter.
Parameters
----------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
--------
self.__sequences : list
A 2d list of alignment sequences in integer representation.
"""
return self.__sequences
@property
def biomolecule(self):
"""Sequence type getter
Parameters
----------
Self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
self.__biomolecule : str
Biomolecule type (protein or RNA)
"""
return self.__biomolecule
@property
def sequences_len(self):
"""Sequences length getter.
Parameters
---------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
self.__sequences_len : int
Sequences length in alignment data
"""
return self.__sequences_len
@property
def num_site_states(self):
"""Get number of states for an MSA (eg. 5 for RNAs and 21 for proteins)
Parameters
----------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
self.__num_site_states : int
Maximum number of states in a sequence site
"""
return self.__num_site_states
@property
def num_sequences(self):
"""Getter for the number of sequences read from alignment file
Parameters
----------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
self.__num_sequences : int
The total number of sequences in alignment data
"""
return self.__num_sequences
@property
def sequence_identity(self):
"""Getter for the value of sequence indentity.
Parameters
----------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
self.__seqid : float
Cut-off value for sequences similarity above which sequences are
considered identical
"""
return self.__seqid
@property
def pseudocount(self):
"""Getter for value of pseudo count
Parameters
----------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
self.__pseudocount : float
Value of pseudo count usef for regularization
"""
return self.__pseudocount
@property
def sequences_weight(self):
"""Getter for the weight of each sequences in alignment data.
Parameters
----------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
self.__sequences_weight : np.array(dtype=np.float64)
A 1d numpy array containing the weight of each sequences in the
alignment.
"""
return self.__sequences_weight
@property
def effective_num_sequences(self):
"""Getter for the effective number of sequences.
Parameters
----------
self : MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
np.sum(self.__sequences_weight) : float
The sum of each sequence's weight.
"""
return np.sum(self.__sequences_weight)
def compute_sequences_weight(self):
"""Computes the weight of each sequences in the alignment. If the
sequences identity is one, each sequences has equal weight and this is
the maximum weight a sequence in the alignment data can have. Whenever
the sequence identity is set a value less than one, sequences that have
similarity beyond the sequence identity are lumped together. If there are
m similar sequences, their corresponding weight is the reciprocal.
Parameters
----------
self : MeanFieldDCA
The instance
Returns
-------
weights : np.array
A 1d numpy array of size self.__num_sequences containing the
weight of each sequence.
"""
logger.info('\n\tComputing sequences weights')
weights = msa_numerics.compute_sequences_weight(
alignment_data= np.array(self.__sequences, dtype=np.int32),
seqid = self.__seqid,
)
return weights
def get_single_site_freqs(self):
"""Computes single site frequency counts.
Parameters
----------
self : MeanFieldDCA
The instance.
Returns
-------
single_site_freqs : np.array
A 2d numpy array of shape (L, q) containing the frequency
count of residues at sequence sites. L is the length of
sequences in the alignment, and q is the maximum possible
states a site can accommodate. The last state (q) of each
site represents a gap.
"""
logger.info('\n\tComputing single site frequencies')
single_site_freqs = msa_numerics.compute_single_site_freqs(
alignment_data = np.array(self.__sequences),
num_site_states = self.__num_site_states,
seqs_weight = self.__sequences_weight,
)
return single_site_freqs
def get_reg_single_site_freqs(self):
"""Regularizes single site frequencies.
Parameters
----------
self : MeanFieldDCA
The instance
Returns
-------
reg_single_site_freqs : np.array
A 2d numpy array of shape (L, q) containing regularized single
site frequencies. L and q are the sequences length and maximum
number of site-states respectively.
"""
single_site_freqs = self.get_single_site_freqs()
logger.info('\n\tRegularizing single site frequencies')
reg_single_site_freqs = msa_numerics.get_reg_single_site_freqs(
single_site_freqs = single_site_freqs,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
pseudocount = self.__pseudocount,
)
return reg_single_site_freqs
def get_pair_site_freqs(self):
"""Computes pair site frequencies
Parameters
----------
self : MeanFieldDCA
The instance.
Returns
-------
pair_site_freqs : np.array
A 2d numpy array of pair site frequncies. It has a shape of
(N, q-1, q-1) where N is the number of unique site pairs and q
is the maximum number of states a site can accommodate. Note
site pairig is performed in the following order: (0, 0), (0, 1),
..., (0, L-1), ...(L-1, L) where L is the sequences length. This
ordering is critical that any computation involding pair site
frequencies must be implemented in the righ order of pairs.
"""
logger.info('\n\tComputing pair site frequencies')
pair_site_freqs = msa_numerics.compute_pair_site_freqs(
alignment_data = np.array(self.__sequences),
num_site_states = self.__num_site_states,
seqs_weight = self.__sequences_weight,
)
return pair_site_freqs
def get_reg_pair_site_freqs(self):
"""Regularizes pair site frequencies
Parameters
----------
self : MeanFieldDCA
The instance.
Returns
-------
reg_pair_site_freqs : np.array
A 3d numpy array of shape (N, q-1, q-1) containing regularized
pair site frequencies. N is the number of unique site pairs and
q is the maximum number of states in a sequence site. The
ordering of pairs follows numbering like (unregularized) pair
site frequencies.
"""
pair_site_freqs = self.get_pair_site_freqs()
logger.info('\n\tRegularizing pair site frequencies')
reg_pair_site_freqs = msa_numerics.get_reg_pair_site_freqs(
pair_site_freqs = pair_site_freqs,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
pseudocount = self.__pseudocount,
)
return reg_pair_site_freqs
def construct_corr_mat(self, reg_fi, reg_fij):
"""Constructs the correlation matrix from regularized frequencies.
Parameters
----------
self : MeanFieldDCA
The instance.
reg_fi : np.array
Regularized single site frequencies.
reg_fij : np.array
Regularized pair site frequncies.
Returns
-------
corr_mat : np.array
A 2d numpy array of (N, N) where N = L*(q-1) where L and q are
the length of sequences and number of states in a site
respectively.
"""
logger.info('\n\tConstructing the correlation matrix')
corr_mat = msa_numerics.construct_corr_mat(
reg_fi = reg_fi,
reg_fij = reg_fij,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
)
return corr_mat
def compute_couplings(self, corr_mat):
"""Computing couplings by inverting the matrix of correlations. Note that
the couplings are the negative of the inverse of the correlation matrix.
Parameters
----------
self : MeanFieldDCA
The instance.
corr_mat : np.array
The correlation matrix formed from regularized pair site and
single site frequencies.
Returns
-------
couplings : np.array
A 2d numpy array of the same shape as the correlation matrix.
"""
logger.info('\n\tComputing couplings')
try:
couplings = msa_numerics.compute_couplings(corr_mat = corr_mat)
except Exception as e:
logger.error('\n\tCorrelation {}\n\tYou set the pseudocount {}.'
' You might need to increase it.'.format(e, self.__pseudocount)
)
raise
# capture couplings to avoid recomputing
self.__couplings = couplings
logger.info('\n\tMaximum and minimum couplings: {}, {}'.format(
np.max(couplings), np.min(couplings)))
return couplings
def compute_two_site_model_fields(self, couplings, reg_fi):
"""Computes two site model fields by fitting the marginal probabilities
of the direct probability with the empirical data obtained from the
alignment
Parameters
----------
self : MeanFieldDCA
The instance.
couplings : np.array
A 2d numpy array of couplings computed from the correlation matrix.
reg_fi : np.array
A 3d numpy array of regularized single site frequencies.
Returns
-------
two_site_model_fields : np.array
A 3d numpy array of shape (N, q, q) where N is the total number
of unique site pairs and q is the maximum number of states a site
can accommodate. The ordering of site pairs is the same as those
in pair site frequencies.
"""
logger.info('\n\tComputing two site model fields')
two_site_model_fields = msa_numerics.compute_two_site_model_fields(
couplings = couplings,
reg_fi = reg_fi,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
)
return two_site_model_fields
def compute_fields(self, couplings=None):
"""Computes the local fields of the global probability of sequence space.
Parameters
----------
self : MeanFieldDCA
An instance of MeanFieldDCA class
couplings : np.array
A 2d numpy array of the couplings. If not give, will be computed.
Returns
-------
fields : dict
A dictionary of fields whose keys are sites in MSA and whose values
are arrays of fields per site.
"""
if couplings is None:
reg_fi = self.get_reg_single_site_freqs()
reg_fij = self.get_reg_pair_site_freqs()
corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
couplings = self.compute_couplings(corr_mat)
else:
reg_fi = self.get_reg_single_site_freqs()
q = self.__num_site_states
fields = dict()
logger.info('\n\tComputing local fields of the global probability function')
for i in range(self.__sequences_len):
pi = reg_fi[i]
piq = pi[-1]
sum = np.zeros((q-1, 1))
row_start = i * (q - 1)
row_end = row_start + (q - 1)
for j in range(self.__sequences_len):
if j != i:
pj = reg_fi[j]
col_start = j * (q - 1)
col_end = col_start + (q - 1)
couplings_ij = couplings[row_start:row_end, col_start:col_end]
pj_col_vec = np.reshape(pj[:-1], (q-1, 1))
sum += np.dot(couplings_ij, pj_col_vec)
fields_i = np.log(pi[:-1]/piq) - np.reshape(sum, (q-1, ))
fields[i] = fields_i
return fields
def shift_couplings(self, couplings_ij):
"""Shifts the couplings value.
Parameters
----------
self : MeanFieldDCA
An instance of MeanFieldDCA class
couplings_ij : np.array
1d array of couplings for site pair (i, j)
Returns
-------
shifted_couplings_ij : np.array
A 2d array of the couplings for site pair (i, j)
"""
qm1 = self.__num_site_states - 1
couplings_ij = np.reshape(couplings_ij, (qm1,qm1))
avx = np.mean(couplings_ij, axis=1)
avx = np.reshape(avx, (qm1, 1))
avy = np.mean(couplings_ij, axis=0)
avy = np.reshape(avy, (1, qm1))
av = np.mean(couplings_ij)
couplings_ij = couplings_ij - avx - avy + av
return couplings_ij
    def compute_params(self, seqbackmapper=None, ranked_by=None, linear_dist=None, num_site_pairs=None):
        """Computes fields and couplings with the couplings ranked by DCA score.

        Parameters
        ----------
        self : MeanFieldDCA
            An instance of MeanFieldDCA class
        seqbackmapper : SequenceBackmapper
            An instance of SequenceBackmapper class. When given, sites are
            expressed in reference-sequence numbering.
        ranked_by : str
            DCA score type used to rank the couplings by their site pairs.
            One of 'FN', 'FN_APC', 'DI', 'DI_APC' (case-insensitive).
            Defaults to the Frobenius norm with average product correction.
        linear_dist : int
            Minimum separation between site pairs (i, j). Default 4.
        num_site_pairs : int
            Number of site pairs whose couplings are to be obtained.

        Returns
        -------
        fields, couplings : tuple
            A tuple of tuples of fields and couplings.

        Raises
        ------
        MeanFieldDCAException
            If the ranking criterion is invalid, or if a mapped pair is
            not ordered i < j.
        """
        if ranked_by is None: ranked_by = 'fn_apc'
        if linear_dist is None: linear_dist = 4
        RANKING_METHODS = ('FN', 'FN_APC', 'DI', 'DI_APC')
        ranked_by = ranked_by.strip().upper()
        if ranked_by not in RANKING_METHODS:
            logger.error('\n\tInvalid ranking criterion {}.\nChoose from {}'.format(ranked_by, RANKING_METHODS))
            raise MeanFieldDCAException
        # NOTE(review): compute_sorted_FN/FN_APC/DI/DI_APC are defined
        # elsewhere in this class; computing a score is presumably what
        # sets self.__couplings (via compute_couplings) before it is read
        # below — confirm against the full class.
        if ranked_by == 'FN': dca_scores = self.compute_sorted_FN(seqbackmapper=seqbackmapper)
        if ranked_by == 'FN_APC': dca_scores = self.compute_sorted_FN_APC(seqbackmapper=seqbackmapper)
        if ranked_by == 'DI': dca_scores = self.compute_sorted_DI(seqbackmapper=seqbackmapper)
        if ranked_by == 'DI_APC': dca_scores = self.compute_sorted_DI_APC(seqbackmapper=seqbackmapper)
        fields = self.compute_fields(couplings=self.__couplings)
        qm1 = self.__num_site_states - 1
        if seqbackmapper is not None:
            # mapping_dict has keys from MSA sites and values from refseq sites
            # we need to reverse this mapping as the fields and couplings are from MSA sites
            mapping_dict = {
                value : key for key, value in self.__refseq_mapping_dict.items()
            }
        else:
            # no backmapper: identity mapping over all MSA sites
            mapping_dict = {
                i : i for i in range(self.__sequences_len)
            }
        # set default number of site pairs whose couplings are to be extracted
        if num_site_pairs is None :
            num_site_pairs = len(seqbackmapper.ref_sequence) if seqbackmapper is not None else len(mapping_dict.keys())
        # we need only the fields corresponding to mapped sites
        fields_mapped = list()
        logger.info('\n\tExtracting fields')
        for i in mapping_dict.keys():
            site_in_msa = mapping_dict[i]
            fields_im = fields[site_in_msa]
            site_fields = i, fields_im
            fields_mapped.append(site_fields)
        # extract couplings
        logger.info('\n\tExtracting couplings for top {} site pairs (i, j) with |i - j| > {} and ranked by {}'.format(
            num_site_pairs, linear_dist, ranked_by)
        )
        couplings_ranked_by_dca_score = list()
        count_pairs = 0
        for pair, score in dca_scores:
            site_1_in_refseq, site_2_in_refseq = pair[0], pair[1]
            # keep only pairs separated by more than linear_dist
            if abs(site_1_in_refseq - site_2_in_refseq) > linear_dist:
                count_pairs += 1
                if count_pairs > num_site_pairs: break
                i, j = mapping_dict[site_1_in_refseq], mapping_dict[site_2_in_refseq]
                if(i > j):
                    logger.error('\n\tInvalid site pair. Site pair (i, j) should be ordered in i < j')
                    raise MeanFieldDCAException
                row_start = i * qm1
                row_end = row_start + qm1
                column_start = j * qm1
                column_end = column_start + qm1
                couplings_ij = self.__couplings[row_start:row_end, column_start:column_end]
                couplings_ij = self.shift_couplings(couplings_ij) # now couplings_ij is a 2d numpy array
                couplings_ij = np.reshape(couplings_ij, (qm1*qm1,))
                pair_couplings_ij = pair, couplings_ij
                couplings_ranked_by_dca_score.append(pair_couplings_ij)
        if count_pairs < num_site_pairs:
            logger.warning('\n\tObtained couplings for only {} ranked site pairs.'
                '\n\tThis is the maximum number of site paris we can obtain under '
                'the given criteria'.format(count_pairs)
            )
        return tuple(fields_mapped), tuple(couplings_ranked_by_dca_score)
def get_mapped_site_pairs_dca_scores(self, sorted_dca_scores, seqbackmapper):
"""Filters mapped site pairs with a reference sequence.
Parameters
-----------
self : MeanFieldDCA
An instance of MeanFieldDCA class
sorted_dca_scores : tuple of tuples
A tuple of tuples of site-pair and DCA score sorted by DCA scores
in reverse order.
seqbackmapper : SequenceBackmapper
An instance of SequenceBackmapper class
Returns
-------
sorted_scores_mapped : tuple
A tuple of tuples of site pairs and dca score
"""
mapping_dict = seqbackmapper.map_to_reference_sequence()
# Add attribute __reseq_mapping_dict
self.__refseq_mapping_dict = mapping_dict
sorted_scores_mapped = list()
num_mapped_pairs = 0
for pair, score in sorted_dca_scores:
try:
mapped_pair = mapping_dict[pair[0]], mapping_dict[pair[1]]
except KeyError:
pass
else:
current_pair_score = mapped_pair, score
sorted_scores_mapped.append(current_pair_score)
num_mapped_pairs += 1
# sort mapped pairs in case they were not
sorted_scores_mapped = sorted(sorted_scores_mapped, key = lambda k : k[1], reverse=True)
logger.info('\n\tTotal number of mapped sites: {}'.format(num_mapped_pairs))
return tuple(sorted_scores_mapped)
    def get_site_pair_di_score(self):
        """Obtains computed direct information (DI) scores from backend and
        puts them in a dict keyed by site pair.

        Parameters
        ----------
        self : MeanFieldDCA
            The instance.

        Returns
        -------
        site_pair_di_score : dict
            A dict mapping each unique site pair (i, j), with j > i, to its
            (unsorted) DI score, i.e. {(i, j): score, ...}. Pairs are
            enumerated in the same row-major order the backend uses for
            its flat DI array.
        """
        reg_fi = self.get_reg_single_site_freqs()
        reg_fij = self.get_reg_pair_site_freqs()
        corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
        couplings = self.compute_couplings(corr_mat)
        fields_ij = self.compute_two_site_model_fields(couplings, reg_fi)
        logger.info('\n\tComputing direct information')
        unsorted_DI = msa_numerics.compute_direct_info(
            couplings = couplings,
            fields_ij = fields_ij,
            reg_fi = reg_fi,
            seqs_len = self.__sequences_len,
            num_site_states = self.__num_site_states,
        )
        site_pair_di_score= dict()
        pair_counter = 0
        # unsorted_DI is flat; walk pairs (i, j), j > i, in the same order
        for i in range(self.__sequences_len - 1):
            for j in range(i + 1, self.__sequences_len):
                site_pair = (i , j)
                site_pair_di_score[site_pair] = unsorted_DI[pair_counter]
                pair_counter += 1
        return site_pair_di_score
def compute_sorted_DI(self, seqbackmapper=None):
"""Computes direct informations for each pair of sites and sorts them in
descending order of DCA score.
Parameters
----------
self : MeanFieldDCA
The instance.
seqbackmapper : SequenceBackmapper
An instance of SequenceBackmapper class.
Returns
-------
sorted_DI : list
A list of tuples containing site pairs and DCA score, i.e., the
contents of sorted_DI are [((i, j), score), ...] for all unique
site pairs (i, j) such that j > i.
"""
unsorted_DI = self.get_site_pair_di_score()
sorted_DI = sorted(unsorted_DI.items(), key = lambda k : k[1], reverse=True)
if seqbackmapper is not None:
sorted_DI = self.get_mapped_site_pairs_dca_scores(sorted_DI, seqbackmapper)
return sorted_DI
    def compute_sorted_DI_APC(self, seqbackmapper=None):
        """Computes average-product-corrected (APC) DI scores and ranks the
        site pairs in descending order of the corrected score.

        Parameters
        ----------
        self : MeanFieldDCA
            An instance of MeanFieldDCA class
        seqbackmapper : SequenceBackmapper
            An instance of SequenceBackmapper class.

        Returns
        -------
        sorted_DI_APC : list
            A list of tuples containing site pairs and DCA score, i.e., the
            contents of sorted_DI are [((i, j), score), ...] for all unique
            site pairs (i, j) such that j > i. These DI scores are average
            product corrected.
        """
        sorted_DI = self.compute_sorted_DI() # we must not supply seqbackmapper at this point.
        # the backmapping is done at the end of APC step
        logger.info('\n\tPerforming average product correction (APC) of DI scores')
        # compute the average score of each site
        av_score_sites = list()
        N = self.__sequences_len
        for i in range(N):
            i_scores = [score for pair, score in sorted_DI if i in pair]
            assert len(i_scores) == N - 1
            i_scores_sum = sum(i_scores)
            i_scores_ave = i_scores_sum/float(N - 1)
            av_score_sites.append(i_scores_ave)
        # compute average product corrected DI: DI_apc = DI - av_i * av_j / av_all
        av_all_scores = sum(av_score_sites)/float(N)
        sorted_DI_APC = list()
        for pair, score in sorted_DI:
            i, j = pair
            score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)
            sorted_DI_APC.append((pair, score_apc))
        # sort the scores as doing APC may have disrupted the ordering
        sorted_DI_APC = sorted(sorted_DI_APC, key = lambda k : k[1], reverse=True)
        # Now we must do backmapping if seqbackmapper is provided.
        if seqbackmapper is not None:
            sorted_DI_APC = self.get_mapped_site_pairs_dca_scores(sorted_DI_APC, seqbackmapper)
        return sorted_DI_APC
def compute_sorted_FN(self, seqbackmapper=None):
"""Computes the Frobenius norm of couplings.
Parameters
----------
self : MeanFieldDCA
An instance of MeanFieldDCA class.
seqbackmapper : SequenceBackmapper
An instance of SequenceBackmapper class.
Returns
-------
fn_sorted : list
A list of tuples containing site pairs and DCA score, i.e., the
list [((i, j), score), ...] for all unique
site pairs (i, j) such that j > i.
"""
reg_fi = self.get_reg_single_site_freqs()
reg_fij = self.get_reg_pair_site_freqs()
corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
couplings = self.compute_couplings(corr_mat)
logger.info('\n\tComputing Frobenius norm of couplings')
num_sites = self.__sequences_len
q = self.__num_site_states
frobenius_norm = list()
for i in range(num_sites):
row_start = i * (q - 1)
row_end = row_start + (q - 1)
for j in range(i + 1, num_sites):
site_pair = (i, j)
col_start = j * (q - 1)
col_end = col_start + (q - 1)
cij = couplings[row_start:row_end, col_start:col_end]
cij_mean_1 = np.reshape(np.mean(cij, axis=0), (1, q-1))
cij_mean_2 = np.reshape(np.mean(cij, axis=1), (q-1, 1))
cij_mean = np.mean(cij)
cij_new = cij - cij_mean_1 - cij_mean_2 + cij_mean
fn_ij = np.sqrt(np.sum(cij_new * cij_new))
frobenius_norm.append((site_pair, fn_ij))
fn_sorted = sorted(frobenius_norm, key = lambda x : x[1], reverse=True)
if seqbackmapper is not None:
fn_sorted = self.get_mapped_site_pairs_dca_scores(fn_sorted, seqbackmapper)
return fn_sorted
    def compute_sorted_FN_APC(self, seqbackmapper = None):
        """Performs average product correction (APC) on DCA scores

        Parameters
        ----------
        self : MeanFieldDCA
            An instance of MeanFieldDCA class.
        seqbackmapper : SequenceBackmapper
            An instance of SequenceBackmapper class.

        Returns
        -------
        sorted_FN_APC : list
            A list of tuples containing site pairs and DCA score, i.e., the
            list [((i, j), score), ...] for all unique site pairs (i, j)
            such that j > i. The DCA scores are average product corrected.
        """
        raw_FN = self.compute_sorted_FN() # Must not supply seqbackmapper at this stage.
        logger.info('\n\tPerforming average product correction (APC) to Frobenius'
            ' norm of couplings.'
        )

        # compute the average score of each site
        av_score_sites = list()
        N = self.__sequences_len
        for i in range(N):
            i_scores = [score for pair, score in raw_FN if i in pair]
            assert len(i_scores) == N - 1
            i_scores_sum = sum(i_scores)
            i_scores_ave = i_scores_sum/float(N - 1)
            av_score_sites.append(i_scores_ave)
        # compute average product corrected Frobenius norm:
        # FN_apc = FN - av_i * av_j / av_all
        av_all_scores = sum(av_score_sites)/float(N)
        sorted_FN_APC = list()
        for pair, score in raw_FN:
            i, j = pair
            score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)
            sorted_FN_APC.append((pair, score_apc))
        sorted_FN_APC = sorted(sorted_FN_APC, key=lambda x : x[1], reverse=True)
        # Must do backmapping if seqbackmapper is not None
        if seqbackmapper is not None:
            sorted_FN_APC = self.get_mapped_site_pairs_dca_scores(sorted_FN_APC, seqbackmapper)
        return sorted_FN_APC
if __name__ == '__main__':
    # nothing to run directly; this module is meant to be used as a library
    pass
from . import msa_numerics
from pydca.fasta_reader import fasta_reader
import logging
import numpy as np
logger = logging.getLogger(__name__)
class MeanFieldDCAException(Exception):
class MeanFieldDCA:
def __init__(self, msa_file_name, biomolecule, pseudocount=None, seqid=None):
self.__pseudocount = pseudocount if pseudocount is not None else 0.5
self.__seqid = seqid if seqid is not None else 0.8
if self.__pseudocount >= 1.0 or self.__pseudocount < 0:
logger.error('\n\tValue of relative pseudo-count must be'
' between 0 and 1.0. Typical value is 0.5')
raise ValueError
if self.__seqid > 1.0 or self.__seqid <= 0.0:
logger.error('\n\tValue of sequence-identity must'
' not exceed 1 nor less than 0. Typical values are 0.7, 0.8., 0.9')
raise ValueError
biomolecule = biomolecule.strip().upper()
self.__msa_file_name = msa_file_name
if biomolecule=='RNA':
self.__num_site_states = 5
elif biomolecule=='PROTEIN':
self.__num_site_states = 21
else:
logger.error(
'\n\tUnknown biomolecule ... must be protein (PROTEIN) or rna (RNA)',
)
raise ValueError
self.__sequences = fasta_reader.get_alignment_int_form(
self.__msa_file_name,
biomolecule=biomolecule,
)
self.__num_sequences = len(self.__sequences)
self.__sequences_len = len(self.__sequences[0])
self.__biomolecule = biomolecule
if self.__seqid < 1.0:
self.__sequences_weight = self.compute_sequences_weight()
else :
self.__sequences_weight = np.ones((self.__num_sequences,), dtype = np.float64)
self.__effective_num_sequences = np.sum(self.__sequences_weight)
mf_dca_info = """\n\tCreated a MeanFieldDCA object with the following attributes
\tbiomolecule: {}
\ttotal states at sites: {}
\tpseudocount: {}
\tsequence identity: {}
\talignment length: {}
\ttotal number of unique sequences (excluding redundant sequences with 100 percent similarity): {}
\teffective number of sequences (with sequence identity {}): {}
""".format(
biomolecule,
self.__num_site_states,
self.__pseudocount,
self.__seqid,
self.__sequences_len,
self.__num_sequences,
self.__seqid,
self.__effective_num_sequences,
)
logger.info(mf_dca_info)
return None
def __str__(self):
description = '<instance of MeanFieldDCA>'
return description
def __call__(self, pseudocount = 0.5 , seqid = 0.8):
self.__pseudocount = pseudocount
self.__seqid = seqid
logger.warning('\n\tYou have changed one of the parameters (pseudo count or sequence identity)'
'\n\tfrom their default values'
'\n\tpseudocount: {} \n\tsequence_identity: {}'.format(
self.__pseudocount, self.__seqid,
)
)
return None
@property
def alignment(self):
return self.__sequences
@property
def biomolecule(self):
return self.__biomolecule
@property
def sequences_len(self):
return self.__sequences_len
@property
def num_site_states(self):
return self.__num_site_states
@property
def num_sequences(self):
return self.__num_sequences
@property
def sequence_identity(self):
return self.__seqid
@property
def pseudocount(self):
return self.__pseudocount
@property
def sequences_weight(self):
return self.__sequences_weight
@property
def effective_num_sequences(self):
return np.sum(self.__sequences_weight)
def compute_sequences_weight(self):
logger.info('\n\tComputing sequences weights')
weights = msa_numerics.compute_sequences_weight(
alignment_data= np.array(self.__sequences, dtype=np.int32),
seqid = self.__seqid,
)
return weights
def get_single_site_freqs(self):
logger.info('\n\tComputing single site frequencies')
single_site_freqs = msa_numerics.compute_single_site_freqs(
alignment_data = np.array(self.__sequences),
num_site_states = self.__num_site_states,
seqs_weight = self.__sequences_weight,
)
return single_site_freqs
def get_reg_single_site_freqs(self):
single_site_freqs = self.get_single_site_freqs()
logger.info('\n\tRegularizing single site frequencies')
reg_single_site_freqs = msa_numerics.get_reg_single_site_freqs(
single_site_freqs = single_site_freqs,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
pseudocount = self.__pseudocount,
)
return reg_single_site_freqs
def get_pair_site_freqs(self):
logger.info('\n\tComputing pair site frequencies')
pair_site_freqs = msa_numerics.compute_pair_site_freqs(
alignment_data = np.array(self.__sequences),
num_site_states = self.__num_site_states,
seqs_weight = self.__sequences_weight,
)
return pair_site_freqs
def get_reg_pair_site_freqs(self):
pair_site_freqs = self.get_pair_site_freqs()
logger.info('\n\tRegularizing pair site frequencies')
reg_pair_site_freqs = msa_numerics.get_reg_pair_site_freqs(
pair_site_freqs = pair_site_freqs,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
pseudocount = self.__pseudocount,
)
return reg_pair_site_freqs
def construct_corr_mat(self, reg_fi, reg_fij):
logger.info('\n\tConstructing the correlation matrix')
corr_mat = msa_numerics.construct_corr_mat(
reg_fi = reg_fi,
reg_fij = reg_fij,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
)
return corr_mat
def compute_couplings(self, corr_mat):
logger.info('\n\tComputing couplings')
try:
couplings = msa_numerics.compute_couplings(corr_mat = corr_mat)
except Exception as e:
logger.error('\n\tCorrelation {}\n\tYou set the pseudocount {}.'
' You might need to increase it.'.format(e, self.__pseudocount)
)
raise
self.__couplings = couplings
logger.info('\n\tMaximum and minimum couplings: {}, {}'.format(
np.max(couplings), np.min(couplings)))
return couplings
def compute_two_site_model_fields(self, couplings, reg_fi):
logger.info('\n\tComputing two site model fields')
two_site_model_fields = msa_numerics.compute_two_site_model_fields(
couplings = couplings,
reg_fi = reg_fi,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
)
return two_site_model_fields
def compute_fields(self, couplings=None):
if couplings is None:
reg_fi = self.get_reg_single_site_freqs()
reg_fij = self.get_reg_pair_site_freqs()
corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
couplings = self.compute_couplings(corr_mat)
else:
reg_fi = self.get_reg_single_site_freqs()
q = self.__num_site_states
fields = dict()
logger.info('\n\tComputing local fields of the global probability function')
for i in range(self.__sequences_len):
pi = reg_fi[i]
piq = pi[-1]
sum = np.zeros((q-1, 1))
row_start = i * (q - 1)
row_end = row_start + (q - 1)
for j in range(self.__sequences_len):
if j != i:
pj = reg_fi[j]
col_start = j * (q - 1)
col_end = col_start + (q - 1)
couplings_ij = couplings[row_start:row_end, col_start:col_end]
pj_col_vec = np.reshape(pj[:-1], (q-1, 1))
sum += np.dot(couplings_ij, pj_col_vec)
fields_i = np.log(pi[:-1]/piq) - np.reshape(sum, (q-1, ))
fields[i] = fields_i
return fields
def shift_couplings(self, couplings_ij):
qm1 = self.__num_site_states - 1
couplings_ij = np.reshape(couplings_ij, (qm1,qm1))
avx = np.mean(couplings_ij, axis=1)
avx = np.reshape(avx, (qm1, 1))
avy = np.mean(couplings_ij, axis=0)
avy = np.reshape(avy, (1, qm1))
av = np.mean(couplings_ij)
couplings_ij = couplings_ij - avx - avy + av
return couplings_ij
def compute_params(self, seqbackmapper=None, ranked_by=None, linear_dist=None, num_site_pairs=None):
if ranked_by is None: ranked_by = 'fn_apc'
if linear_dist is None: linear_dist = 4
RANKING_METHODS = ('FN', 'FN_APC', 'DI', 'DI_APC')
ranked_by = ranked_by.strip().upper()
if ranked_by not in RANKING_METHODS:
logger.error('\n\tInvalid ranking criterion {}.\nChoose from {}'.format(ranked_by, RANKING_METHODS))
raise MeanFieldDCAException
if ranked_by == 'FN': dca_scores = self.compute_sorted_FN(seqbackmapper=seqbackmapper)
if ranked_by == 'FN_APC': dca_scores = self.compute_sorted_FN_APC(seqbackmapper=seqbackmapper)
if ranked_by == 'DI': dca_scores = self.compute_sorted_DI(seqbackmapper=seqbackmapper)
if ranked_by == 'DI_APC': dca_scores = self.compute_sorted_DI_APC(seqbackmapper=seqbackmapper)
fields = self.compute_fields(couplings=self.__couplings)
qm1 = self.__num_site_states - 1
if seqbackmapper is not None:
mapping_dict = {
value : key for key, value in self.__refseq_mapping_dict.items()
}
else:
mapping_dict = {
i : i for i in range(self.__sequences_len)
}
if num_site_pairs is None :
num_site_pairs = len(seqbackmapper.ref_sequence) if seqbackmapper is not None else len(mapping_dict.keys())
fields_mapped = list()
logger.info('\n\tExtracting fields')
for i in mapping_dict.keys():
site_in_msa = mapping_dict[i]
fields_im = fields[site_in_msa]
site_fields = i, fields_im
fields_mapped.append(site_fields)
logger.info('\n\tExtracting couplings for top {} site pairs (i, j) with |i - j| > {} and ranked by {}'.format(
num_site_pairs, linear_dist, ranked_by)
)
couplings_ranked_by_dca_score = list()
count_pairs = 0
for pair, score in dca_scores:
site_1_in_refseq, site_2_in_refseq = pair[0], pair[1]
if abs(site_1_in_refseq - site_2_in_refseq) > linear_dist:
count_pairs += 1
if count_pairs > num_site_pairs: break
i, j = mapping_dict[site_1_in_refseq], mapping_dict[site_2_in_refseq]
if(i > j):
logger.error('\n\tInvalid site pair. Site pair (i, j) should be ordered in i < j')
raise MeanFieldDCAException
row_start = i * qm1
row_end = row_start + qm1
column_start = j * qm1
column_end = column_start + qm1
couplings_ij = self.__couplings[row_start:row_end, column_start:column_end]
couplings_ij = self.shift_couplings(couplings_ij)
couplings_ij = np.reshape(couplings_ij, (qm1*qm1,))
pair_couplings_ij = pair, couplings_ij
couplings_ranked_by_dca_score.append(pair_couplings_ij)
if count_pairs < num_site_pairs:
logger.warning('\n\tObtained couplings for only {} ranked site pairs.'
'\n\tThis is the maximum number of site paris we can obtain under '
'the given criteria'.format(count_pairs)
)
return tuple(fields_mapped), tuple(couplings_ranked_by_dca_score)
def get_mapped_site_pairs_dca_scores(self, sorted_dca_scores, seqbackmapper):
mapping_dict = seqbackmapper.map_to_reference_sequence()
self.__refseq_mapping_dict = mapping_dict
sorted_scores_mapped = list()
num_mapped_pairs = 0
for pair, score in sorted_dca_scores:
try:
mapped_pair = mapping_dict[pair[0]], mapping_dict[pair[1]]
except KeyError:
pass
else:
current_pair_score = mapped_pair, score
sorted_scores_mapped.append(current_pair_score)
num_mapped_pairs += 1
sorted_scores_mapped = sorted(sorted_scores_mapped, key = lambda k : k[1], reverse=True)
logger.info('\n\tTotal number of mapped sites: {}'.format(num_mapped_pairs))
return tuple(sorted_scores_mapped)
def get_site_pair_di_score(self):
reg_fi = self.get_reg_single_site_freqs()
reg_fij = self.get_reg_pair_site_freqs()
corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
couplings = self.compute_couplings(corr_mat)
fields_ij = self.compute_two_site_model_fields(couplings, reg_fi)
logger.info('\n\tComputing direct information')
unsorted_DI = msa_numerics.compute_direct_info(
couplings = couplings,
fields_ij = fields_ij,
reg_fi = reg_fi,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
)
site_pair_di_score= dict()
pair_counter = 0
for i in range(self.__sequences_len - 1):
for j in range(i + 1, self.__sequences_len):
site_pair = (i , j)
site_pair_di_score[site_pair] = unsorted_DI[pair_counter]
pair_counter += 1
return site_pair_di_score
def compute_sorted_DI(self, seqbackmapper=None):
unsorted_DI = self.get_site_pair_di_score()
sorted_DI = sorted(unsorted_DI.items(), key = lambda k : k[1], reverse=True)
if seqbackmapper is not None:
sorted_DI = self.get_mapped_site_pairs_dca_scores(sorted_DI, seqbackmapper)
return sorted_DI
def compute_sorted_DI_APC(self, seqbackmapper=None):
sorted_DI = self.compute_sorted_DI()
logger.info('\n\tPerforming average product correction (APC) of DI scores')
av_score_sites = list()
N = self.__sequences_len
for i in range(N):
i_scores = [score for pair, score in sorted_DI if i in pair]
assert len(i_scores) == N - 1
i_scores_sum = sum(i_scores)
i_scores_ave = i_scores_sum/float(N - 1)
av_score_sites.append(i_scores_ave)
av_all_scores = sum(av_score_sites)/float(N)
sorted_DI_APC = list()
for pair, score in sorted_DI:
i, j = pair
score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)
sorted_DI_APC.append((pair, score_apc))
sorted_DI_APC = sorted(sorted_DI_APC, key = lambda k : k[1], reverse=True)
if seqbackmapper is not None:
sorted_DI_APC = self.get_mapped_site_pairs_dca_scores(sorted_DI_APC, seqbackmapper)
return sorted_DI_APC
def compute_sorted_FN(self, seqbackmapper=None):
reg_fi = self.get_reg_single_site_freqs()
reg_fij = self.get_reg_pair_site_freqs()
corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
couplings = self.compute_couplings(corr_mat)
logger.info('\n\tComputing Frobenius norm of couplings')
num_sites = self.__sequences_len
q = self.__num_site_states
frobenius_norm = list()
for i in range(num_sites):
row_start = i * (q - 1)
row_end = row_start + (q - 1)
for j in range(i + 1, num_sites):
site_pair = (i, j)
col_start = j * (q - 1)
col_end = col_start + (q - 1)
cij = couplings[row_start:row_end, col_start:col_end]
cij_mean_1 = np.reshape(np.mean(cij, axis=0), (1, q-1))
cij_mean_2 = np.reshape(np.mean(cij, axis=1), (q-1, 1))
cij_mean = np.mean(cij)
cij_new = cij - cij_mean_1 - cij_mean_2 + cij_mean
fn_ij = np.sqrt(np.sum(cij_new * cij_new))
frobenius_norm.append((site_pair, fn_ij))
fn_sorted = sorted(frobenius_norm, key = lambda x : x[1], reverse=True)
if seqbackmapper is not None:
fn_sorted = self.get_mapped_site_pairs_dca_scores(fn_sorted, seqbackmapper)
return fn_sorted
def compute_sorted_FN_APC(self, seqbackmapper = None):
raw_FN = self.compute_sorted_FN()
logger.info('\n\tPerforming average product correction (APC) to Frobenius'
' norm of couplings.'
)
av_score_sites = list()
N = self.__sequences_len
for i in range(N):
i_scores = [score for pair, score in raw_FN if i in pair]
assert len(i_scores) == N - 1
i_scores_sum = sum(i_scores)
i_scores_ave = i_scores_sum/float(N - 1)
av_score_sites.append(i_scores_ave)
av_all_scores = sum(av_score_sites)/float(N)
sorted_FN_APC = list()
for pair, score in raw_FN:
i, j = pair
score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)
sorted_FN_APC.append((pair, score_apc))
sorted_FN_APC = sorted(sorted_FN_APC, key=lambda x : x[1], reverse=True)
if seqbackmapper is not None:
sorted_FN_APC = self.get_mapped_site_pairs_dca_scores(sorted_FN_APC, seqbackmapper)
return sorted_FN_APC
if __name__ == '__main__':
| true | true |
f72cf9518bb05b881c788963248acf6812065892 | 2,110 | py | Python | pyDist/MultiKeyData.py | alekLukanen/pyDist | ffb2c3feb20afba078fec7381c8785eb1e2b0543 | [
"MIT"
] | 5 | 2017-12-24T08:11:16.000Z | 2019-02-07T22:13:26.000Z | pyDist/MultiKeyData.py | alekLukanen/pyDist | ffb2c3feb20afba078fec7381c8785eb1e2b0543 | [
"MIT"
] | 1 | 2021-06-01T23:17:31.000Z | 2021-06-01T23:17:31.000Z | pyDist/MultiKeyData.py | alekLukanen/pyDist | ffb2c3feb20afba078fec7381c8785eb1e2b0543 | [
"MIT"
] | null | null | null |
class MultiKeyData(object):
    """Dict-like container where several keys may share one value slot.

    Keys registered together via :meth:`multi_set` point at the same
    internal slot, so updating the value through any one of them is
    visible through all of them. Deleting a key only drops the shared
    value once no key refers to it anymore.
    """

    def __init__(self):
        self._keys = {}    # key -> slot index
        self._values = {}  # slot index -> stored value
        self._links = {}   # slot index -> number of keys pointing at it
        self._index = 0    # next free slot index

    def __add_item(self, key, value):
        """Store value under key; return 1 if the key is new, else 0."""
        slot = self._keys.get(key)
        if slot is None:
            # brand-new key: bind it to the current (not yet consumed) slot
            self._keys[key] = self._index
            self._values[self._index] = value
            self._links[self._index] = 1
            return 1
        # existing key: overwrite the shared value in place
        self._values[slot] = value
        return 0

    def multi_set(self, keys, value):
        """Bind every key in *keys* to a single shared value slot."""
        fresh = 0
        for key in keys:
            fresh += self.__add_item(key, value)
        if fresh > 0:
            # all fresh keys landed on the same slot; record the link count
            self._links[self._index] += fresh - 1
            self._index += 1

    def get_values(self):
        """Return the stored values (one entry per slot, not per key)."""
        return [value for value in self._values.values()]

    def get_keys(self):
        """Return all registered keys."""
        return [key for key in self._keys.keys()]

    def __getitem__(self, key):
        slot = self._keys.get(key)
        return None if slot is None else self._values[slot]

    def __setitem__(self, key, value):
        if self.__add_item(key, value):
            self._index += 1

    def __delitem__(self, key):
        slot = self._keys.pop(key)
        self._links[slot] -= 1
        if self._links[slot] == 0:
            # last key pointing at this slot: reclaim value and link record
            del self._links[slot]
            del self._values[slot]

    def __str__(self):
        return (f'keys: {self._keys}\n'
                f'values: {self._values}\n'
                f'links: {self._links}')
if __name__ == '__main__':
    # Quick interactive demo of MultiKeyData behaviour.
    print('MultiKeyData Test')  # fixed typo: banner previously read 'MultiKeuData Test'
    data = MultiKeyData()

    # single-key assignment
    data['a'] = 101
    data['b'] = 201
    print("data['b']: ", data['b'])
    print('-------------')
    print('data: ')
    print(data)
    print('-------------')

    # bind several keys to one shared value slot
    data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
    print(data)
    print('-------------')

    # repeating the same multi_set should only update values in place
    data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
    print(data)
    print('-------------')

    # adding one new key alongside existing ones
    data.multi_set(('a', 'b', 'c', 'd', 'e'), 'hello, world!')
    print(data)
    print('-------------')

    # deleting a key drops its slot only when no other key links to it
    del data['e']
    print(data)
    print('-------------')
    print('keys: ', data.get_keys())
    print('values: ', data.get_values())
| 24.534884 | 75 | 0.505687 |
class MultiKeyData(object):
def __init__(self):
self._keys = {}
self._values = {}
self._links = {}
self._index = 0
def __add_item(self, key, value):
if key not in self._keys:
self._keys[key] = self._index
self._values[self._index] = value
self._links[self._index] = 1
return 1
else:
self._values[self._keys[key]] = value
return 0
def multi_set(self, keys, value):
count = 0
for key in keys:
count += self.__add_item(key, value)
if count>0:
self._links[self._index] += count-1
self._index += 1
def get_values(self):
return list(self._values.values())
def get_keys(self):
return list(self._keys.keys())
def __getitem__(self, key):
return self._values[self._keys[key]] if key in self._keys else None
def __setitem__(self, key, value):
self._index += self.__add_item(key, value)
def __delitem__(self, key):
index = self._keys[key]
self._links[index] += -1
del self._keys[key]
if self._links[index]==0:
del self._links[index]
del self._values[index]
def __str__(self):
return f'keys: {self._keys}\n' \
f'values: {self._values}\n' \
f'links: {self._links}'
if __name__ == '__main__':
print('MultiKeuData Test')
data = MultiKeyData()
data['a'] = 101
data['b'] = 201
print("data['b']: ", data['b'])
print('-------------')
print('data: ')
print(data)
print('-------------')
data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
print(data)
print('-------------')
data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
print(data)
print('-------------')
data.multi_set(('a', 'b', 'c', 'd', 'e'), 'hello, world!')
print(data)
print('-------------')
del data['e']
print(data)
print('-------------')
print('keys: ', data.get_keys())
print('values: ', data.get_values())
| true | true |
f72cf995acdc249a52c5780d6bbc9e63253eff06 | 18,720 | py | Python | awesometkinter/utils.py | python-gui-application/AwesomeTkinter | 73f638ac432bafbbd4296588a3d20f27f8570577 | [
"MIT"
] | 61 | 2020-09-16T14:22:08.000Z | 2022-03-18T07:38:15.000Z | awesometkinter/utils.py | python-gui-application/AwesomeTkinter | 73f638ac432bafbbd4296588a3d20f27f8570577 | [
"MIT"
] | 10 | 2020-09-15T10:52:24.000Z | 2021-12-24T00:57:22.000Z | awesometkinter/utils.py | python-gui-application/AwesomeTkinter | 73f638ac432bafbbd4296588a3d20f27f8570577 | [
"MIT"
] | 6 | 2020-11-17T06:33:01.000Z | 2021-11-05T08:04:29.000Z | import base64
import math
import platform
import tkinter as tk
from tkinter import ttk
import PIL
from PIL import Image, ImageTk, ImageColor, ImageDraw, ImageFilter
import hashlib
import io
def identify_operating_system():
    """Identify the operating system this process is running on.

    Returns:
        (str): 'Windows', 'Linux', or 'Darwin' for mac
    """
    os_name = platform.system()
    return os_name
def calc_md5(binary_data):
    """Return the hex MD5 digest (str) of *binary_data* (bytes-like)."""
    return hashlib.md5(binary_data).hexdigest()
def generate_unique_name(*args):
    """get md5 encoding for any arguments that have a string representation

    Falls back to the raw concatenated string if hashing fails
    (e.g. the string cannot be encoded).

    Returns:
        md5 string
    """
    name = ''.join([str(x) for x in args])

    try:
        name = calc_md5(name.encode())
    except Exception:
        # best-effort fallback to the un-hashed name; was a bare `except:`
        # which also swallowed KeyboardInterrupt/SystemExit
        pass

    return name
def invert_color(color):
    """Return the inverted color in '#rrggbb' hex notation."""
    r, g, b, a = color_to_rgba(color)
    # invert each RGB channel; alpha is intentionally discarded
    return rgb2hex(255 - r, 255 - g, 255 - b)
def rgb2hex(r, g, b):
    """Return '#rrggbb' hex notation for the given RGB components (0-255 ints)."""
    return f'#{r:02x}{g:02x}{b:02x}'
def change_img_color(img, new_color, old_color=None):
    """Recolor an image, keeping each pixel's alpha channel.

    Args:
        img: pillow image
        new_color (str): new image color, ex: 'red', '#ff00ff', (255, 0, 0), (255, 0, 0, 255)
        old_color (str): color to be replaced; if omitted, every pixel's RGB is
            replaced with new_color while its alpha is preserved.

    Returns:
        pillow image
    """
    # work in RGBA so every pixel is a 4-tuple
    img = img.convert('RGBA')
    pixels = img.load()

    new_color = color_to_rgba(new_color)
    old_color = color_to_rgba(old_color)

    width, height = img.size
    for y in range(height):
        for x in range(width):
            pixel = pixels[x, y]
            # replace everything when no old_color given, otherwise only exact matches
            if old_color is None or pixel == old_color:
                r, g, b, _ = new_color
                pixels[x, y] = (r, g, b, pixel[-1])

    return img
def resize_img(img, size, keep_aspect_ratio=True):
    """Resize a pillow image.

    Args:
        img (PIL.Image): pillow image object
        size (int or tuple(int, int)): target width, or a (width, height) tuple
        keep_aspect_ratio (bool): when True the height is derived from the
            requested width so the aspect ratio is preserved (any given
            height is ignored).

    Returns:
        (PIL.Image): resized pillow image
    """
    if isinstance(size, int):
        size = (size, size)

    target_width = size[0]
    if keep_aspect_ratio:
        width, height = img.size
        ratio = width / target_width
        target_height = height / ratio
    else:
        target_height = size[1]

    return img.resize((int(target_width), int(target_height)), resample=PIL.Image.LANCZOS)
def mix_images(background_img, foreground_img):
    """Paste one image centered on top of another.

    Args:
        background_img: pillow image used as the backdrop
        foreground_img: pillow image pasted on top (its alpha is used as mask)

    Returns:
        pillow image (the background with the foreground composited on it)
    """
    background_img = background_img.convert('RGBA')
    foreground_img = foreground_img.convert('RGBA')

    fg_w, fg_h = foreground_img.size
    bg_w, bg_h = background_img.size
    # center the foreground on the background
    offset = ((bg_w - fg_w) // 2, (bg_h - fg_h) // 2)

    background_img.paste(foreground_img, offset, mask=foreground_img)
    return background_img
def color_to_rgba(color):
    """Convert color names, hex notation, or RGB(A) sequences to an RGBA tuple.

    Args:
        color (str or tuple or list or None): color e.g. 'white' or '#333' or
            formats like #rgb or #rrggbb, or an (r, g, b) / (r, g, b, a)
            sequence.

    Returns:
        (4-tuple or None): tuple of format (r, g, b, a) e.g. it will return
        (255, 0, 0, 255) for solid red, or None when *color* is None.
    """
    if color is None:
        return None

    if isinstance(color, (tuple, list)):
        if len(color) == 3:
            r, g, b = color
            return (r, g, b, 255)
        # normalize lists to tuples so results compare equal to pixel tuples,
        # e.g. in change_img_color's `pixdata[x, y] == old_color` check
        return tuple(color)

    return ImageColor.getcolor(color, 'RGBA')
def is_dark(color):
    """Rough check whether a color is dark or light.

    Returns:
        (bool): True if color is dark, False if light
    """
    r, g, b, a = color_to_rgba(color)
    # perceived luminance, reference https://stackoverflow.com/a/1855903
    lumina = (0.299 * r + 0.587 * g + 0.114 * b) / 255
    return lumina < 0.6
def calc_font_color(bg):
    """Pick a readable font color for the given background.

    Args:
        bg (str): background color

    Returns:
        (str): 'white' for a dark background, 'black' for a light one
    """
    if is_dark(bg):
        return 'white'
    return 'black'
def calc_contrast_color(color, offset):
    """Calculate a contrasting color.

    Dark colors get a slightly lighter color (by *offset* per channel) and
    light colors get a darker one; channels are clamped to 0..255.

    Args:
        color (str): color
        offset (int): 1 to 254

    Returns:
        (str): color in '#rrggbb' hex notation
    """
    r, g, b, a = color_to_rgba(color)

    if is_dark(color):
        adjusted = [min(channel + offset, 255) for channel in (r, g, b)]
    else:
        adjusted = [max(channel - offset, 0) for channel in (r, g, b)]

    return rgb2hex(*adjusted)
def text_to_image(text, text_color, bg_color, size):
    """Render *text* onto an image — NOT implemented; currently returns None.

    NOTE(review): the commented draft below suggests the intended PIL-based
    implementation (new RGBA image + ImageDraw.text) — confirm before relying
    on this function.
    """
    pass
    # img = Image.new('RGBA', size, color_to_rgba(text_color))
    # draw = ImageDraw.Draw(img)
    # font = ImageFont.truetype(current_path + "s.ttf", size - int(0.15 * width))
    # draw.text((pad, -pad), str(num), font=font, fill=color_to_rgba(bg_color))
def create_pil_image(fp=None, color=None, size=None, b64=None):
    """Create a pillow Image object, optionally recolored and resized.

    Args:
        fp: A filename (string), pathlib.Path object or a file object. The
            file object must implement read(), seek(), and tell() methods,
            and be opened in binary mode.
        color (str): color in tkinter format, e.g. 'red', '#3300ff'; also a
            tuple or list of RGB, e.g. (255, 0, 255)
        size (int or 2-tuple(int, int)): required size as width or
            (width, height)
        b64 (str): base64 representation of an image; ignored when "fp" is
            given

    Returns:
        pillow image object
    """
    # fall back to the base64 payload when no file-like source was given
    if not fp and b64:
        fp = io.BytesIO(base64.b64decode(b64))

    img = Image.open(fp)

    if color:
        img = change_img_color(img, color)

    if size:
        img = resize_img(img, (size, size) if isinstance(size, int) else size)

    return img
def create_image(fp=None, img=None, color=None, size=None, b64=None):
"""create tkinter PhotoImage object
it can modify size and color of original image
Args:
fp: A filename (string), pathlib.Path object or a file object. The file object must implement read(), seek(),
and tell() methods, and be opened in binary mode.
img (pillow image): if exist fp or b64 arguments will be ignored
color (str): color in tkinter format, e.g. 'red', '#3300ff', also color can be a tuple or a list of RGB,
e.g. (255, 0, 255)
size (int or 2-tuple(int, int)): an image required size in a (width, height) tuple
b64 (str): base64 hex representation of an image, if "fp" is given this parameter will be ignored
Returns:
tkinter PhotoImage object
"""
# create pillow image
if not img:
img = create_pil_image(fp, color, size, b64)
# create tkinter images using pillow ImageTk
img = ImageTk.PhotoImage(img)
return img
def create_circle(size=100, thickness=None, color='black', fill=None, antialias=4, offset=0):
"""create high quality circle
the idea to smooth circle line is to draw a bigger size circle and then resize it to the requested size
inspired from https://stackoverflow.com/a/34926008
Args:
size (tuple or list, or int): outer diameter of the circle or width of bounding box
thickness (int): outer line thickness in pixels
color (str): outer line color
fill (str): fill color, default is a transparent fill
antialias (int): used to enhance outer line quality and make it smoother
offset (int): correct cut edges of circle outline
Returns:
PIL image: a circle on a transparent image
"""
if isinstance(size, int):
size = (size, size)
else:
size = size
fill_color = color_to_rgba(fill) or '#0000'
requested_size = size
# calculate thickness to be 2% of circle diameter
thickness = thickness or max(size[0] * 2 // 100, 2)
offset = offset or thickness // 2
# make things bigger
size = [x * antialias for x in requested_size]
thickness *= antialias
# create a transparent image with a big size
img = Image.new(size=size, mode='RGBA', color='#0000')
draw = ImageDraw.Draw(img)
# draw circle with a required color
draw.ellipse([offset, offset, size[0] - offset, size[1] - offset], outline=color, fill=fill_color, width=thickness)
img = img.filter(ImageFilter.BLUR)
# resize image back to the requested size
img = img.resize(requested_size, Image.LANCZOS)
# change color again will enhance quality (weird)
if fill:
img = change_img_color(img, color, old_color=color)
img = change_img_color(img, fill, old_color=fill)
else:
img = change_img_color(img, color)
return img
def apply_gradient(img, gradient='vertical', colors=None, keep_transparency=True):
"""apply gradient color for pillow image
Args:
img: pillow image
gradient (str): vertical, horizontal, diagonal, radial
colors (iterable): 2-colors for the gradient
keep_transparency (bool): keep original transparency
"""
size = img.size
colors = colors or ['black', 'white']
color1 = color_to_rgba(colors[0])
color2 = color_to_rgba(colors[1])
# load pixels data
pixdata = img.load()
if gradient in ('horizontal', 'vertical', 'diagonal'):
for x in range(0, size[0]):
for y in range(0, size[1]):
if gradient == 'horizontal':
ratio1 = x / size[1]
elif gradient == 'vertical':
ratio1 = y / size[1]
elif gradient == 'diagonal':
ratio1 = (y + x) / size[1]
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
# Place the pixel
img.putpixel((x, y), (r, g, b, a))
elif gradient == 'radial': # inspired by https://stackoverflow.com/a/30669765
d = min(size)
radius = d // 2
for x in range(0, size[0]):
for y in range(0, size[1]):
# Find the distance to the center
distance_to_center = math.sqrt((x - size[0] / 2) ** 2 + (y - size[1] / 2) ** 2)
ratio1 = distance_to_center / radius
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
# Place the pixel
img.putpixel((x, y), (r, g, b, a))
return img
def scroll_with_mousewheel(widget, target=None, modifier='Shift', apply_to_children=False):
"""scroll a widget with mouse wheel
Args:
widget: tkinter widget
target: scrollable tkinter widget, in case you need "widget" to catch mousewheel event and make another widget
to scroll, useful for child widget in a scrollable frame
modifier (str): Modifier to use with mousewheel to scroll horizontally, default is shift key
apply_to_children (bool): bind all children
Examples:
scroll_with_mousewheel(my_text_widget, target='my_scrollable_frame')
to make a scrollable canvas:
for w in my_canvas:
scroll_with_mousewheel(w, target=my_canvas)
"""
def _scroll_with_mousewheel(widget):
target_widget = target if target else widget
def scroll_vertically(event):
# scroll vertically ----------------------------------
if event.num == 4 or event.delta > 0:
target_widget.yview_scroll(-1, "unit")
elif event.num == 5 or event.delta < 0:
target_widget.yview_scroll(1, "unit")
return 'break'
# bind events for vertical scroll ----------------------------------------------
if hasattr(target_widget, 'yview_scroll'):
# linux
widget.bind("<Button-4>", scroll_vertically, add='+')
widget.bind("<Button-5>", scroll_vertically, add='+')
# windows and mac
widget.bind("<MouseWheel>", scroll_vertically, add='+')
# scroll horizontally ---------------------------------------
def scroll_horizontally(event):
# scroll horizontally
if event.num == 4 or event.delta > 0:
target_widget.xview_scroll(-10, "unit")
elif event.num == 5 or event.delta < 0:
target_widget.xview_scroll(10, "unit")
return 'break'
# bind events for horizontal scroll ----------------------------------------------
if hasattr(target_widget, 'xview_scroll'):
# linux
widget.bind(f"<{modifier}-Button-4>", scroll_horizontally, add='+')
widget.bind(f"<{modifier}-Button-5>", scroll_horizontally, add='+')
# windows and mac
widget.bind(f"<{modifier}-MouseWheel>", scroll_horizontally, add='+')
_scroll_with_mousewheel(widget)
def handle_children(w):
for child in w.winfo_children():
_scroll_with_mousewheel(child)
# recursive call
if child.winfo_children():
handle_children(child)
if apply_to_children:
handle_children(widget)
def unbind_mousewheel(widget):
"""unbind mousewheel for a specific widget, e.g. combobox which have mouswheel scroll by default"""
# linux
widget.unbind("<Button-4>")
widget.unbind("<Button-5>")
# windows and mac
widget.unbind("<MouseWheel>")
def get_widget_attribute(widget, attr):
"""get an attribute of a widget
Args:
widget: tkinter widget "tk or ttk"
attr (str): attribute or property e.g. 'background'
Returns:
attribute value, e.g. '#ffffff' for a background color
"""
# if it is ttk based will get style applied, it will raise an error if the widget not a ttk
try:
style_name = widget.cget('style') or widget.winfo_class()
s = ttk.Style()
value = s.lookup(style_name, attr)
return value
except:
pass
try:
# if it's a tk widget will use cget
return widget.cget(attr)
except:
pass
return None
def configure_widget(widget, **kwargs):
"""configure widget's attributes"""
for k, v in kwargs.items():
# set widget attribute
try:
# treat as a "tk" widget, it will raise if widget is a "ttk"
widget.config(**{k: v})
continue
except:
pass
try:
# in case above failed, it might be a ttk widget
style_name = widget.cget('style') or widget.winfo_class()
s = ttk.Style()
s.configure(style_name, **{k: v})
except:
pass
def set_default_theme():
# select tkinter theme required for things to be right on windows,
# only 'alt', 'default', or 'classic' can work fine on windows 10
s = ttk.Style()
s.theme_use('default')
def theme_compatibility_check(print_warning=False):
"""check if current theme is compatible
Return:
bool: True or False
"""
compatible_themes = ['alt', 'default', 'classic']
s = ttk.Style()
current_theme = s.theme_use()
if current_theme not in compatible_themes:
if print_warning:
print(f'AwesomeTkinter Warning: Widgets might not work properly under current theme ({current_theme})\n'
f"compatible_themes are ['alt', 'default', 'classic']\n"
f"you can set default theme using atk.set_default_theme() or style.theme_use('default')")
return False
return True
def center_window(window, width=None, height=None, set_geometry_wh=True, reference=None):
"""center a tkinter window on screen's center and set its geometry if width and height given
Args:
window (tk.root or tk.Toplevel): a window to be centered
width (int): window's width
height (int): window's height
set_geometry_wh (bool): include width and height in geometry
reference: tk window e.g parent window as a reference
"""
# update_idletasks will cause a window to show early at the top left corner
# then change position to center in non-proffesional way
# window.update_idletasks()
if width and height:
if reference:
refx = reference.winfo_x() + reference.winfo_width() // 2
refy = reference.winfo_y() + reference.winfo_height() // 2
else:
refx = window.winfo_screenwidth() // 2
refy = window.winfo_screenheight() // 2
x = refx - width // 2
y = refy - height // 2
if set_geometry_wh:
window.geometry(f'{width}x{height}+{x}+{y}')
else:
window.geometry(f'+{x}+{y}')
else:
window.eval('tk::PlaceWindow . center')
__all__ = ['identify_operating_system', 'calc_md5', 'generate_unique_name', 'invert_color', 'rgb2hex',
'change_img_color', 'resize_img', 'mix_images', 'color_to_rgba', 'is_dark', 'calc_font_color',
'calc_contrast_color', 'text_to_image', 'create_pil_image', 'create_image', 'create_circle',
'scroll_with_mousewheel', 'unbind_mousewheel', 'get_widget_attribute', 'ImageTk', 'set_default_theme',
'theme_compatibility_check', 'configure_widget', 'center_window']
| 30.291262 | 119 | 0.595459 | import base64
import math
import platform
import tkinter as tk
from tkinter import ttk
import PIL
from PIL import Image, ImageTk, ImageColor, ImageDraw, ImageFilter
import hashlib
import io
def identify_operating_system():
return platform.system()
def calc_md5(binary_data):
return hashlib.md5(binary_data).hexdigest()
def generate_unique_name(*args):
name = ''.join([str(x) for x in args])
try:
name = calc_md5(name.encode())
except:
pass
return name
def invert_color(color):
color = color_to_rgba(color)
r, g, b, a = color
inverted_color = rgb2hex(255 - r, 255 - g, 255 - b)
return inverted_color
def rgb2hex(r, g, b):
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
def change_img_color(img, new_color, old_color=None):
img = img.convert('RGBA')
pixdata = img.load()
new_color = color_to_rgba(new_color)
old_color = color_to_rgba(old_color)
for y in range(img.size[1]):
for x in range(img.size[0]):
alpha = pixdata[x, y][-1]
if old_color:
if pixdata[x, y] == old_color:
r, g, b, _ = new_color
pixdata[x, y] = (r, g, b, alpha)
else:
r, g, b, _ = new_color
pixdata[x, y] = (r, g, b, alpha)
return img
def resize_img(img, size, keep_aspect_ratio=True):
if isinstance(size, int):
size = (size, size)
width, height = img.size
requested_width = size[0]
if keep_aspect_ratio:
ratio = width / requested_width
requested_height = height / ratio
else:
requested_height = size[1]
size = (int(requested_width), int(requested_height))
img = img.resize(size, resample=PIL.Image.LANCZOS)
return img
def mix_images(background_img, foreground_img):
background_img = background_img.convert('RGBA')
foreground_img = foreground_img.convert('RGBA')
img_w, img_h = foreground_img.size
bg_w, bg_h = background_img.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background_img.paste(foreground_img, offset, mask=foreground_img)
return background_img
def color_to_rgba(color):
if color is None:
return None
if isinstance(color, (tuple, list)):
if len(color) == 3:
r, g, b = color
color = (r, g, b, 255)
return color
else:
return ImageColor.getcolor(color, 'RGBA')
def is_dark(color):
r, g, b, a = color_to_rgba(color)
lumina = (0.299 * r + 0.587 * g + 0.114 * b) / 255
return True if lumina < 0.6 else False
def calc_font_color(bg):
return 'white' if is_dark(bg) else 'black'
def calc_contrast_color(color, offset):
r, g, b, a = color_to_rgba(color)
if is_dark(color):
new_color = [x + offset if x + offset <= 255 else 255 for x in (r, g, b)]
else:
new_color = [x - offset if x - offset >= 0 else 0 for x in (r, g, b)]
return rgb2hex(*new_color)
def text_to_image(text, text_color, bg_color, size):
pass
def create_pil_image(fp=None, color=None, size=None, b64=None):
if not fp and b64:
fp = io.BytesIO(base64.b64decode(b64))
img = Image.open(fp)
if color:
img = change_img_color(img, color)
if size:
if isinstance(size, int):
size = (size, size)
img = resize_img(img, size)
return img
def create_image(fp=None, img=None, color=None, size=None, b64=None):
if not img:
img = create_pil_image(fp, color, size, b64)
img = ImageTk.PhotoImage(img)
return img
def create_circle(size=100, thickness=None, color='black', fill=None, antialias=4, offset=0):
if isinstance(size, int):
size = (size, size)
else:
size = size
fill_color = color_to_rgba(fill) or '#0000'
requested_size = size
thickness = thickness or max(size[0] * 2 // 100, 2)
offset = offset or thickness // 2
size = [x * antialias for x in requested_size]
thickness *= antialias
img = Image.new(size=size, mode='RGBA', color='#0000')
draw = ImageDraw.Draw(img)
draw.ellipse([offset, offset, size[0] - offset, size[1] - offset], outline=color, fill=fill_color, width=thickness)
img = img.filter(ImageFilter.BLUR)
img = img.resize(requested_size, Image.LANCZOS)
if fill:
img = change_img_color(img, color, old_color=color)
img = change_img_color(img, fill, old_color=fill)
else:
img = change_img_color(img, color)
return img
def apply_gradient(img, gradient='vertical', colors=None, keep_transparency=True):
size = img.size
colors = colors or ['black', 'white']
color1 = color_to_rgba(colors[0])
color2 = color_to_rgba(colors[1])
pixdata = img.load()
if gradient in ('horizontal', 'vertical', 'diagonal'):
for x in range(0, size[0]):
for y in range(0, size[1]):
if gradient == 'horizontal':
ratio1 = x / size[1]
elif gradient == 'vertical':
ratio1 = y / size[1]
elif gradient == 'diagonal':
ratio1 = (y + x) / size[1]
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
img.putpixel((x, y), (r, g, b, a))
elif gradient == 'radial':
d = min(size)
radius = d // 2
for x in range(0, size[0]):
for y in range(0, size[1]):
distance_to_center = math.sqrt((x - size[0] / 2) ** 2 + (y - size[1] / 2) ** 2)
ratio1 = distance_to_center / radius
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
img.putpixel((x, y), (r, g, b, a))
return img
def scroll_with_mousewheel(widget, target=None, modifier='Shift', apply_to_children=False):
def _scroll_with_mousewheel(widget):
target_widget = target if target else widget
def scroll_vertically(event):
if event.num == 4 or event.delta > 0:
target_widget.yview_scroll(-1, "unit")
elif event.num == 5 or event.delta < 0:
target_widget.yview_scroll(1, "unit")
return 'break'
if hasattr(target_widget, 'yview_scroll'):
widget.bind("<Button-4>", scroll_vertically, add='+')
widget.bind("<Button-5>", scroll_vertically, add='+')
widget.bind("<MouseWheel>", scroll_vertically, add='+')
def scroll_horizontally(event):
if event.num == 4 or event.delta > 0:
target_widget.xview_scroll(-10, "unit")
elif event.num == 5 or event.delta < 0:
target_widget.xview_scroll(10, "unit")
return 'break'
if hasattr(target_widget, 'xview_scroll'):
widget.bind(f"<{modifier}-Button-4>", scroll_horizontally, add='+')
widget.bind(f"<{modifier}-Button-5>", scroll_horizontally, add='+')
widget.bind(f"<{modifier}-MouseWheel>", scroll_horizontally, add='+')
_scroll_with_mousewheel(widget)
def handle_children(w):
for child in w.winfo_children():
_scroll_with_mousewheel(child)
if child.winfo_children():
handle_children(child)
if apply_to_children:
handle_children(widget)
def unbind_mousewheel(widget):
widget.unbind("<Button-4>")
widget.unbind("<Button-5>")
widget.unbind("<MouseWheel>")
def get_widget_attribute(widget, attr):
try:
style_name = widget.cget('style') or widget.winfo_class()
s = ttk.Style()
value = s.lookup(style_name, attr)
return value
except:
pass
try:
return widget.cget(attr)
except:
pass
return None
def configure_widget(widget, **kwargs):
for k, v in kwargs.items():
# set widget attribute
try:
# treat as a "tk" widget, it will raise if widget is a "ttk"
widget.config(**{k: v})
continue
except:
pass
try:
# in case above failed, it might be a ttk widget
style_name = widget.cget('style') or widget.winfo_class()
s = ttk.Style()
s.configure(style_name, **{k: v})
except:
pass
def set_default_theme():
# select tkinter theme required for things to be right on windows,
# only 'alt', 'default', or 'classic' can work fine on windows 10
s = ttk.Style()
s.theme_use('default')
def theme_compatibility_check(print_warning=False):
compatible_themes = ['alt', 'default', 'classic']
s = ttk.Style()
current_theme = s.theme_use()
if current_theme not in compatible_themes:
if print_warning:
print(f'AwesomeTkinter Warning: Widgets might not work properly under current theme ({current_theme})\n'
f"compatible_themes are ['alt', 'default', 'classic']\n"
f"you can set default theme using atk.set_default_theme() or style.theme_use('default')")
return False
return True
def center_window(window, width=None, height=None, set_geometry_wh=True, reference=None):
# update_idletasks will cause a window to show early at the top left corner
# then change position to center in non-proffesional way
# window.update_idletasks()
if width and height:
if reference:
refx = reference.winfo_x() + reference.winfo_width() // 2
refy = reference.winfo_y() + reference.winfo_height() // 2
else:
refx = window.winfo_screenwidth() // 2
refy = window.winfo_screenheight() // 2
x = refx - width // 2
y = refy - height // 2
if set_geometry_wh:
window.geometry(f'{width}x{height}+{x}+{y}')
else:
window.geometry(f'+{x}+{y}')
else:
window.eval('tk::PlaceWindow . center')
__all__ = ['identify_operating_system', 'calc_md5', 'generate_unique_name', 'invert_color', 'rgb2hex',
'change_img_color', 'resize_img', 'mix_images', 'color_to_rgba', 'is_dark', 'calc_font_color',
'calc_contrast_color', 'text_to_image', 'create_pil_image', 'create_image', 'create_circle',
'scroll_with_mousewheel', 'unbind_mousewheel', 'get_widget_attribute', 'ImageTk', 'set_default_theme',
'theme_compatibility_check', 'configure_widget', 'center_window']
| true | true |
f72cfc43e5ce6dc5144f575469244b87366239db | 16,996 | py | Python | arguments.py | wakafengfan/CPM-1-Finetune | b2c30bd94df31bcd6ee75ba90c347113563d4075 | [
"MIT"
] | 60 | 2020-12-14T01:51:49.000Z | 2021-06-14T05:54:45.000Z | arguments.py | wakafengfan/CPM-1-Finetune | b2c30bd94df31bcd6ee75ba90c347113563d4075 | [
"MIT"
] | 29 | 2020-12-16T13:04:52.000Z | 2021-06-10T12:29:11.000Z | arguments.py | wakafengfan/CPM-1-Finetune | b2c30bd94df31bcd6ee75ba90c347113563d4075 | [
"MIT"
] | 11 | 2020-12-24T07:17:39.000Z | 2021-06-11T07:37:22.000Z | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""argparser configuration"""
import argparse
import os
import torch
import deepspeed
def add_model_config_args(parser):
"""Model arguments"""
group = parser.add_argument_group('model', 'model configuration')
group.add_argument('--pretrained-bert', action='store_true',
help='use a pretrained bert-large-uncased model instead'
'of initializing from scratch. See '
'--tokenizer-model-type to specify which pretrained '
'BERT model to use')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='dropout probability for attention weights')
group.add_argument('--num-attention-heads', type=int, default=16,
help='num of transformer attention heads')
group.add_argument('--hidden-size', type=int, default=1024,
help='tansformer hidden size')
group.add_argument('--intermediate-size', type=int, default=None,
help='transformer embedding dimension for FFN'
'set to 4*`--hidden-size` if it is None')
group.add_argument('--num-layers', type=int, default=24,
help='num decoder layers')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='layer norm epsilon')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='dropout probability for hidden state transformer')
group.add_argument('--max-position-embeddings', type=int, default=512,
help='maximum number of position embeddings to use')
group.add_argument('--vocab-size', type=int, default=30522,
help='vocab size to use for non-character-level '
'tokenization. This value will only be used when '
'creating a tokenizer')
group.add_argument('--deep-init', action='store_true',
help='initialize bert model similar to gpt2 model.'
'scales initialization of projection layers by a '
'factor of 1/sqrt(2N). Necessary to train bert '
'models larger than BERT-Large.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
help='Pad the vocab size to be divisible by this value.'
'This is added for computational efficieny reasons.')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
return parser
def add_fp16_config_args(parser):
"""Mixed precision arguments."""
group = parser.add_argument_group('fp16', 'fp16 configurations')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode')
group.add_argument('--fp32-embedding', action='store_true',
help='embedding in fp32')
group.add_argument('--fp32-layernorm', action='store_true',
help='layer norm in fp32')
group.add_argument('--fp32-tokentypes', action='store_true',
help='embedding token types in fp32')
group.add_argument('--fp32-allreduce', action='store_true',
help='all-reduce in fp32')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic'
'loss scaling is used.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale')
group.add_argument('--min-scale', type=float, default=1,
help='Minimum loss scale for dynamic loss scale')
return parser
def add_training_args(parser):
"""Training arguments."""
group = parser.add_argument_group('train', 'training configurations')
group.add_argument('--do_train', action='store_true',
help="Do training")
group.add_argument('--do_eval', action='store_true',
help="Do evaluation")
group.add_argument('--zero_shot', action="store_true",
help="do zero-shot")
group.add_argument('--batch-size', type=int, default=4,
help='Data Loader batch size')
group.add_argument('--weight-decay', type=float, default=0.01,
help='weight decay coefficient for L2 regularization')
group.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activation to allow for training '
'with larger models and sequences')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument('--clip-grad', type=float, default=1.0,
help='gradient clipping')
group.add_argument('--epoch', type=int, default=10,
help='total number of iterations to train over all training runs')
group.add_argument('--log-interval', type=int, default=100,
help='report interval')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after this many new iterations.')
group.add_argument('--seed', type=int, default=1234,
help='random seed')
# Batch prodecuer arguments
group.add_argument('--reset-position-ids', action='store_true',
help='Reset posistion ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention maske after '
'end-of-document token.')
# Learning rate.
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay LR over,'
' If None defaults to `--train-iters`*`--epochs`')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine', 'exponential'],
help='learning rate decay function')
group.add_argument('--lr', type=float, default=1.0e-4,
help='initial learning rate')
group.add_argument('--warmup', type=float, default=0.01,
help='percentage of data to warmup on (.01 = 1% of all '
'training iters). Default 0.01')
# model checkpointing
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=5000,
help='number of iterations between saves')
group.add_argument('--no-save-optim', action='store_true',
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true',
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Path to a directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true',
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true',
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
# distributed training args
group.add_argument('--distributed-backend', default='nccl',
help='which backend to use for distributed '
'training. One of [gloo, nccl]')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--results_dir', type=str, default=None,
help='The dir to save the model.')
group.add_argument('--model_name', type=str, default="test",
help="The name you give to the model.")
# eval
group.add_argument('--eval_ckpt_path', type=str, default=None,
help='The checkpoint path used for evaluation')
return parser
def add_evaluation_args(parser):
"""Evaluation arguments."""
group = parser.add_argument_group('validation', 'validation configurations')
group.add_argument('--eval-batch-size', type=int, default=None,
help='Data Loader batch size for evaluation datasets.'
'Defaults to `--batch-size`')
group.add_argument('--eval-iters', type=int, default=100,
help='number of iterations to run for evaluation'
'validation/test for')
group.add_argument('--eval-interval', type=int, default=1000,
help='interval between running evaluation on validation set')
group.add_argument('--eval-seq-length', type=int, default=None,
help='Maximum sequence length to process for '
'evaluation. Defaults to `--seq-length`')
group.add_argument('--eval-max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use for '
'evaluation. Defaults to '
'math.ceil(`--eval-seq-length`*.15/10)*10')
group.add_argument('--overlapping-eval', type=int, default=32,
help='sliding window for overlapping eval ')
group.add_argument('--cloze-eval', action='store_true',
help='Evaluation dataset from `--valid-data` is a cloze task')
group.add_argument('--eval-hf', action='store_true',
help='perform evaluation with huggingface openai model.'
'use `--load` to specify weights path to be loaded')
group.add_argument('--load-openai', action='store_true',
help='load openai weights into our model. Use `--load` '
'to specify weights path to be loaded')
return parser
def add_text_generate_args(parser):
"""Text generate arguments."""
group = parser.add_argument_group('Text generation', 'configurations')
group.add_argument("--temperature", type=float, default=1.0)
group.add_argument("--top_p", type=float, default=0.0)
group.add_argument("--top_k", type=int, default=0)
group.add_argument("--out-seq-length", type=int, default=256)
return parser
def add_data_args(parser):
"""Train/valid/test data arguments."""
group = parser.add_argument_group('data', 'data configurations')
group.add_argument('--data_dir', type=str, required=True,
help="Training data dir")
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--model-parallel-size', type=int, default=1,
help='size of the model parallel.')
group.add_argument('--shuffle', action='store_true',
help='Shuffle data. Shuffling is deterministic '
'based on seed and current epoch.')
group.add_argument('--use-npy-data-loader', action='store_true',
help='Use the numpy data loader. If set, then'
'train-data-path, val-data-path, and test-data-path'
'should also be provided.')
group.add_argument('--num-workers', type=int, default=2,
help="""Number of workers to use for dataloading""")
group.add_argument('--tokenizer-model-type', type=str,
default='bert-large-uncased',
help="Model type to use for sentencepiece tokenization \
(one of ['bpe', 'char', 'unigram', 'word']) or \
bert vocab to use for BertWordPieceTokenizer (one of \
['bert-large-uncased', 'bert-large-cased', etc.])")
group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',
help='path used to save/load sentencepiece tokenization '
'models')
group.add_argument('--tokenizer-type', type=str,
default='BertWordPieceTokenizer',
choices=['CharacterLevelTokenizer',
'SentencePieceTokenizer',
'BertWordPieceTokenizer',
'GPT2BPETokenizer'],
help='what type of tokenizer to use')
group.add_argument("--cache-dir", default=None, type=str,
help="Where to store pre-trained BERT downloads")
group.add_argument('--use-tfrecords', action='store_true',
help='load `--train-data`, `--valid-data`, '
'`--test-data` from BERT tf records instead of '
'normal data pipeline')
group.add_argument('--seq-length', type=int, default=512,
help="Maximum sequence length to process")
group.add_argument('--max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use per sequence.'
'Defaults to math.ceil(`--seq-length`*.15/10)*10.'
'MUST BE SPECIFIED IF `--use-tfrecords` is True.')
return parser
def get_args():
    """Build the complete argument parser, parse sys.argv, and post-process
    the resulting namespace (distributed ranks from env vars, loss-scaling
    mode, fp32 fallbacks).

    Returns:
        argparse.Namespace with all model/training/eval/data/DeepSpeed options.
    """
    parser = argparse.ArgumentParser(description='PyTorch BERT Model')
    # Chain every argument group onto the same parser instance.
    for add_group in (add_model_config_args, add_fp16_config_args,
                      add_training_args, add_evaluation_args,
                      add_text_generate_args, add_data_args):
        parser = add_group(parser)
    # Include DeepSpeed configuration arguments
    parser = deepspeed.add_config_arguments(parser)
    args = parser.parse_args()
    if not args.data_dir:
        print('WARNING: No data specified')
    args.cuda = torch.cuda.is_available()
    # Default rank/world-size come from the torch.distributed launcher env.
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
        # Launched via (OpenMPI) mpirun: derive ranks from MPI env vars,
        # possibly combined with Slurm node information.
        local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
        local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
        num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
        nodeid = int(os.getenv('SLURM_NODEID', '0'))
        args.local_rank = local_rank
        args.rank = nodeid * local_size + local_rank
        args.world_size = num_nodes * local_size
    # Model parallelism can never exceed the number of processes.
    args.model_parallel_size = min(args.model_parallel_size, args.world_size)
    if args.rank == 0:
        print('using world size: {} and model-parallel size: {} '.format(
            args.world_size, args.model_parallel_size))
    # With no static --loss-scale given, fall back to dynamic loss scaling.
    args.dynamic_loss_scale = args.loss_scale is None
    if args.dynamic_loss_scale and args.rank == 0:
        print(' > using dynamic loss scaling')
    # The args fp32_* or fp16_* meant to be active when the
    # args fp16 is set. So the default behaviour should all
    # be false.
    if not args.fp16:
        args.fp32_embedding = False
        args.fp32_tokentypes = False
        args.fp32_layernorm = False
    return args
| 49.695906 | 89 | 0.600435 |
import argparse
import os
import torch
import deepspeed
def add_model_config_args(parser):
group = parser.add_argument_group('model', 'model configuration')
group.add_argument('--pretrained-bert', action='store_true',
help='use a pretrained bert-large-uncased model instead'
'of initializing from scratch. See '
'--tokenizer-model-type to specify which pretrained '
'BERT model to use')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='dropout probability for attention weights')
group.add_argument('--num-attention-heads', type=int, default=16,
help='num of transformer attention heads')
group.add_argument('--hidden-size', type=int, default=1024,
help='tansformer hidden size')
group.add_argument('--intermediate-size', type=int, default=None,
help='transformer embedding dimension for FFN'
'set to 4*`--hidden-size` if it is None')
group.add_argument('--num-layers', type=int, default=24,
help='num decoder layers')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='layer norm epsilon')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='dropout probability for hidden state transformer')
group.add_argument('--max-position-embeddings', type=int, default=512,
help='maximum number of position embeddings to use')
group.add_argument('--vocab-size', type=int, default=30522,
help='vocab size to use for non-character-level '
'tokenization. This value will only be used when '
'creating a tokenizer')
group.add_argument('--deep-init', action='store_true',
help='initialize bert model similar to gpt2 model.'
'scales initialization of projection layers by a '
'factor of 1/sqrt(2N). Necessary to train bert '
'models larger than BERT-Large.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
help='Pad the vocab size to be divisible by this value.'
'This is added for computational efficieny reasons.')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
return parser
def add_fp16_config_args(parser):
group = parser.add_argument_group('fp16', 'fp16 configurations')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode')
group.add_argument('--fp32-embedding', action='store_true',
help='embedding in fp32')
group.add_argument('--fp32-layernorm', action='store_true',
help='layer norm in fp32')
group.add_argument('--fp32-tokentypes', action='store_true',
help='embedding token types in fp32')
group.add_argument('--fp32-allreduce', action='store_true',
help='all-reduce in fp32')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic'
'loss scaling is used.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale')
group.add_argument('--min-scale', type=float, default=1,
help='Minimum loss scale for dynamic loss scale')
return parser
def add_training_args(parser):
group = parser.add_argument_group('train', 'training configurations')
group.add_argument('--do_train', action='store_true',
help="Do training")
group.add_argument('--do_eval', action='store_true',
help="Do evaluation")
group.add_argument('--zero_shot', action="store_true",
help="do zero-shot")
group.add_argument('--batch-size', type=int, default=4,
help='Data Loader batch size')
group.add_argument('--weight-decay', type=float, default=0.01,
help='weight decay coefficient for L2 regularization')
group.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activation to allow for training '
'with larger models and sequences')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument('--clip-grad', type=float, default=1.0,
help='gradient clipping')
group.add_argument('--epoch', type=int, default=10,
help='total number of iterations to train over all training runs')
group.add_argument('--log-interval', type=int, default=100,
help='report interval')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after this many new iterations.')
group.add_argument('--seed', type=int, default=1234,
help='random seed')
group.add_argument('--reset-position-ids', action='store_true',
help='Reset posistion ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention maske after '
'end-of-document token.')
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay LR over,'
' If None defaults to `--train-iters`*`--epochs`')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine', 'exponential'],
help='learning rate decay function')
group.add_argument('--lr', type=float, default=1.0e-4,
help='initial learning rate')
group.add_argument('--warmup', type=float, default=0.01,
help='percentage of data to warmup on (.01 = 1% of all '
'training iters). Default 0.01')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=5000,
help='number of iterations between saves')
group.add_argument('--no-save-optim', action='store_true',
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true',
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Path to a directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true',
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true',
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
group.add_argument('--distributed-backend', default='nccl',
help='which backend to use for distributed '
'training. One of [gloo, nccl]')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--results_dir', type=str, default=None,
help='The dir to save the model.')
group.add_argument('--model_name', type=str, default="test",
help="The name you give to the model.")
group.add_argument('--eval_ckpt_path', type=str, default=None,
help='The checkpoint path used for evaluation')
return parser
def add_evaluation_args(parser):
group = parser.add_argument_group('validation', 'validation configurations')
group.add_argument('--eval-batch-size', type=int, default=None,
help='Data Loader batch size for evaluation datasets.'
'Defaults to `--batch-size`')
group.add_argument('--eval-iters', type=int, default=100,
help='number of iterations to run for evaluation'
'validation/test for')
group.add_argument('--eval-interval', type=int, default=1000,
help='interval between running evaluation on validation set')
group.add_argument('--eval-seq-length', type=int, default=None,
help='Maximum sequence length to process for '
'evaluation. Defaults to `--seq-length`')
group.add_argument('--eval-max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use for '
'evaluation. Defaults to '
'math.ceil(`--eval-seq-length`*.15/10)*10')
group.add_argument('--overlapping-eval', type=int, default=32,
help='sliding window for overlapping eval ')
group.add_argument('--cloze-eval', action='store_true',
help='Evaluation dataset from `--valid-data` is a cloze task')
group.add_argument('--eval-hf', action='store_true',
help='perform evaluation with huggingface openai model.'
'use `--load` to specify weights path to be loaded')
group.add_argument('--load-openai', action='store_true',
help='load openai weights into our model. Use `--load` '
'to specify weights path to be loaded')
return parser
def add_text_generate_args(parser):
    """Register text-generation sampling options on `parser` and return it."""
    group = parser.add_argument_group('Text generation', 'configurations')
    # (flag, value type, default) for each sampling knob.
    for flag, value_type, default in (
            ("--temperature", float, 1.0),
            ("--top_p", float, 0.0),
            ("--top_k", int, 0),
            ("--out-seq-length", int, 256)):
        group.add_argument(flag, type=value_type, default=default)
    return parser
def add_data_args(parser):
group = parser.add_argument_group('data', 'data configurations')
group.add_argument('--data_dir', type=str, required=True,
help="Training data dir")
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--model-parallel-size', type=int, default=1,
help='size of the model parallel.')
group.add_argument('--shuffle', action='store_true',
help='Shuffle data. Shuffling is deterministic '
'based on seed and current epoch.')
group.add_argument('--use-npy-data-loader', action='store_true',
help='Use the numpy data loader. If set, then'
'train-data-path, val-data-path, and test-data-path'
'should also be provided.')
group.add_argument('--num-workers', type=int, default=2,
help="""Number of workers to use for dataloading""")
group.add_argument('--tokenizer-model-type', type=str,
default='bert-large-uncased',
help="Model type to use for sentencepiece tokenization \
(one of ['bpe', 'char', 'unigram', 'word']) or \
bert vocab to use for BertWordPieceTokenizer (one of \
['bert-large-uncased', 'bert-large-cased', etc.])")
group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',
help='path used to save/load sentencepiece tokenization '
'models')
group.add_argument('--tokenizer-type', type=str,
default='BertWordPieceTokenizer',
choices=['CharacterLevelTokenizer',
'SentencePieceTokenizer',
'BertWordPieceTokenizer',
'GPT2BPETokenizer'],
help='what type of tokenizer to use')
group.add_argument("--cache-dir", default=None, type=str,
help="Where to store pre-trained BERT downloads")
group.add_argument('--use-tfrecords', action='store_true',
help='load `--train-data`, `--valid-data`, '
'`--test-data` from BERT tf records instead of '
'normal data pipeline')
group.add_argument('--seq-length', type=int, default=512,
help="Maximum sequence length to process")
group.add_argument('--max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use per sequence.'
'Defaults to math.ceil(`--seq-length`*.15/10)*10.'
'MUST BE SPECIFIED IF `--use-tfrecords` is True.')
return parser
def get_args():
parser = argparse.ArgumentParser(description='PyTorch BERT Model')
parser = add_model_config_args(parser)
parser = add_fp16_config_args(parser)
parser = add_training_args(parser)
parser = add_evaluation_args(parser)
parser = add_text_generate_args(parser)
parser = add_data_args(parser)
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
if not args.data_dir:
print('WARNING: No data specified')
args.cuda = torch.cuda.is_available()
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
nodeid = int(os.getenv('SLURM_NODEID', '0'))
args.local_rank = local_rank
args.rank = nodeid*local_size + local_rank
args.world_size = num_nodes*local_size
args.model_parallel_size = min(args.model_parallel_size, args.world_size)
if args.rank == 0:
print('using world size: {} and model-parallel size: {} '.format(
args.world_size, args.model_parallel_size))
args.dynamic_loss_scale = False
if args.loss_scale is None:
args.dynamic_loss_scale = True
if args.rank == 0:
print(' > using dynamic loss scaling')
if not args.fp16:
args.fp32_embedding = False
args.fp32_tokentypes = False
args.fp32_layernorm = False
return args
| true | true |
f72cfd482c15f282554b38514a3e096adee885e0 | 22,294 | py | Python | configs/video_detect.py | me714/Dwin_Transformer | 825a63869c46db4ef83ccc31d479bbd971ffd47c | [
"Apache-2.0"
] | null | null | null | configs/video_detect.py | me714/Dwin_Transformer | 825a63869c46db4ef83ccc31d479bbd971ffd47c | [
"Apache-2.0"
] | null | null | null | configs/video_detect.py | me714/Dwin_Transformer | 825a63869c46db4ef83ccc31d479bbd971ffd47c | [
"Apache-2.0"
] | 1 | 2022-03-15T06:21:57.000Z | 2022-03-15T06:21:57.000Z | import argparse
import math
import os
import shutil
import time
import numpy as np
from pathlib import Path
from ensemble_boxes import *
import copy
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import matplotlib.pyplot as plt
from itertools import combinations
import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, xywh2xyxy, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from mmdet.apis import init_detector, inference_detector
# Input video to sample frames from (hard-coded absolute path).
fcap = cv2.VideoCapture('/root/Swin-Transformer-Object-Detection/demo/VID_20210909_164000.mp4')
# Project root used for configs, checkpoints, and the extracted-frame output dir.
data_root = '/root/Swin-Transformer-Object-Detection/'
config_file = data_root + 'configs/swin.py'
checkpoint_file = data_root + '2021_7_28/epoch_50.pth'
# Build the Swin-Transformer detector from a config file and a checkpoint file.
swin_model = init_detector(config_file, checkpoint_file, device='cuda:0')
# get_image() keeps one frame out of every `framerate` frames.
framerate = 10
def get_image(fcap, framerate):
    """Sample frames from an opened cv2.VideoCapture to JPEG files.

    Every `framerate`-th frame is written to
    `<data_root>/demo/video_frame/<frame_index>.jpg`; the frame index is
    used as the file name.  Reads until the stream is exhausted, then
    releases the capture.

    Args:
        fcap: an opened cv2.VideoCapture.
        framerate: keep one frame out of every `framerate` frames.
    """
    c = 1
    while True:
        ret, frame = fcap.read()
        if not ret:
            # End of stream (or a read failure): stop sampling.
            print('the task is end')
            break
        if c % framerate == 0:
            cv2.imwrite(data_root + 'demo/video_frame/' + str(c) + '.jpg', frame)
        c += 1
        # Fix: dropped the original per-frame cv2.waitKey(0).  waitKey only
        # services HighGUI windows (none are created here), so it was dead
        # code at best and, on builds that do process events, an indefinite
        # block waiting for a key press on every single frame.
    fcap.release()
def filterbox_iou(rec1, rec2):
    """Compute the intersection-over-union of two axis-aligned boxes.

    Both boxes are given as (y0, x0, y1, x1) = (top, left, bottom, right).

    Returns:
        IoU in [0, 1]; the integer 0 when the boxes do not overlap.
    """
    # Individual box areas.
    area_a = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    area_b = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
    # Edges of the intersection rectangle.
    inter_top = max(rec1[0], rec2[0])
    inter_left = max(rec1[1], rec2[1])
    inter_bottom = min(rec1[2], rec2[2])
    inter_right = min(rec1[3], rec2[3])
    # Degenerate or empty intersection -> no overlap.
    if inter_left >= inter_right or inter_top >= inter_bottom:
        return 0
    inter_area = (inter_right - inter_left) * (inter_bottom - inter_top)
    # IoU = intersection / union.
    return (inter_area / (area_a + area_b - inter_area)) * 1.0
def detect(save_img=False):
    """Run the two-model (YOLOv5 + Swin) detection pipeline over `opt.source`.

    For every sampled frame: run YOLO inference with NMS, run the Swin
    detector on the same image, merge the two detection sets (Swin keeps
    the classes listed in `swin_object`, YOLO keeps the rest), draw and
    save the annotated image, and accumulate per-frame object counts.
    After the loop a summary report figure is written to
    /root/Downloads/report.jpg.

    Reads all runtime options from the module-level `opt` namespace and the
    global `swin_model`; `save_img` is forced True for non-webcam sources.
    """
    out, source, weights, view_img, save_txt, imgsz = \
        opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
    # Initialize
    set_logging()
    device = select_device(opt.device)
    if os.path.exists(out):  # output dir
        shutil.rmtree(out)  # delete dir
    os.makedirs(out)  # make new dir
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16
    # Second-stage classifier (disabled)
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)
    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    # Per-run accumulators used by the final report.
    f_detect = 0
    counting_img = 0
    full_detect = 0
    full_truth = 0
    img_dict = {}
    frame_key = 0
    dict2 = {}
    for path, img, im0s, vid_cap in dataset:
        img_before = img
        img = torch.from_numpy(img).to(device)
        # img_before = img
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        # Apply NMS
        nms_pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=1,
                                       agnostic=opt.agnostic_nms)
        # nms_pred = cross_class_nms(nms_pred, opt.conf_thres, 0.9, agnostic=opt.agnostic_nms)
        t2 = time_synchronized()
        # Process detections
        for i, det in enumerate(nms_pred):  # detections per image
            print(det)
            dict1 = {'total': 0}
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s
            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            # Run the Swin detector on the same frame (re-read from disk).
            swin_img = cv2.imread(p)
            result = inference_detector(swin_model, swin_img)
            swin_bbox_list, swin_score_list, swin_label_list = swin_model.show_result(swin_img, result,
                                                                                     out_file=save_path)
            yolo_bbox_list = det[:, 0:4].cpu().detach().numpy().tolist()
            yolo_score_list = det[:, 4].cpu().detach().numpy().tolist()
            yolo_label_list = det[:, 5].cpu().detach().numpy().tolist()
            # The two models use different class orderings; map Swin label
            # indices onto YOLO's ordering via the shared class names.
            swin_list = ['txd', 'jgc', 'xbs', 'wbs', 'c-pg', 'lwz', 'tc', 'a-pg', 'b-pg', 'g-pg', 'z-pg', 'bbt', 'lxb',
                         'xgg', 'lsd', 'wt']
            yolo_list = ['wt', 'jgc', 'lsd', 'lxb', 'bbt', 'xgg', 'txd', 'lwz', 'tc', 'xbs', 'wbs', 'a-pg', 'b-pg',
                         'c-pg', 'g-pg', 'z-pg']
            swin_trueLabel_list = []
            # NOTE(review): this loop variable shadows the enumerate index `i`
            # above; harmless here because `i` is not used again afterwards.
            for i in swin_label_list:
                swin_trueLabel_list.append(yolo_list.index(swin_list[i]))
            # NMS for different class, high thresh
            # nms_bbox, nms_score, nms_label = yolo_bbox_list, yolo_score_list, yolo_label_list
            # nms_bbox, nms_score, nms_label = torch.from_numpy(np.array(nms_bbox)).reshape(-1, 4), torch.from_numpy(
            #     np.array(nms_score)).reshape(-1, 1), torch.from_numpy(np.array(nms_label)).reshape(-1, 1)
            # two_det = torch.cat((torch.cat((nms_bbox, nms_score), 1), nms_label), 1)
            # normalize
            # Boxes would need to be normalized to [0, 1] here (for weighted-box fusion).
            # for i, single in enumerate(swin_bbox_list):
            #     swin_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]
            #
            # for i, single in enumerate(yolo_bbox_list):
            #     yolo_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]
            # Classes (indices into yolo_list) that the Swin model is trusted for.
            swin_object = [0, 1, 2, 3, 6, 7, 8, 9, 10]  # from yolo_list:wt lsd lwz tc xbs wbs
            # yolo_list = ['0wt', 'jgc', '2lsd', 'lxb', '4bbt', 'xgg', '6txd', 'lwz', '8tc', 'xbs', '10wbs', 'a-pg', '12b-pg',
            #              'c-pg', '14g-pg', 'z-pg']
            # Drop YOLO detections for Swin-trusted classes...
            yolo_label_list_copy = yolo_label_list.copy()
            swin_trueLabel_list_copy = swin_trueLabel_list.copy()
            for i in yolo_label_list_copy:
                if i in swin_object:
                    index1 = yolo_label_list.index(i)
                    del yolo_bbox_list[index1]
                    del yolo_score_list[index1]
                    del yolo_label_list[index1]
            # label_filter = [4, 5, 11, 12, 13, 14, 15]
            # filer_box = {}
            # filter_list = []
            # filter_label_list = []
            # for i in range(len(yolo_label_list)):
            #     if yolo_label_list_copy[i] in label_filter:
            #         filter_list.append(i)
            #         filter_label_list.append(yolo_label_list_copy[i])
            # yolo_bbox_list_copy = yolo_bbox_list
            # yolo_score_list_copy = yolo_score_list
            #
            #
            # for pair in combinations(filter_list, 2):
            #     box1 = yolo_bbox_list_copy[pair[0]]
            #     box2 = yolo_bbox_list_copy[pair[1]]
            #     b_iou = filterbox_iou(box1, box2)
            #     if b_iou >= 0.9:
            #         if box1 in yolo_bbox_list and box2 in yolo_bbox_list:
            #             index_0 = yolo_bbox_list.index(box1)
            #             index_1 = yolo_bbox_list.index(box2)
            #             index = index_0 if yolo_score_list[pair[0]] <= yolo_score_list[pair[1]] else index_1
            #             del yolo_bbox_list[index]
            #             del yolo_score_list[index]
            #             del yolo_label_list[index]
            # ...and drop Swin detections for every other class.
            for i in swin_trueLabel_list_copy:
                if i not in swin_object:
                    index2 = swin_trueLabel_list.index(i)
                    del swin_bbox_list[index2]
                    del swin_score_list[index2]
                    del swin_trueLabel_list[index2]
            # Concatenate the two filtered detection sets.
            two_bbox, two_score, two_label = copy.deepcopy(swin_bbox_list), copy.deepcopy(swin_score_list), copy.deepcopy(swin_trueLabel_list)
            for i in range(len(yolo_bbox_list)):
                two_bbox.append(yolo_bbox_list[i])
                two_score.append(yolo_score_list[i])
                two_label.append(yolo_label_list[i])
            two_bbox, two_score, two_label = torch.from_numpy(np.array(two_bbox)).reshape(-1, 4), torch.from_numpy(
                np.array(two_score)).reshape(-1, 1), torch.from_numpy(np.array(two_label)).reshape(-1, 1)
            yolo_bbox_list, yolo_score_list, yolo_label_list = torch.from_numpy(np.array(yolo_bbox_list)).reshape(-1,
                                                                                                                  4), torch.from_numpy(
                np.array(yolo_score_list)).reshape(-1, 1), torch.from_numpy(np.array(yolo_label_list)).reshape(-1, 1)
            swin_bbox_list, swin_score_list, swin_trueLabel_list = torch.from_numpy(np.array(swin_bbox_list)).reshape(
                -1,
                4), torch.from_numpy(
                np.array(swin_score_list)).reshape(-1, 1), torch.from_numpy(np.array(swin_trueLabel_list)).reshape(-1,
                                                                                                                   1)
            # det = torch.cat((torch.cat((swin_bbox_list, swin_score_list), 1), swin_trueLabel_list), 1)  # only show swin_model inference result
            # det = torch.cat((torch.cat((yolo_bbox_list, yolo_score_list), 1), yolo_label_list),1)  # only show yolo_model inference result
            det = torch.cat((torch.cat((two_bbox, two_score), 1), two_label), 1)  # show two_model inference result
            # bbox_list = [swin_bbox_list, yolo_bbox_list]
            # score_list = [swin_score_list, yolo_score_list]
            # label_list = [swin_trueLabel_list, yolo_label_list]
            #
            # wbf_weight = [1, 1]
            # iou_thr = 0.55
            # skip_box_thr = 0.0001
            #
            # boxes, scores, labels = weighted_boxes_fusion(bbox_list, score_list, label_list, weights=wbf_weight,
            #                                               iou_thr=iou_thr, skip_box_thr=skip_box_thr)
            # for in_file in boxes:
            #     in_file[0], in_file[1], in_file[2], in_file[3] = int(in_file[0] * 640), int(in_file[1] * 480), int(
            #         in_file[2] * 640), int(in_file[3] * 480)
            # boxes, scores, labels = boxes.reshape(-1, 4), scores.reshape(-1, 1), labels.reshape(-1, 1)
            # boxes, scores, labels = torch.from_numpy(boxes), torch.from_numpy(scores), torch.from_numpy(labels)
            # det2model = torch.cat((torch.cat((boxes, scores), 1), labels), 1)
            # det = det2model
            if det is not None and len(det):
                numers = len(det)
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string
                # Write results (bounding box, confidence, class)
                for *xyxy, conf, cls in reversed(det):
                    if dict1.__contains__(cls):
                        dict1[cls] = dict1[cls] + 1
                        dict1['total'] = dict1['total'] + 1
                    else:
                        dict1[cls] = 0
                        dict1['total'] = dict1['total'] + 1
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line) + '\n') % line)
                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        img1 = im0.copy()
                        # if cv2.waitKey(1)==32:
                        #     count = 0
                        #     for filename in os.listdir('new_image/'):
                        #         if filename.endswith('.jpg'):
                        #             count += 1
                        #     # print(count)
                        #     print(f"saving image number {count + 1}")
                        #     # Save the image into the sibling imgs folder, named 1, 2, 3, 4...
                        #     cv2.imwrite('new_image/{}.jpg'.format(count + 1), img1)
                        # plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=0.5)  # line thickness
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2)  # line thickness
                        # print(f"\n{names[int(cls)]} bounding box is {int(xyxy[0]),int(xyxy[1]),int(xyxy[2]),int(xyxy[3])}")
                        # print(f"\n{names[int(cls)]} center point is {(int(xyxy[0])+int(xyxy[2]))/2, (int(xyxy[1])+int(xyxy[3]))/2}")
            # Print time (inference + NMS)
            # print('%sDone. (%.3fs)' % (s, t2 - t1))
            print(f"{s}")
            # NOTE(review): prints the literal character "s" — looks like a
            # debugging leftover.
            print(f"s")
            # print coordinates and class
            # print('%s' % (names[int(cls)]))
            # Stream results
            # view_img = True
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    txt = f".numers={len(det)}"
                    cv2.putText(im0, txt,
                                (50, 100),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.2, (34, 157, 255), 2)
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
                    vid_writer.write(im0)
            im_after = im0
            img_dict[frame_key] = dict1
            frame_key += 1
            detected = len(det)
            # Expected ("ground truth") object count per image category,
            # inferred from the file-name prefix.
            img_category = save_path.split('/')[-1].split('_')[0]
            if img_category == 'body':
                true = 17
            elif img_category =='op':
                true = 12
            else:
                true = 29
            root_path = '/root/results/'
            # Save a side-by-side before/after figure when the count matches,
            # plus a random sample (at most 8) of mismatching frames.
            if detected == true:
                plt.figure()
                plt.subplot(1, 3, 1)
                plt.title('original image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(img_before.transpose(1, 2, 0))
                plt.subplot(1, 3, 2)
                plt.title('detected image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(im_after)
                plt.text(700, 300, f"Original:{true}", size=10)
                plt.text(700, 100, f"Detected:{detected}", size=10)
                # plt.text(700, 100, f"Average confidence:{conf}%")
                plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
                            dpi=800)
                counting_img += 1
                full_detect += detected
                full_truth += true
            elif detected != true and f_detect <= 7 and random.uniform(0, 1) > 0.65:
                plt.figure()
                plt.subplot(1, 3, 1)
                plt.title(f'original image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(img_before.transpose(1, 2, 0))
                plt.subplot(1, 3, 2)
                plt.title(f'detected image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(im_after)
                plt.text(700, 300, f"Original:{true}", size=10)
                plt.text(700, 100, f"Detected:{detected}", size=10)
                plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
                            dpi=800)
                counting_img += 1
                f_detect+=1
                full_detect += detected
                full_truth += true
            else:
                # print('wrong-------', save_path)
                pass
            # plt.show()
            # plt.figure()
            # plt.axis([0, 640, 0, 480])
            # plt.text(700, 300, f"Origina:{count_acc}%")
            # plt.text(700, 200, f"Detected:{classify_acc}%")
            # plt.text(700, 100, f"Average confidence:{conf}%")
            # break
    if save_txt or save_img:
        print('Results saved to %s' % Path(out))
    full_time = time.time() - t0
    print('Done. (%.3fs)' % full_time)
    # Rounded mean detections per frame; pick a representative frame's counts.
    merege = math.ceil(full_detect/frame_key)
    for i in img_dict:
        if img_dict[i]['total'] == merege:
            dict2 = img_dict[i]
    # Final summary report figure.
    plt.figure()
    plt.xticks([])
    plt.yticks([])
    plt.axis([0, 640, 0, 680])
    plt.text(50, 620, f"Calming detection report:{dict2}", color='blue', size=5)
    plt.text(50, 520, f"Calming detection report", color='blue', size=10)
    plt.text(50, 420, f"the detect: {merege}", color='blue', size=10)
    plt.text(50, 320, f"All equipment Detected: {full_detect}", size=10)
    plt.text(50, 220, f"All equipment manually counted: {full_truth}", size=10)
    # NOTE(review): f-string prefix combined with %-formatting; works only
    # because the f-strings contain no braces of their own.
    plt.text(50, 120, f"Counting Accuracy: %.2f" % (full_detect*100/full_truth) + '%', size=10)
    plt.text(50, 40, f"Average time: %.2f" % (full_time/counting_img) + " s", size=10)
    print('dfddddddddddddddddddddddddddddddddddddddddd')
    plt.savefig('/root/Downloads/report.jpg')
# Entry point: sample frames from the input video into demo/video_frame/,
# then run the two-model detector over the extracted frames.
if __name__ == '__main__':
    get_image(fcap,framerate)
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='super_yolo.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='/root/Swin-Transformer-Object-Detection/demo/video_frame', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.85, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-dir', type=str, default='/root/Calming_final_test/results', help='directory to save results')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    # `opt` is a module-level global read by detect().
    opt = parser.parse_args()
    print(opt)
    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
| 44.322068 | 157 | 0.540056 | import argparse
import math
import os
import shutil
import time
import numpy as np
from pathlib import Path
from ensemble_boxes import *
import copy
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import matplotlib.pyplot as plt
from itertools import combinations
import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, xywh2xyxy, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from mmdet.apis import init_detector, inference_detector
fcap = cv2.VideoCapture('/root/Swin-Transformer-Object-Detection/demo/VID_20210909_164000.mp4')
data_root = '/root/Swin-Transformer-Object-Detection/'
config_file = data_root + 'configs/swin.py'
checkpoint_file = data_root + '2021_7_28/epoch_50.pth'
swin_model = init_detector(config_file, checkpoint_file, device='cuda:0')
framerate = 10
def get_image(fcap, framerate):
    """Save every `framerate`-th frame of the capture as a JPEG under data_root/demo/video_frame/."""
    frame_no = 1
    while True:
        ok, frame = fcap.read()
        if not ok:
            # capture exhausted (or read failure): stop sampling
            print('the task is end')
            break
        if frame_no % framerate == 0:
            out_name = data_root + 'demo/video_frame/' + str(frame_no) + '.jpg'
            cv2.imwrite(out_name, frame)
        frame_no += 1
        cv2.waitKey(0)
    fcap.release()
def filterbox_iou(rec1, rec2):
    """Intersection-over-union of two axis-aligned boxes.

    Each box is a 4-tuple of two corner points; axis roles follow the
    original variable naming (indices 0/2 = top/bottom, 1/3 = left/right).
    Returns 0 for disjoint boxes.
    """
    area_a = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    area_b = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
    combined = area_a + area_b
    # overlap extent along each axis
    lo_h = max(rec1[1], rec2[1])
    hi_h = min(rec1[3], rec2[3])
    lo_v = max(rec1[0], rec2[0])
    hi_v = min(rec1[2], rec2[2])
    if lo_h >= hi_h or lo_v >= hi_v:
        return 0
    overlap = (hi_h - lo_h) * (hi_v - lo_v)
    return (overlap / (combined - overlap)) * 1.0
def detect(save_img=False):
out, source, weights, view_img, save_txt, imgsz = \
opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
set_logging()
device = select_device(opt.device)
if os.path.exists(out):
shutil.rmtree(out)
os.makedirs(out)
half = device.type != 'cpu'
model = attempt_load(weights, map_location=device)
imgsz = check_img_size(imgsz, s=model.stride.max())
if half:
model.half()
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2)
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
modelc.to(device).eval()
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device)
_ = model(img.half() if half else img) if device.type != 'cpu' else None
f_detect = 0
counting_img = 0
full_detect = 0
full_truth = 0
img_dict = {}
frame_key = 0
dict2 = {}
for path, img, im0s, vid_cap in dataset:
img_before = img
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float()
img /= 255.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
nms_pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=1,
agnostic=opt.agnostic_nms)
t2 = time_synchronized()
for i, det in enumerate(nms_pred):
print(det)
dict1 = {'total': 0}
if webcam:
p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
else:
p, s, im0 = path, '', im0s
save_path = str(Path(out) / Path(p).name)
txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
s += '%gx%g ' % img.shape[2:]
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]
swin_img = cv2.imread(p)
result = inference_detector(swin_model, swin_img)
swin_bbox_list, swin_score_list, swin_label_list = swin_model.show_result(swin_img, result,
out_file=save_path)
yolo_bbox_list = det[:, 0:4].cpu().detach().numpy().tolist()
yolo_score_list = det[:, 4].cpu().detach().numpy().tolist()
yolo_label_list = det[:, 5].cpu().detach().numpy().tolist()
swin_list = ['txd', 'jgc', 'xbs', 'wbs', 'c-pg', 'lwz', 'tc', 'a-pg', 'b-pg', 'g-pg', 'z-pg', 'bbt', 'lxb',
'xgg', 'lsd', 'wt']
yolo_list = ['wt', 'jgc', 'lsd', 'lxb', 'bbt', 'xgg', 'txd', 'lwz', 'tc', 'xbs', 'wbs', 'a-pg', 'b-pg',
'c-pg', 'g-pg', 'z-pg']
swin_trueLabel_list = []
for i in swin_label_list:
swin_trueLabel_list.append(yolo_list.index(swin_list[i]))
swin_object = [0, 1, 2, 3, 6, 7, 8, 9, 10]
yolo_label_list_copy = yolo_label_list.copy()
swin_trueLabel_list_copy = swin_trueLabel_list.copy()
for i in yolo_label_list_copy:
if i in swin_object:
index1 = yolo_label_list.index(i)
del yolo_bbox_list[index1]
del yolo_score_list[index1]
del yolo_label_list[index1]
for i in swin_trueLabel_list_copy:
if i not in swin_object:
index2 = swin_trueLabel_list.index(i)
del swin_bbox_list[index2]
del swin_score_list[index2]
del swin_trueLabel_list[index2]
two_bbox, two_score, two_label = copy.deepcopy(swin_bbox_list), copy.deepcopy(swin_score_list), copy.deepcopy(swin_trueLabel_list)
for i in range(len(yolo_bbox_list)):
two_bbox.append(yolo_bbox_list[i])
two_score.append(yolo_score_list[i])
two_label.append(yolo_label_list[i])
two_bbox, two_score, two_label = torch.from_numpy(np.array(two_bbox)).reshape(-1, 4), torch.from_numpy(
np.array(two_score)).reshape(-1, 1), torch.from_numpy(np.array(two_label)).reshape(-1, 1)
yolo_bbox_list, yolo_score_list, yolo_label_list = torch.from_numpy(np.array(yolo_bbox_list)).reshape(-1,
4), torch.from_numpy(
np.array(yolo_score_list)).reshape(-1, 1), torch.from_numpy(np.array(yolo_label_list)).reshape(-1, 1)
swin_bbox_list, swin_score_list, swin_trueLabel_list = torch.from_numpy(np.array(swin_bbox_list)).reshape(
-1,
4), torch.from_numpy(
np.array(swin_score_list)).reshape(-1, 1), torch.from_numpy(np.array(swin_trueLabel_list)).reshape(-1,
1)
, two_label), 1)
if det is not None and len(det):
numers = len(det)
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum()
s += '%g %ss, ' % (n, names[int(c)])
for *xyxy, conf, cls in reversed(det):
if dict1.__contains__(cls):
dict1[cls] = dict1[cls] + 1
dict1['total'] = dict1['total'] + 1
else:
dict1[cls] = 0
dict1['total'] = dict1['total'] + 1
if save_txt:
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh)
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line) + '\n') % line)
if save_img or view_img:
label = '%s %.2f' % (names[int(cls)], conf)
img1 = im0.copy()
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2)
print(f"{s}")
print(f"s")
if view_img:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'):
raise StopIteration
if save_img:
if dataset.mode == 'images':
txt = f".numers={len(det)}"
cv2.putText(im0, txt,
(50, 100),
cv2.FONT_HERSHEY_SIMPLEX, 1.2, (34, 157, 255), 2)
cv2.imwrite(save_path, im0)
else:
if vid_path != save_path:
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release()
fourcc = 'mp4v'
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
im_after = im0
img_dict[frame_key] = dict1
frame_key += 1
detected = len(det)
img_category = save_path.split('/')[-1].split('_')[0]
if img_category == 'body':
true = 17
elif img_category =='op':
true = 12
else:
true = 29
root_path = '/root/results/'
if detected == true:
plt.figure()
plt.subplot(1, 3, 1)
plt.title('original image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(img_before.transpose(1, 2, 0))
plt.subplot(1, 3, 2)
plt.title('detected image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(im_after)
plt.text(700, 300, f"Original:{true}", size=10)
plt.text(700, 100, f"Detected:{detected}", size=10)
plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
dpi=800)
counting_img += 1
full_detect += detected
full_truth += true
elif detected != true and f_detect <= 7 and random.uniform(0, 1) > 0.65:
plt.figure()
plt.subplot(1, 3, 1)
plt.title(f'original image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(img_before.transpose(1, 2, 0))
plt.subplot(1, 3, 2)
plt.title(f'detected image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(im_after)
plt.text(700, 300, f"Original:{true}", size=10)
plt.text(700, 100, f"Detected:{detected}", size=10)
plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
dpi=800)
counting_img += 1
f_detect+=1
full_detect += detected
full_truth += true
else:
pass
if save_txt or save_img:
print('Results saved to %s' % Path(out))
full_time = time.time() - t0
print('Done. (%.3fs)' % full_time)
merege = math.ceil(full_detect/frame_key)
for i in img_dict:
if img_dict[i]['total'] == merege:
dict2 = img_dict[i]
plt.figure()
plt.xticks([])
plt.yticks([])
plt.axis([0, 640, 0, 680])
plt.text(50, 620, f"Calming detection report:{dict2}", color='blue', size=5)
plt.text(50, 520, f"Calming detection report", color='blue', size=10)
plt.text(50, 420, f"the detect: {merege}", color='blue', size=10)
plt.text(50, 320, f"All equipment Detected: {full_detect}", size=10)
plt.text(50, 220, f"All equipment manually counted: {full_truth}", size=10)
plt.text(50, 120, f"Counting Accuracy: %.2f" % (full_detect*100/full_truth) + '%', size=10)
plt.text(50, 40, f"Average time: %.2f" % (full_time/counting_img) + " s", size=10)
print('dfddddddddddddddddddddddddddddddddddddddddd')
plt.savefig('/root/Downloads/report.jpg')
if __name__ == '__main__':
get_image(fcap,framerate)
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='super_yolo.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='/root/Swin-Transformer-Object-Detection/demo/video_frame', help='source')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.85, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-dir', type=str, default='/root/Calming_final_test/results', help='directory to save results')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
opt = parser.parse_args()
print(opt)
with torch.no_grad():
if opt.update:
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect()
strip_optimizer(opt.weights)
else:
detect()
| true | true |
f72cfd7e4073d6621ae92411769b73ecd011c187 | 3,768 | py | Python | api/vk_methods.py | greenjew/deeploma | 499de7ad844546acf0760aa00096d66216fd3ee9 | [
"MIT"
] | null | null | null | api/vk_methods.py | greenjew/deeploma | 499de7ad844546acf0760aa00096d66216fd3ee9 | [
"MIT"
] | null | null | null | api/vk_methods.py | greenjew/deeploma | 499de7ad844546acf0760aa00096d66216fd3ee9 | [
"MIT"
] | 1 | 2020-07-08T16:26:18.000Z | 2020-07-08T16:26:18.000Z | import requests as r
import pandas as pd
import time
from datetime import datetime
import re
TOKEN_VK = '23acc95023acc95023acc9504023c092a1223ac23acc9507ef4dc240205bcafea27244d' # vk service token
version = 5.101
def get_members(group_id):
    """Return the member count of a VK group via the groups.getById API method.

    Makes up to two attempts (with a short pause between them) and returns
    None if both fail, matching the original implicit-None behaviour.
    """
    try_count = 0
    while try_count < 2:
        try:
            response = r.get('https://api.vk.com/method/groups.getById',
                             params={
                                 'v': version,
                                 'access_token': TOKEN_VK,
                                 'group_ids': group_id,
                                 'fields': 'members_count'
                             })
            return response.json()['response'][0]['members_count']
        except Exception:  # narrowed from bare except: so Ctrl-C still aborts
            try_count += 1
            time.sleep(0.06)  # brief pause before retrying (presumably API rate limiting)
    return None  # explicit: both attempts failed
def cleanText(raw_text):
    """Strip HTML-like tags and VK mention markup from raw_text.

    Removes <...> tags, the "[...|" half of a VK mention like "[id123|Name]",
    and any stray "]" characters.
    """
    markup = re.compile(r'<.*?>|(\[.*?\|)|\]')
    return markup.sub('', raw_text)
def load_from_vk(group_id, date_from, date_to):
    """Download wall posts of a VK group published strictly between date_from and date_to.

    Returns a pandas DataFrame with one row per post (group name, member
    count, date, link, cleaned text, view/like/repost/comment counts) plus an
    'is_anomaly' column ('Да'/'Нет') flagging posts whose view count exceeds
    the three-sigma threshold of the per-day view statistics.
    Raises Exception(group_id, 'channel_not_found') when the VK API reports
    an error for the group.
    """
    headers = ['group_name', 'members', 'post_date', 'post_link', 'text', 'views', 'likes', 'reposts', 'comments']
    posts_in_group = []
    offset = 0
    members = get_members(group_id)
    date_ok = True
    last_try = 0
    # Page through the wall posts until we move past the "left" (older) date bound
    while date_ok or last_try <= 1:
        res = r.get('https://api.vk.com/method/wall.get',
                    params={
                        'v': version,
                        'access_token': TOKEN_VK,
                        'domain': group_id,
                        'offset': offset,
                        'count': '100',
                        'extended': '1',
                        'fields': 'name'
                    })
        try:
            response = res.json()['response']
        except:
            # NOTE(review): if the reply has neither 'response' nor a non-zero
            # error_code, `response` stays unbound below -- left as in the original
            if res.json()['error']['error_code'] != 0:
                raise Exception(group_id, 'channel_not_found')
        if response['count'] == 0:  # empty wall: nothing to fetch, move on to the next group
            date_ok = False
            last_try = 2
            continue
        # keep only the posts satisfying the date condition
        all_posts = response['items']
        group_name = response['groups'][0]['name']
        if all(datetime.fromtimestamp(post['date']).date() < date_from
               for post in all_posts):
            # whole page is older than the window: one more page allowed, then stop
            date_ok = False
            last_try += 1
        else:
            for post in all_posts:
                post_info = []
                post_date = datetime.fromtimestamp(post['date'])
                if date_from < post_date.date() < date_to:
                    print(post_date)
                    post_link = 'https://vk.com/wall' + str(post['owner_id']) + '_' + str(post['id'])
                    post_text = cleanText(post['text'])
                    post_info.append((group_name, members, post_date, post_link, post_text,
                                      post['views']['count'], post['likes']['count'], post['reposts']['count'],
                                      post['comments']['count']))
                posts_in_group.extend(post_info)
        offset += len(all_posts)
        time.sleep(0.06)  # brief pause between requests (presumably API rate limiting)
    posts_data = pd.DataFrame(posts_in_group, columns=headers)
    # mean/std of the per-day average view counts, used for the three-sigma rule
    mean_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).mean()['views'].mean())
    std_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).std()['views'].mean())
    def three_sigma_anomaly(views):
        # 'Да'/'Нет' (yes/no): view count above mean + 3*std
        ano_cut_off = 3 * std_
        upper_cut = mean_ + ano_cut_off
        if views > upper_cut:
            return 'Да'
        else:
            return 'Нет'
    anomalies = posts_data.views.apply(three_sigma_anomaly)
    posts_data['is_anomaly'] = anomalies
    return posts_data
import pandas as pd
import time
from datetime import datetime
import re
TOKEN_VK = '23acc95023acc95023acc9504023c092a1223ac23acc9507ef4dc240205bcafea27244d'
version = 5.101
def get_members(group_id):
try_count = 0
while try_count < 2:
try:
response = r.get('https://api.vk.com/method/groups.getById',
params={
'v': version,
'access_token': TOKEN_VK,
'group_ids': group_id,
'fields': 'members_count'
})
return response.json()['response'][0]['members_count']
except:
try_count += 1
time.sleep(0.06)
def cleanText(raw_text):
cleanr = re.compile('<.*?>|(\[.*?\|)|\]')
cleantext = re.sub(cleanr, '', raw_text)
return cleantext
def load_from_vk(group_id, date_from, date_to):
headers = ['group_name', 'members', 'post_date', 'post_link', 'text', 'views', 'likes', 'reposts', 'comments']
posts_in_group = []
offset = 0
members = get_members(group_id)
date_ok = True
last_try = 0
while date_ok or last_try <= 1:
res = r.get('https://api.vk.com/method/wall.get',
params={
'v': version,
'access_token': TOKEN_VK,
'domain': group_id,
'offset': offset,
'count': '100',
'extended': '1',
'fields': 'name'
})
try:
response = res.json()['response']
except:
if res.json()['error']['error_code'] != 0:
raise Exception(group_id, 'channel_not_found')
if response['count'] == 0:
date_ok = False
last_try = 2
continue
all_posts = response['items']
group_name = response['groups'][0]['name']
if all(datetime.fromtimestamp(post['date']).date() < date_from
for post in all_posts):
date_ok = False
last_try += 1
else:
for post in all_posts:
post_info = []
post_date = datetime.fromtimestamp(post['date'])
if date_from < post_date.date() < date_to:
print(post_date)
post_link = 'https://vk.com/wall' + str(post['owner_id']) + '_' + str(post['id'])
post_text = cleanText(post['text'])
post_info.append((group_name, members, post_date, post_link, post_text,
post['views']['count'], post['likes']['count'], post['reposts']['count'],
post['comments']['count']))
posts_in_group.extend(post_info)
offset += len(all_posts)
time.sleep(0.06)
posts_data = pd.DataFrame(posts_in_group, columns=headers)
mean_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).mean()['views'].mean())
std_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).std()['views'].mean())
def three_sigma_anomaly(views):
ano_cut_off = 3 * std_
upper_cut = mean_ + ano_cut_off
if views > upper_cut:
return 'Да'
else:
return 'Нет'
anomalies = posts_data.views.apply(three_sigma_anomaly)
posts_data['is_anomaly'] = anomalies
return posts_data | true | true |
f72cfe472a8204ac2f26dd570050027f127c9500 | 956 | py | Python | examples/OGLE-BLG-ECL-234840/plot_v8.py | NewCPM/MCPM | 9fb9b7725ccc4452701be47d103ab61f81b4595b | [
"MIT"
] | 2 | 2018-04-10T22:35:11.000Z | 2018-05-16T21:00:40.000Z | examples/OGLE-BLG-ECL-234840/plot_v8.py | CPM-project/MCPM | 9fb9b7725ccc4452701be47d103ab61f81b4595b | [
"MIT"
] | null | null | null | examples/OGLE-BLG-ECL-234840/plot_v8.py | CPM-project/MCPM | 9fb9b7725ccc4452701be47d103ab61f81b4595b | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
in_data = "run_6/run_6_e2_phot_prf_limit.dat"
in_model = "run_6/run_6_e2_phot.res"
out_file = "run_6/plot_eb234840_v8.png"
kwargs = {'color': 'red', 'marker': '.', 'ls': 'none'}
x_lim = [7500., 7528.]
y_lim = [-4000., 500.]
kwargs_1 = {'color': 'blue', 'ls': ':', 'lw': 2, 'zorder': 10}
xlabel = 'BJD - 2450000'
ylabel = 'delta flux'
band = np.arange(7500, 7508.0001)
kwargs_band = {'color': 'blue', 'lw': 2, 'zorder': 10}
################
# End of settings
(times, values, errors) = np.loadtxt(in_data, unpack=True)
(times_model, _, _, values_model) = np.loadtxt(in_model, unpack=True)
plt.errorbar(times, values, yerr=errors, **kwargs)
mask = (times_model > band[-1])
plt.plot(times_model[mask], values_model[mask], **kwargs_1)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.plot(band, band*0., **kwargs_band)
plt.savefig(out_file)
| 23.9 | 69 | 0.676778 | import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
in_data = "run_6/run_6_e2_phot_prf_limit.dat"
in_model = "run_6/run_6_e2_phot.res"
out_file = "run_6/plot_eb234840_v8.png"
kwargs = {'color': 'red', 'marker': '.', 'ls': 'none'}
x_lim = [7500., 7528.]
y_lim = [-4000., 500.]
kwargs_1 = {'color': 'blue', 'ls': ':', 'lw': 2, 'zorder': 10}
xlabel = 'BJD - 2450000'
ylabel = 'delta flux'
band = np.arange(7500, 7508.0001)
kwargs_band = {'color': 'blue', 'lw': 2, 'zorder': 10}
npack=True)
plt.errorbar(times, values, yerr=errors, **kwargs)
mask = (times_model > band[-1])
plt.plot(times_model[mask], values_model[mask], **kwargs_1)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.plot(band, band*0., **kwargs_band)
plt.savefig(out_file)
| true | true |
f72cfecb9a75e28d76c6235057fe3ad2011e3f3f | 4,092 | py | Python | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import argparse
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from joblib import delayed,Parallel
import os
def whichKeep(est_params):
    """Boolean mask selecting parameter triples (kon, koff, ksyn) that look reliable.

    A row is kept when kon and koff did not hit the optimizer bounds
    (2e-3 below, ~1e3 above), the burst size ksyn/koff exceeds 1, and
    ksyn itself exceeds 1.
    """
    params = np.asarray(est_params)
    kon, koff, ksyn = params[:, 0], params[:, 1], params[:, 2]
    # ~(x < a) / ~(x > b) rather than >=/<= keeps the original NaN behaviour
    kon_ok = ~(kon < 2 * 1e-3) & ~(kon > 1e3 - 1)
    koff_ok = ~(koff < 2 * 1e-3) & ~(koff > 1e3 - 1)
    burst_ok = ksyn / koff > 1
    ksyn_ok = ksyn > 1
    return burst_ok & koff_ok & kon_ok & ksyn_ok
def MaximumLikelihood(vals, export_asymp_ci = False, fix = 0, metod = 'L-BFGS-B'):
    """Maximum-likelihood fit of the beta-Poisson bursting model to transcript counts.

    Returns np.array([kon, koff, ksyn]).  An all-NaN array is returned when
    the input is empty or the optimizer raises.  `export_asymp_ci` and `fix`
    are accepted for interface compatibility but unused here.
    """
    # imports live inside the function so it stays self-contained when
    # dispatched to parallel workers
    from scipy.interpolate import interp1d
    from scipy.optimize import minimize
    from scipy import special
    from scipy.stats import poisson, norm
    from scipy.special import j_roots
    from scipy.special import beta as beta_fun
    import numpy as np

    if len(vals) == 0:
        return np.array([np.nan, np.nan, np.nan])

    def dBP(at, alpha, bet, lam):
        # Beta-Poisson pmf via Gauss-Jacobi quadrature over the beta mixing density
        at.shape = (len(at), 1)
        np.repeat(at, 50, axis=1)

        def fun(at, m):
            if (max(m) < 1e6):
                return (poisson.pmf(at, m))
            else:
                # Normal approximation for large means.  BUG FIX: the bare
                # `sqrt` used previously was an undefined name, so this branch
                # raised NameError (silently swallowed by the caller's except).
                return (norm.pdf(at, loc=m, scale=np.sqrt(m)))
        x, w = j_roots(50, alpha=bet - 1, beta=alpha - 1)
        gs = np.sum(w * fun(at, m=lam * (1 + x) / 2), axis=1)
        prob = 1 / beta_fun(alpha, bet) * 2 ** (-alpha - bet + 1) * gs
        return (prob)

    def LogLikelihood(x, vals):
        kon = x[0]
        koff = x[1]
        ksyn = x[2]
        # the small epsilon keeps log() finite when the pmf underflows to 0
        return (-np.sum(np.log(dBP(vals, kon, koff, ksyn) + 1e-10)))

    x0 = MomentInference(vals)  # moment estimates as the starting point
    if np.isnan(x0).any() or any(x0 < 0):
        x0 = np.array([10, 10, 10])  # fallback start when moments are unusable
    bnds = ((1e-3, 1e3), (1e-3, 1e3), (1, 1e4))
    vals_ = np.copy(vals)  # Otherwise the structure is violated.
    try:
        ll = minimize(LogLikelihood, x0, args=(vals_), method=metod, bounds=bnds)
    except Exception:  # narrowed from bare except: optimizer failures become NaN
        return np.array([np.nan, np.nan, np.nan])
    return ll.x
# moment-based inference
def MomentInference(vals, export_moments=False):
# code from Anton Larsson's R implementation
from scipy import stats # needs imports inside function when run in ipyparallel
import numpy as np
m1 = float(np.mean(vals))
m2 = float(sum(vals*(vals - 1))/len(vals))
m3 = float(sum(vals*(vals - 1)*(vals - 2))/len(vals))
# sanity check on input (e.g. need at least on expression level)
if sum(vals) == 0: return np.nan
if m1 == 0: return np.nan
if m2 == 0: return np.nan
r1=m1
r2=m2/m1
r3=m3/m2
if (r1*r2-2*r1*r3 + r2*r3) == 0: return np.nan
if ((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3)) == 0: return np.nan
if (r1 - 2*r2 + r3) == 0: return np.nan
lambda_est = (2*r1*(r3-r2))/(r1*r2-2*r1*r3 + r2*r3)
mu_est = (2*(r3-r2)*(r1-r3)*(r2-r1))/((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3))
v_est = (2*r1*r3 - r1*r2 - r2*r3)/(r1 - 2*r2 + r3)
if export_moments:
return np.array([lambda_est, mu_est, v_est, r1, r2, r3])
return np.array([lambda_est, mu_est, v_est])
# ---- command-line driver ----
parser = argparse.ArgumentParser(description='Maximum likelihood inference of bursting kinetics from scRNA-seq data')
parser.add_argument('file', metavar='file', type=str, nargs=1,help='.csv file with allelic-resolution transcript counts' )
parser.add_argument('--njobs', default=[50], nargs=1, type=int, help='Number of jobs for the parallelization, default 50')
args = parser.parse_args()
filename = args.file[0]
njobs = args.njobs[0]
print('Reading file ' + filename)
# rows = genes (index column), columns = cells
rpkm = pd.read_csv(filename, index_col=0)
print('Inferring kinetics:')
# One ML fit per gene (row): NaN cells dropped, counts rounded to integers.
# The generator's loop variable `rpkm` only shadows the DataFrame inside the
# generator expression, so `rpkm.index` below still refers to the frame.
params = Parallel(n_jobs=njobs, verbose = 3)(delayed(MaximumLikelihood)(np.around(rpkm[pd.notnull(rpkm)])) for i,rpkm in rpkm.iterrows())
keep = whichKeep(params)
print('Inferred kinetics of {} genes out of {} total'.format(np.sum(keep), len(keep)))
# Save <input basename>_ML.pkl: one row per gene with [kon, koff, ksyn] and its keep flag
base = os.path.splitext(os.path.basename(filename))[0]
base = base + '_ML.pkl'
print('Saving result to ' + base)
pd.to_pickle(pd.DataFrame([ params, list(keep)], columns=rpkm.index).T, base)
| 35.582609 | 137 | 0.631232 |
import argparse
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from joblib import delayed,Parallel
import os
def whichKeep(est_params):
kon = np.array(est_params)[:,0]
koff = np.array(est_params)[:,1]
ksyn = np.array(est_params)[:,2]
which_kon = ~(kon < 2*1e-3)*~(kon > 1e3 - 1)
which_koff = ~(koff < 2*1e-3)*~(koff > 1e3 - 1)
which_burst = ksyn/koff > 1
which_ksyn = ksyn > 1
which = which_burst*which_koff*which_kon*which_ksyn
return which
def MaximumLikelihood(vals, export_asymp_ci = False, fix = 0, metod = 'L-BFGS-B'):
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from scipy import special
from scipy.stats import poisson,norm
from scipy.special import j_roots
from scipy.special import beta as beta_fun
import numpy as np
if len(vals) == 0:
return np.array([np.nan, np.nan, np.nan])
def dBP(at, alpha, bet, lam):
at.shape = (len(at), 1)
np.repeat(at, 50, axis = 1)
def fun(at, m):
if(max(m) < 1e6):
return(poisson.pmf(at,m))
else:
return(norm.pdf(at,loc=m,scale=sqrt(m)))
x,w = j_roots(50,alpha = bet - 1, beta = alpha - 1)
gs = np.sum(w*fun(at, m = lam*(1+x)/2), axis=1)
prob = 1/beta_fun(alpha, bet)*2**(-alpha-bet+1)*gs
return(prob)
def LogLikelihood(x, vals):
kon = x[0]
koff = x[1]
ksyn = x[2]
return(-np.sum(np.log( dBP(vals,kon,koff,ksyn) + 1e-10) ) )
x0 = MomentInference(vals)
if np.isnan(x0).any() or any(x0 < 0):
x0 = np.array([10,10,10])
bnds = ((1e-3,1e3),(1e-3,1e3), (1, 1e4))
vals_ = np.copy(vals)
try:
ll = minimize(LogLikelihood, x0, args = (vals_), method=metod, bounds=bnds)
except:
return np.array([np.nan,np.nan,np.nan])
estim = ll.x
return estim
def MomentInference(vals, export_moments=False):
from scipy import stats # needs imports inside function when run in ipyparallel
import numpy as np
m1 = float(np.mean(vals))
m2 = float(sum(vals*(vals - 1))/len(vals))
m3 = float(sum(vals*(vals - 1)*(vals - 2))/len(vals))
# sanity check on input (e.g. need at least on expression level)
if sum(vals) == 0: return np.nan
if m1 == 0: return np.nan
if m2 == 0: return np.nan
r1=m1
r2=m2/m1
r3=m3/m2
if (r1*r2-2*r1*r3 + r2*r3) == 0: return np.nan
if ((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3)) == 0: return np.nan
if (r1 - 2*r2 + r3) == 0: return np.nan
lambda_est = (2*r1*(r3-r2))/(r1*r2-2*r1*r3 + r2*r3)
mu_est = (2*(r3-r2)*(r1-r3)*(r2-r1))/((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3))
v_est = (2*r1*r3 - r1*r2 - r2*r3)/(r1 - 2*r2 + r3)
if export_moments:
return np.array([lambda_est, mu_est, v_est, r1, r2, r3])
return np.array([lambda_est, mu_est, v_est])
parser = argparse.ArgumentParser(description='Maximum likelihood inference of bursting kinetics from scRNA-seq data')
parser.add_argument('file', metavar='file', type=str, nargs=1,help='.csv file with allelic-resolution transcript counts' )
parser.add_argument('--njobs', default=[50], nargs=1, type=int, help='Number of jobs for the parallelization, default 50')
args = parser.parse_args()
filename = args.file[0]
njobs = args.njobs[0]
print('Reading file ' + filename)
rpkm = pd.read_csv(filename, index_col=0)
print('Inferring kinetics:')
params = Parallel(n_jobs=njobs, verbose = 3)(delayed(MaximumLikelihood)(np.around(rpkm[pd.notnull(rpkm)])) for i,rpkm in rpkm.iterrows())
keep = whichKeep(params)
print('Inferred kinetics of {} genes out of {} total'.format(np.sum(keep), len(keep)))
base = os.path.splitext(os.path.basename(filename))[0]
base = base + '_ML.pkl'
print('Saving result to ' + base)
pd.to_pickle(pd.DataFrame([ params, list(keep)], columns=rpkm.index).T, base)
| true | true |
f72cffe0eeccd3aa5694823b8d218f07ea6e87a0 | 1,413 | py | Python | AI hack/Codes and Samples/image_process.py | AdarshSrivatsa98/AIhackathon | 147f6f2ada2ebf1ba6e87df3c3d1d6ee964ac7ee | [
"BSD-3-Clause"
] | 1 | 2021-03-29T04:27:27.000Z | 2021-03-29T04:27:27.000Z | codes and samples/image_process.py | SrivatsaAdarsh/Obstacle-Detection-using-CNN | 008940faffb8a9977b8dcc7a21dda4f328f0a81f | [
"MIT"
] | null | null | null | codes and samples/image_process.py | SrivatsaAdarsh/Obstacle-Detection-using-CNN | 008940faffb8a9977b8dcc7a21dda4f328f0a81f | [
"MIT"
] | null | null | null | import time
import cv2
import sys
import os,os.path
path = sys.argv[1]
data_path=sys.argv[2]
fpsLimit = 0.8
index = 0
currentFrame=0
intframe =0
startTime = time.time()
# Playing video from file:
cap = cv2.VideoCapture(str(path))
try:
if not os.path.exists('data_path'):
os.makedirs('data_path')
except OSError:
print ('Error: Creating directory of data')
while(True):
ret = cap.set(1,index)
ret1,frame = cap.read()
if ret == False or ret1 == False:
break
nowTime = time.time()
if (int(nowTime - startTime)) > fpsLimit:
temp = cv2.resize(frame,(400,400))
for intframe in range(4):
if intframe == 0:
t = temp[0:200,0:200]
if intframe == 1:
t = temp[200:400,0:200]
if intframe ==2:
t = temp[0:200,200:400]
if intframe == 3:
t = temp[200:400,200:400]
# Saves image of the current frame in jpg file
cv2.waitKey(2)
name = str(data_path) + str(currentFrame) + '.jpg'
print ('Creating...image' + str(currentFrame) )
cv2.imwrite(name, t)
currentFrame += 1
intframe=0
index+=100
startTime = time.time() # reset time
cap.release()
cv2.destroyAllWindows()
| 25.690909 | 64 | 0.521585 | import time
import cv2
import sys
import os,os.path
path = sys.argv[1]
data_path=sys.argv[2]
fpsLimit = 0.8
index = 0
currentFrame=0
intframe =0
startTime = time.time()
cap = cv2.VideoCapture(str(path))
try:
if not os.path.exists('data_path'):
os.makedirs('data_path')
except OSError:
print ('Error: Creating directory of data')
while(True):
ret = cap.set(1,index)
ret1,frame = cap.read()
if ret == False or ret1 == False:
break
nowTime = time.time()
if (int(nowTime - startTime)) > fpsLimit:
temp = cv2.resize(frame,(400,400))
for intframe in range(4):
if intframe == 0:
t = temp[0:200,0:200]
if intframe == 1:
t = temp[200:400,0:200]
if intframe ==2:
t = temp[0:200,200:400]
if intframe == 3:
t = temp[200:400,200:400]
cv2.waitKey(2)
name = str(data_path) + str(currentFrame) + '.jpg'
print ('Creating...image' + str(currentFrame) )
cv2.imwrite(name, t)
currentFrame += 1
intframe=0
index+=100
startTime = time.time()
cap.release()
cv2.destroyAllWindows()
| true | true |
f72cffe6fb7e02f2604f6918e08414a09ad9a4c2 | 2,529 | py | Python | recommendations_system/ffm/ffm.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
] | 4 | 2019-12-04T08:42:21.000Z | 2020-06-07T07:22:08.000Z | recommendations_system/ffm/ffm.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
] | null | null | null | recommendations_system/ffm/ffm.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
] | null | null | null | import subprocess
import warnings
import os.path as osp
import os
import numpy as np
# Note: libffm doesn't handle relative paths very well, hence abspath used.
class FFM:
    """Thin wrapper around the libffm command-line binaries (ffm-train / ffm-predict).

    Note: libffm doesn't handle relative paths very well, hence abspath used
    throughout.
    """

    def __init__(self, train_binary_path, predict_binary_path, model_path=None):
        # Paths to the libffm executables; model_path may point to an
        # already-trained model so predict() works without a prior fit().
        self.train_binary_path = osp.abspath(train_binary_path)
        self.predict_binary_path = osp.abspath(predict_binary_path)
        self.model_path = osp.abspath(model_path) if model_path is not None else None

    def fit(self, X, model_path='model', l=0.00002, k=4, t=15, r=0.2, s=1):
        """Train a model on the ffm-format file `X`, storing it at `model_path`.

        -l <lambda>: regularization parameter (default 0.00002)
        -k <factor>: number of latent factors (default 4)
        -t <iteration>: number of iterations (default 15)
        -r <eta>: learning rate (default 0.2)
        -s <nr_threads>: number of threads (default 1)
        """
        warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
                      ' Windows (CR LF) will cause the issues.')
        if not isinstance(X, str):
            raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
        self.model_path = osp.abspath(model_path)
        train_data_abspath = osp.abspath(X)
        # BUG FIX: pass the command as an argument list.  A single f-string
        # only works on Windows; POSIX execs the whole string as one path,
        # and paths containing spaces break either way.
        cmd = [self.train_binary_path, '-l', str(l), '-k', str(k), '-t', str(t),
               '-r', str(r), '-s', str(s), train_data_abspath, self.model_path]
        proc = subprocess.Popen(cmd)
        proc.wait()
        os.remove(f'{train_data_abspath}.bin')  # libffm's cached binary copy of the training data

    def predict(self, X, output_file):
        """Write predictions for the ffm-format file `X` to `output_file`."""
        warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
                      ' Windows (CR LF) will cause the issues.')
        if self.model_path is None:
            raise RuntimeError('Model must be fitted first!')
        if not isinstance(X, str):
            raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
        predicted_data_abspath = osp.abspath(X)
        output_file_abspath = osp.abspath(output_file)
        cmd = [self.predict_binary_path, predicted_data_abspath, self.model_path, output_file_abspath]
        proc = subprocess.Popen(cmd)
        proc.wait()

    @classmethod
    def pred_file_to_numpy(cls, preds_file):
        """Load a libffm prediction output file (one float per line) into a numpy array."""
        return np.loadtxt(preds_file)

    @classmethod
    def ground_truth_from_ffm_file(cls, ffm_file):
        """Extract the label column (first space-separated token per line) from an ffm file."""
        with open(ffm_file, 'r') as f:
            labels = [line.split(' ')[0] for line in f]
        return np.array(labels).astype(float)
| 41.459016 | 118 | 0.644128 | import subprocess
import warnings
import os.path as osp
import os
import numpy as np
class FFM:
def __init__(self, train_binary_path, predict_binary_path, model_path=None):
self.train_binary_path = osp.abspath(train_binary_path)
self.predict_binary_path = osp.abspath(predict_binary_path)
self.model_path = osp.abspath(model_path) if model_path is not None else None
def fit(self, X, model_path='model', l=0.00002, k=4, t=15, r=0.2, s=1):
# validation support?
warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
' Windows (CR LF) will cause the issues.')
if type(X) != str:
raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
self.model_path = osp.abspath(model_path)
train_data_abspath = osp.abspath(X)
cmd = f'{self.train_binary_path} -l {l} -k {k} -t {t} -r {r} -s {s} {train_data_abspath} {self.model_path}'
proc = subprocess.Popen(cmd)
proc.wait()
os.remove(f'{train_data_abspath}.bin')
def predict(self, X, output_file):
warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
' Windows (CR LF) will cause the issues.')
if self.model_path is None:
raise RuntimeError('Model must be fitted first!')
if type(X) != str:
raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
predicted_data_abspath = osp.abspath(X)
output_file_abspath = osp.abspath(output_file)
cmd = f'{self.predict_binary_path} {predicted_data_abspath} {self.model_path} {output_file_abspath}'
proc = subprocess.Popen(cmd)
proc.wait()
@classmethod
def pred_file_to_numpy(cls, preds_file):
return np.loadtxt(preds_file)
@classmethod
def ground_truth_from_ffm_file(cls, ffm_file):
with open(ffm_file, 'r') as f:
labels = [line.split(' ')[0] for line in f]
return np.array(labels).astype(float)
| true | true |
f72d0142e44b378e8893afaa8b5bbafa3e81c8da | 2,259 | py | Python | src/EmailAlert/fancymail.py | JyotiSunkara/Energy-Monitoring-And-Control | efba4ac611e7054b78492ccf5e758a81621c8d6d | [
"MIT"
] | 1 | 2020-06-27T03:25:11.000Z | 2020-06-27T03:25:11.000Z | src/EmailAlert/fancymail.py | JyotiSunkara/Energy-Monitoring-And-Control | efba4ac611e7054b78492ccf5e758a81621c8d6d | [
"MIT"
] | null | null | null | src/EmailAlert/fancymail.py | JyotiSunkara/Energy-Monitoring-And-Control | efba4ac611e7054b78492ccf5e758a81621c8d6d | [
"MIT"
] | null | null | null | from smtplib import SMTP
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from jinja2 import Environment, FileSystemLoader
import os
from_email = 'krbdashboard@outlook.com'
password = 'ArushiSinghal'
env = Environment(
loader=FileSystemLoader('./templates/'))
def get_contacts(filename):
    """
    Return two lists names, emails containing names and email addresses
    read from a file specified by filename.

    Each non-blank line must hold a name and an email address separated
    by whitespace; blank or whitespace-only lines (e.g. a trailing
    newline) are skipped instead of crashing.
    """
    names = []
    emails = []
    with open(filename, mode='r', encoding='utf-8') as contacts_file:
        for a_contact in contacts_file:
            parts = a_contact.split()  # split once, reuse both fields
            if not parts:
                continue
            names.append(parts[0])
            emails.append(parts[1])
    return names, emails
def get_data():
    """Return the template payload: a one-element list holding a movie listing."""
    movie_rows = [
        ('Gone Girl', 'This is a fancy email'),
        ('Delhi 6', 'Good movie'),
        ('The Lion King', 'Roar'),
        ('The Great Gatsby', ':o'),
    ]
    return [{
        "movies": [
            {"title": title, "description": description}
            for title, description in movie_rows
        ]
    }]
def send_mail(bodyContent):
    """Send *bodyContent* as an HTML email to every contact in mycontacts.txt.

    Credentials come from the module-level ``from_email``/``password``.
    The SMTP connection is always closed, even when login or a send fails
    (the original leaked the connection on any exception).
    """
    names, emails = get_contacts('mycontacts.txt')  # Read contacts
    subject = 'Testing CSS/HTML again!'
    server = SMTP('smtp-mail.outlook.com', 587)
    try:
        server.starttls()
        server.login(from_email, password)
        for email in emails:
            message = MIMEMultipart()
            message['Subject'] = subject
            message['From'] = from_email
            message['To'] = email
            message.attach(MIMEText(bodyContent, "html"))
            server.sendmail(from_email, email, message.as_string())
    finally:
        # Guarantee the connection is torn down.
        server.quit()
def send_movie_list():
    """Render the movie template with the sample data and email it to all contacts."""
    payload = get_data()[0]
    html = env.get_template('child.html').render(data=payload)
    send_mail(html)
    return "Mail sent successfully."
if __name__ == '__main__':
    # Script entry point: render and send the sample movie-list email.
    print(send_movie_list())
| 25.670455 | 71 | 0.555556 | from smtplib import SMTP
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from jinja2 import Environment, FileSystemLoader
import os
from_email = 'krbdashboard@outlook.com'
password = 'ArushiSinghal'
env = Environment(
loader=FileSystemLoader('./templates/'))
def get_contacts(filename):
names = []
emails = []
with open(filename, mode='r', encoding='utf-8') as contacts_file:
for a_contact in contacts_file:
names.append(a_contact.split()[0])
emails.append(a_contact.split()[1])
return names, emails
def get_data():
data = []
data.append(
{
"movies": [
{
"title": 'Gone Girl',
"description": 'This is a fancy email'
},
{
"title": 'Delhi 6',
"description": 'Good movie'
},
{
"title": 'The Lion King',
"description": 'Roar'
},
{
"title": 'The Great Gatsby',
"description": ':o'
}
]
})
return data
def send_mail(bodyContent):
names, emails = get_contacts('mycontacts.txt')
subject = 'Testing CSS/HTML again!'
server = SMTP('smtp-mail.outlook.com', 587)
server.starttls()
server.login(from_email, password)
for name, email in zip(names, emails):
message = MIMEMultipart()
message['Subject'] = subject
message['From'] = from_email
message['To'] = email
message.attach(MIMEText(bodyContent, "html"))
msgBody = message.as_string()
server.sendmail(from_email, email, msgBody)
del message
server.quit()
def send_movie_list():
json_data = get_data()
template = env.get_template('child.html')
output = template.render(data=json_data[0])
send_mail(output)
return "Mail sent successfully."
if __name__ == '__main__':
print(send_movie_list())
| true | true |
f72d017e70b5e4176196a0457bdf724775bda1b5 | 5,587 | py | Python | sdk/python/pulumi_azure_nextgen/apimanagement/latest/get_api_release.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/latest/get_api_release.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/latest/get_api_release.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public API of this generated module.
__all__ = [
    'GetApiReleaseResult',
    'AwaitableGetApiReleaseResult',
    'get_api_release',
]
# Importing this module warns immediately: the whole 'latest' version is deprecated.
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.""", DeprecationWarning)
@pulumi.output_type
class GetApiReleaseResult:
    """
    ApiRelease details.

    NOTE: generated by the Pulumi SDK Generator -- do not edit by hand.
    Values are stored via ``pulumi.set``/``pulumi.get`` so the
    ``@pulumi.output_type`` machinery can manage them.
    """
    def __init__(__self__, api_id=None, created_date_time=None, id=None, name=None, notes=None, type=None, updated_date_time=None):
        # Eager validation: any truthy non-str value is rejected up front.
        if api_id and not isinstance(api_id, str):
            raise TypeError("Expected argument 'api_id' to be a str")
        pulumi.set(__self__, "api_id", api_id)
        if created_date_time and not isinstance(created_date_time, str):
            raise TypeError("Expected argument 'created_date_time' to be a str")
        pulumi.set(__self__, "created_date_time", created_date_time)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if notes and not isinstance(notes, str):
            raise TypeError("Expected argument 'notes' to be a str")
        pulumi.set(__self__, "notes", notes)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if updated_date_time and not isinstance(updated_date_time, str):
            raise TypeError("Expected argument 'updated_date_time' to be a str")
        pulumi.set(__self__, "updated_date_time", updated_date_time)
    @property
    @pulumi.getter(name="apiId")
    def api_id(self) -> Optional[str]:
        """
        Identifier of the API the release belongs to.
        """
        return pulumi.get(self, "api_id")
    @property
    @pulumi.getter(name="createdDateTime")
    def created_date_time(self) -> str:
        """
        The time the API was released. The date conforms to the following format: yyyy-MM-ddTHH:mm:ssZ as specified by the ISO 8601 standard.
        """
        return pulumi.get(self, "created_date_time")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def notes(self) -> Optional[str]:
        """
        Release Notes
        """
        return pulumi.get(self, "notes")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="updatedDateTime")
    def updated_date_time(self) -> str:
        """
        The time the API release was updated.
        """
        return pulumi.get(self, "updated_date_time")
class AwaitableGetApiReleaseResult(GetApiReleaseResult):
    """Awaitable variant of GetApiReleaseResult that resolves immediately."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes __await__ a generator function; since
        # it never actually yields, ``await`` completes at once with the value below.
        if False:
            yield self
        return GetApiReleaseResult(
            api_id=self.api_id,
            created_date_time=self.created_date_time,
            id=self.id,
            name=self.name,
            notes=self.notes,
            type=self.type,
            updated_date_time=self.updated_date_time)
def get_api_release(api_id: Optional[str] = None,
                    release_id: Optional[str] = None,
                    resource_group_name: Optional[str] = None,
                    service_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiReleaseResult:
    """
    ApiRelease details.
    Latest API Version: 2019-12-01.
    :param str api_id: API identifier. Must be unique in the current API Management service instance.
    :param str release_id: Release identifier within an API. Must be unique in the current API Management service instance.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # This 'latest' module is deprecated; warn again at call time.
    pulumi.log.warn("get_api_release is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.")
    __args__ = dict()
    __args__['apiId'] = api_id
    __args__['releaseId'] = release_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    # Fall back to default invoke options / the SDK's own version when unset.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/latest:getApiRelease', __args__, opts=opts, typ=GetApiReleaseResult).value
    return AwaitableGetApiReleaseResult(
        api_id=__ret__.api_id,
        created_date_time=__ret__.created_date_time,
        id=__ret__.id,
        name=__ret__.name,
        notes=__ret__.notes,
        type=__ret__.type,
        updated_date_time=__ret__.updated_date_time)
| 36.279221 | 190 | 0.65026 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiReleaseResult',
'AwaitableGetApiReleaseResult',
'get_api_release',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.""", DeprecationWarning)
@pulumi.output_type
class GetApiReleaseResult:
def __init__(__self__, api_id=None, created_date_time=None, id=None, name=None, notes=None, type=None, updated_date_time=None):
if api_id and not isinstance(api_id, str):
raise TypeError("Expected argument 'api_id' to be a str")
pulumi.set(__self__, "api_id", api_id)
if created_date_time and not isinstance(created_date_time, str):
raise TypeError("Expected argument 'created_date_time' to be a str")
pulumi.set(__self__, "created_date_time", created_date_time)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notes and not isinstance(notes, str):
raise TypeError("Expected argument 'notes' to be a str")
pulumi.set(__self__, "notes", notes)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_date_time and not isinstance(updated_date_time, str):
raise TypeError("Expected argument 'updated_date_time' to be a str")
pulumi.set(__self__, "updated_date_time", updated_date_time)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> Optional[str]:
return pulumi.get(self, "api_id")
@property
@pulumi.getter(name="createdDateTime")
def created_date_time(self) -> str:
return pulumi.get(self, "created_date_time")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> Optional[str]:
return pulumi.get(self, "notes")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedDateTime")
def updated_date_time(self) -> str:
return pulumi.get(self, "updated_date_time")
class AwaitableGetApiReleaseResult(GetApiReleaseResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiReleaseResult(
api_id=self.api_id,
created_date_time=self.created_date_time,
id=self.id,
name=self.name,
notes=self.notes,
type=self.type,
updated_date_time=self.updated_date_time)
def get_api_release(api_id: Optional[str] = None,
release_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiReleaseResult:
pulumi.log.warn("get_api_release is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.")
__args__ = dict()
__args__['apiId'] = api_id
__args__['releaseId'] = release_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/latest:getApiRelease', __args__, opts=opts, typ=GetApiReleaseResult).value
return AwaitableGetApiReleaseResult(
api_id=__ret__.api_id,
created_date_time=__ret__.created_date_time,
id=__ret__.id,
name=__ret__.name,
notes=__ret__.notes,
type=__ret__.type,
updated_date_time=__ret__.updated_date_time)
| true | true |
f72d018e06b47ce1c3e6edeff90355eca52ef202 | 15,137 | py | Python | apps/resume/views.py | ozet-team/ozet-server | 4772d37339634adee6ace65a5e2380df4bd22bbb | [
"MIT"
] | null | null | null | apps/resume/views.py | ozet-team/ozet-server | 4772d37339634adee6ace65a5e2380df4bd22bbb | [
"MIT"
] | 4 | 2021-11-27T14:15:55.000Z | 2021-12-10T12:59:44.000Z | apps/resume/views.py | ozet-team/ozet-server | 4772d37339634adee6ace65a5e2380df4bd22bbb | [
"MIT"
] | null | null | null | from django.utils.functional import cached_property
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema, OpenApiExample, OpenApiParameter
from rest_framework.exceptions import NotFound
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateAPIView,
RetrieveUpdateDestroyAPIView,
RetrieveAPIView,
)
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny
from apps.member.models import User
from apps.resume import models
from apps.resume import serializers
from apps.resume.models import Career, Certificate, AcademicBackground, MilitaryService, Resume
from utils.django.rest_framework.mixins import UserContextMixin, QuerySerializerMixin
from commons.contrib.drf_spectacular import tags as api_tags
class ResumeDetailView(UserContextMixin, RetrieveAPIView):
    """Public read-only endpoint: fetch a resume by its own primary key."""
    permission_classes = (AllowAny, )
    serializer_class = serializers.ResumeSerializer
    queryset = Resume.objects
    # URL captures the resume's PK as ``resume_id``.
    lookup_field = 'id'
    lookup_url_kwarg = 'resume_id'
    @extend_schema(
        tags=[api_tags.RESUME],
        summary="회원 이력서 가져오기 API @AllowAny",
        description="회원 이력서 가져오기 API 입니다.",
        responses=serializers.ResumeSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeDetailView, self).get(request, *args, **kwargs)
class UserResumeDetailView(UserContextMixin, RetrieveAPIView):
    """Public read-only endpoint: fetch a resume by its owner's user id."""
    permission_classes = (AllowAny, )
    serializer_class = serializers.UserResumeSerializer
    def get_object(self):
        # ``user_id`` comes from the URL kwargs; 404 both when it is missing
        # and when that user has no resume.
        user_id = self.kwargs.get('user_id', None)
        if not user_id:
            raise NotFound()
        return get_object_or_404(Resume, user_id=user_id)
    @extend_schema(
        tags=[api_tags.RESUME],
        summary="회원 이력서 가져오기 API @AllowAny",
        description="회원 이력서 가져오기 API 입니다.",
        responses=serializers.UserResumeSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(UserResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailView(UserContextMixin, RetrieveAPIView):
    """Authenticated endpoint: fetch the caller's own resume."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.ResumeSerializer
    def get_object(self):
        # Lazily create the resume on first access so every user always has one.
        resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
        return resume
    @extend_schema(
        tags=[api_tags.USER_ME],
        summary="회원 이력서 가져오기 API @IsAuthenticated",
        description="회원 이력서 가져오기 API 입니다.",
        responses=serializers.ResumeSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(UserMeResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailPDFView(UserContextMixin, RetrieveUpdateAPIView):
    """Authenticated endpoint: update the PDF attached to the caller's resume."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.ResumePDFSerializer
    def get_object(self):
        # Lazily create the resume on first access so every user always has one.
        resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
        return resume
    def __init__(self, *args, **kwargs):
        # Strip PUT and GET so this endpoint only exposes PATCH.
        self.http_method_names = [method for method in self.http_method_names if method != "put" and method != "get"]
        super(UserMeResumeDetailPDFView, self).__init__(*args, **kwargs)
    @extend_schema(
        tags=[api_tags.USER_ME],
        summary="회원 이력서 PDF 업데이트 API @IsAuthenticated",
        description="회원 이력서 PDF 업데이트 API 입니다.",
        responses=serializers.ResumePDFSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(UserMeResumeDetailPDFView, self).patch(request, *args, **kwargs)
class ResumeCareerDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
    """Authenticated endpoint for one career entry of the caller's resume."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CareerSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'id'
    def __init__(self, *args, **kwargs):
        # Strip PUT: partial updates (PATCH) only.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCareerDetailView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # drf-spectacular schema generation runs without a real user.
        if getattr(self, 'swagger_fake_view', False):
            return Career.objects.none()
        # Scope to the caller's own resume, newest employment first.
        return Career.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 가져오기 API @IsAuthenticated",
        description="회원 커리어 가져오기 API 입니다.",
        responses=serializers.CareerSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCareerDetailView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 업데이트 API @IsAuthenticated",
        description="회원 커리어 업데이트 API 입니다.",
        responses=serializers.CareerSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeCareerDetailView, self).patch(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 삭제 API @IsAuthenticated",
        description="회원 커리어 삭제 API 입니다.",
    )
    def delete(self, request, *args, **kwargs):
        return super(ResumeCareerDetailView, self).delete(request, *args, **kwargs)
class ResumeCareerListView(UserContextMixin, ListCreateAPIView):
    """Authenticated list/create endpoint for the caller's career entries."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CareerSerializer
    def __init__(self, *args, **kwargs):
        # Strip PUT so the collection endpoint only exposes GET/POST.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCareerListView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # drf-spectacular schema generation runs without a real user.
        if getattr(self, 'swagger_fake_view', False):
            return Career.objects.none()
        # Scope to the caller's own resume, newest employment first.
        return Career.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        # "목록" added for consistency with the other list views (the previous
        # summary was identical to the single-item detail view's).
        summary="회원 커리어 목록 가져오기 API @IsAuthenticated",
        description="회원 커리어 목록 가져오기 API 입니다.\n"
                    "* **Position**\n"
                    "  * **STAFF** - 스탭(인턴)\n"
                    "  * **MANAGER** - 매니저\n"
                    "  * **DESIGNER** - 디자이너\n"
                    "  * **DIRECTOR** - 원장",
        responses=serializers.CareerSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCareerListView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 추가 API @IsAuthenticated",
        description="회원 커리어 추가 API 입니다.\n"
                    "* **Position**\n"
                    "  * **STAFF** - 스탭(인턴)\n"
                    "  * **MANAGER** - 매니저\n"
                    "  * **DESIGNER** - 디자이너\n"
                    "  * **DIRECTOR** - 원장",
        responses=serializers.CareerSerializer,
    )
    def post(self, request, *args, **kwargs):
        return super(ResumeCareerListView, self).post(request, *args, **kwargs)
class ResumeCertificateDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
    """Authenticated endpoint for one certificate entry of the caller's resume."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CertificateSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'id'
    def __init__(self, *args, **kwargs):
        # Strip PUT: partial updates (PATCH) only.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCertificateDetailView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # drf-spectacular schema generation runs without a real user.
        if getattr(self, 'swagger_fake_view', False):
            return Certificate.objects.none()
        # Scope to the caller's own resume, most recent certification first.
        return Certificate.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-certificate_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 가져오기 API @IsAuthenticated",
        description="회원 자격증 가져오기 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCertificateDetailView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 업데이트 API @IsAuthenticated",
        description="회원 자격증 업데이트 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeCertificateDetailView, self).patch(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 삭제 API @IsAuthenticated",
        description="회원 자격증 삭제 API 입니다.",
    )
    def delete(self, request, *args, **kwargs):
        return super(ResumeCertificateDetailView, self).delete(request, *args, **kwargs)
class ResumeCertificateListView(UserContextMixin, ListCreateAPIView):
    """Authenticated list/create endpoint for the caller's certificate entries."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CertificateSerializer
    def __init__(self, *args, **kwargs):
        # Strip PUT so the collection endpoint only exposes GET/POST.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCertificateListView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # drf-spectacular schema generation runs without a real user.
        if getattr(self, 'swagger_fake_view', False):
            return Certificate.objects.none()
        # Scope to the caller's own resume, most recent certification first.
        return Certificate.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-certificate_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 목록 가져오기 API @IsAuthenticated",
        description="회원 자격증 목록 가져오기 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCertificateListView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 추가 API @IsAuthenticated",
        description="회원 자격증 추가 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def post(self, request, *args, **kwargs):
        return super(ResumeCertificateListView, self).post(request, *args, **kwargs)
class ResumeAcademicBackgroundDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
    """Authenticated endpoint for one academic-background entry of the caller's resume."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.AcademicBackgroundSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'id'
    def __init__(self, *args, **kwargs):
        # Strip PUT: partial updates (PATCH) only.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeAcademicBackgroundDetailView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # drf-spectacular schema generation runs without a real user.
        if getattr(self, 'swagger_fake_view', False):
            return AcademicBackground.objects.none()
        # Scope to the caller's own resume, most recent enrolment first.
        return AcademicBackground.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 가져오기 API @IsAuthenticated",
        description="회원 학력 가져오기 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundDetailView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 업데이트 API @IsAuthenticated",
        description="회원 학력 업데이트 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundDetailView, self).patch(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 삭제 API @IsAuthenticated",
        description="회원 학력 삭제 API 입니다.",
    )
    def delete(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundDetailView, self).delete(request, *args, **kwargs)
class ResumeAcademicBackgroundListView(UserContextMixin, ListCreateAPIView):
    """Authenticated list/create endpoint for the caller's academic-background entries."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.AcademicBackgroundSerializer
    # __init__ moved above get_queryset for consistency with the other list
    # views in this module (definition order does not affect behavior).
    def __init__(self, *args, **kwargs):
        # Strip PUT so the collection endpoint only exposes GET/POST.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeAcademicBackgroundListView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # drf-spectacular schema generation runs without a real user.
        if getattr(self, 'swagger_fake_view', False):
            return AcademicBackground.objects.none()
        # Scope to the caller's own resume, most recent enrolment first.
        return AcademicBackground.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 목록 가져오기 API @IsAuthenticated",
        description="회원 학력 목록 가져오기 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundListView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 추가 API @IsAuthenticated",
        description="회원 학력 추가 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def post(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundListView, self).post(request, *args, **kwargs)
class ResumeMilitaryServiceView(UserContextMixin, RetrieveUpdateAPIView):
    """Authenticated endpoint for the military-service record of the caller's resume."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.MilitaryServiceSerializer
    def get_object(self):
        # Lazily create the record so every resume always has exactly one.
        military, is_created = MilitaryService.objects.get_or_create(resume_id=self.user.resume.id)
        return military
    def __init__(self, *args, **kwargs):
        # Strip PUT: partial updates (PATCH) only.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeMilitaryServiceView, self).__init__(*args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_MILITARY],
        summary="회원 병역 가져오기 API @IsAuthenticated",
        description="회원 병역 가져오기 API 입니다.\n"
                    "* **Service Status**\n"
                    "  * **NA** - 해당없음\n"
                    "  * **EXEMPTION** - 면제\n"
                    "  * **UNFINISHED** - 미필\n"
                    "  * **FINISHED** - 군필",
        responses=serializers.MilitaryServiceSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeMilitaryServiceView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_MILITARY],
        summary="회원 병역 업데이트 API @IsAuthenticated",
        description="회원 병역 업데이트 API 입니다.\n"
                    "* **Service Status**\n"
                    "  * **NA** - 해당없음\n"
                    "  * **EXEMPTION** - 면제\n"
                    "  * **UNFINISHED** - 미필\n"
                    "  * **FINISHED** - 군필",
        responses=serializers.MilitaryServiceSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeMilitaryServiceView, self).patch(request, *args, **kwargs)
| 37.375309 | 117 | 0.658321 | from django.utils.functional import cached_property
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema, OpenApiExample, OpenApiParameter
from rest_framework.exceptions import NotFound
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateAPIView,
RetrieveUpdateDestroyAPIView,
RetrieveAPIView,
)
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny
from apps.member.models import User
from apps.resume import models
from apps.resume import serializers
from apps.resume.models import Career, Certificate, AcademicBackground, MilitaryService, Resume
from utils.django.rest_framework.mixins import UserContextMixin, QuerySerializerMixin
from commons.contrib.drf_spectacular import tags as api_tags
class ResumeDetailView(UserContextMixin, RetrieveAPIView):
permission_classes = (AllowAny, )
serializer_class = serializers.ResumeSerializer
queryset = Resume.objects
lookup_field = 'id'
lookup_url_kwarg = 'resume_id'
@extend_schema(
tags=[api_tags.RESUME],
summary="회원 이력서 가져오기 API @AllowAny",
description="회원 이력서 가져오기 API 입니다.",
responses=serializers.ResumeSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeDetailView, self).get(request, *args, **kwargs)
class UserResumeDetailView(UserContextMixin, RetrieveAPIView):
permission_classes = (AllowAny, )
serializer_class = serializers.UserResumeSerializer
def get_object(self):
user_id = self.kwargs.get('user_id', None)
if not user_id:
raise NotFound()
return get_object_or_404(Resume, user_id=user_id)
@extend_schema(
tags=[api_tags.RESUME],
summary="회원 이력서 가져오기 API @AllowAny",
description="회원 이력서 가져오기 API 입니다.",
responses=serializers.UserResumeSerializer,
)
def get(self, request, *args, **kwargs):
return super(UserResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailView(UserContextMixin, RetrieveAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.ResumeSerializer
def get_object(self):
resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
return resume
@extend_schema(
tags=[api_tags.USER_ME],
summary="회원 이력서 가져오기 API @IsAuthenticated",
description="회원 이력서 가져오기 API 입니다.",
responses=serializers.ResumeSerializer,
)
def get(self, request, *args, **kwargs):
return super(UserMeResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailPDFView(UserContextMixin, RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.ResumePDFSerializer
def get_object(self):
resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
return resume
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put" and method != "get"]
super(UserMeResumeDetailPDFView, self).__init__(*args, **kwargs)
@extend_schema(
tags=[api_tags.USER_ME],
summary="회원 이력서 PDF 업데이트 API @IsAuthenticated",
description="회원 이력서 PDF 업데이트 API 입니다.",
responses=serializers.ResumePDFSerializer,
)
def patch(self, request, *args, **kwargs):
return super(UserMeResumeDetailPDFView, self).patch(request, *args, **kwargs)
class ResumeCareerDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CareerSerializer
lookup_field = 'id'
lookup_url_kwarg = 'id'
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCareerDetailView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Career.objects.none()
return Career.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 가져오기 API @IsAuthenticated",
description="회원 커리어 가져오기 API 입니다.",
responses=serializers.CareerSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCareerDetailView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 업데이트 API @IsAuthenticated",
description="회원 커리어 업데이트 API 입니다.",
responses=serializers.CareerSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeCareerDetailView, self).patch(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 삭제 API @IsAuthenticated",
description="회원 커리어 삭제 API 입니다.",
)
def delete(self, request, *args, **kwargs):
return super(ResumeCareerDetailView, self).delete(request, *args, **kwargs)
class ResumeCareerListView(UserContextMixin, ListCreateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CareerSerializer
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCareerListView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Career.objects.none()
return Career.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 가져오기 API @IsAuthenticated",
description="회원 커리어 가져오기 API 입니다.\n"
"* **Position**\n"
" * **STAFF** - 스탭(인턴)\n"
" * **MANAGER** - 매니저\n"
" * **DESIGNER** - 디자이너\n"
" * **DIRECTOR** - 원장",
responses=serializers.CareerSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCareerListView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 추가 API @IsAuthenticated",
description="회원 커리어 추가 API 입니다.\n"
"* **Position**\n"
" * **STAFF** - 스탭(인턴)\n"
" * **MANAGER** - 매니저\n"
" * **DESIGNER** - 디자이너\n"
" * **DIRECTOR** - 원장",
responses=serializers.CareerSerializer,
)
def post(self, request, *args, **kwargs):
return super(ResumeCareerListView, self).post(request, *args, **kwargs)
class ResumeCertificateDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CertificateSerializer
lookup_field = 'id'
lookup_url_kwarg = 'id'
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCertificateDetailView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Certificate.objects.none()
return Certificate.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-certificate_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 가져오기 API @IsAuthenticated",
description="회원 자격증 가져오기 API 입니다.",
responses=serializers.CertificateSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCertificateDetailView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 업데이트 API @IsAuthenticated",
description="회원 자격증 업데이트 API 입니다.",
responses=serializers.CertificateSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeCertificateDetailView, self).patch(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 삭제 API @IsAuthenticated",
description="회원 자격증 삭제 API 입니다.",
)
def delete(self, request, *args, **kwargs):
return super(ResumeCertificateDetailView, self).delete(request, *args, **kwargs)
class ResumeCertificateListView(UserContextMixin, ListCreateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CertificateSerializer
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCertificateListView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Certificate.objects.none()
return Certificate.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-certificate_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 목록 가져오기 API @IsAuthenticated",
description="회원 자격증 목록 가져오기 API 입니다.",
responses=serializers.CertificateSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCertificateListView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 추가 API @IsAuthenticated",
description="회원 자격증 추가 API 입니다.",
responses=serializers.CertificateSerializer,
)
def post(self, request, *args, **kwargs):
return super(ResumeCertificateListView, self).post(request, *args, **kwargs)
class ResumeAcademicBackgroundDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.AcademicBackgroundSerializer
lookup_field = 'id'
lookup_url_kwarg = 'id'
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeAcademicBackgroundDetailView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return AcademicBackground.objects.none()
return AcademicBackground.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 가져오기 API @IsAuthenticated",
description="회원 학력 가져오기 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundDetailView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 업데이트 API @IsAuthenticated",
description="회원 학력 업데이트 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundDetailView, self).patch(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 삭제 API @IsAuthenticated",
description="회원 학력 삭제 API 입니다.",
)
def delete(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundDetailView, self).delete(request, *args, **kwargs)
class ResumeAcademicBackgroundListView(UserContextMixin, ListCreateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.AcademicBackgroundSerializer
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return AcademicBackground.objects.none()
return AcademicBackground.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeAcademicBackgroundListView, self).__init__(*args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 목록 가져오기 API @IsAuthenticated",
description="회원 학력 목록 가져오기 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundListView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 추가 API @IsAuthenticated",
description="회원 학력 추가 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def post(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundListView, self).post(request, *args, **kwargs)
class ResumeMilitaryServiceView(UserContextMixin, RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.MilitaryServiceSerializer
def get_object(self):
military, is_created = MilitaryService.objects.get_or_create(resume_id=self.user.resume.id)
return military
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeMilitaryServiceView, self).__init__(*args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_MILITARY],
summary="회원 병역 가져오기 API @IsAuthenticated",
description="회원 병역 가져오기 API 입니다.\n"
"* **Service Status**\n"
" * **NA** - 해당없음\n"
" * **EXEMPTION** - 면제\n"
" * **UNFINISHED** - 미필\n"
" * **FINISHED** - 군필",
responses=serializers.MilitaryServiceSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeMilitaryServiceView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_MILITARY],
summary="회원 병역 업데이트 API @IsAuthenticated",
description="회원 병역 업데이트 API 입니다.\n"
"* **Service Status**\n"
" * **NA** - 해당없음\n"
" * **EXEMPTION** - 면제\n"
" * **UNFINISHED** - 미필\n"
" * **FINISHED** - 군필",
responses=serializers.MilitaryServiceSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeMilitaryServiceView, self).patch(request, *args, **kwargs)
| true | true |
f72d027eb1356111f2daf107008ee00d025ad541 | 2,954 | py | Python | find_deathdomains.py | caspase-like-homolog-identifier/c14_witcher | e2c481607b85fed749daec0e9b3b29b65d6b448f | [
"MIT"
] | null | null | null | find_deathdomains.py | caspase-like-homolog-identifier/c14_witcher | e2c481607b85fed749daec0e9b3b29b65d6b448f | [
"MIT"
] | null | null | null | find_deathdomains.py | caspase-like-homolog-identifier/c14_witcher | e2c481607b85fed749daec0e9b3b29b65d6b448f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from run_hmmer import RunHmmer
from Bio import SearchIO
import pandas as pd
import collections
import random
import tempfile
import argparse
import pprint
import glob
import sys
class FindDeathDomains(RunHmmer):
def __init__(self, seqfile, dd_hmm_path, *hmmersearch_args):
""" Subclass the Hmmer commandline wrapper """
self.dd_hmm_paths = glob.glob(dd_hmm_path)
super().__init__("hmmsearch", None, seqfile, None, None, *hmmersearch_args)
self.deathdomain_hits = {}
self.dd_dict = None
def deathdomains_iter(self):
""" iterate over the deathdomains """
self.dd_names = []
for hmm_file in self.dd_hmm_paths:
self.hmmfile = hmm_file
tmp1, tmp2 = [ tempfile.NamedTemporaryFile(delete=False) for _ in range(2) ]
self.align_out = tmp1.name
self.domtblout = tmp2.name
std, stderr = self()
deathdomain = self.has_deathdomain(self.domtblout)
if deathdomain:
self.deathdomain_hits[deathdomain[0].id] = deathdomain[0].hits
self.dd_names.append(deathdomain[0].id)
def has_deathdomain(self, domtab):
return list(SearchIO.parse(domtab, "hmmsearch3-domtab"))
def DeathDomains(self, feature):
"""Property to view the death domains.Start analysis if not done already"""
# _id
# _id_alt
# _query_id
# _description
# _description_alt
# _query_description
# attributes
# dbxrefs
# _items
# accession
# seq_len
# evalue
# bitscore
# bias
if not self.deathdomain_hits:
self.deathdomains_iter()
#create dict using seq.ids as keys and empty lists as values
dd_dict = collections.defaultdict(list)
for dd in self.deathdomain_hits:
#print(dd)
for hit in self.deathdomain_hits[dd]:
dd_dict[hit.id].append(vars(hit)[feature])
self.deathdomains = pd.DataFrame(columns = ['Seq_ID']+self.dd_names)
for seq_id, values in dd_dict.items():
self.deathdomains = self.deathdomains.append(pd.Series([seq_id]+values, index= ['Seq_ID']+self.dd_names, name = seq_id))
return self.deathdomains
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('seqfile', action='store', type=str)
parser.add_argument('-g','--hmm_glob', default="/opt/DB_REF/Pfam/Ig*hmm")
args = parser.parse_args()
dd = FindDeathDomains(args.seqfile, args.hmm_glob)
dd.deathdomains_iter()
print("\n\n\n\n")
print(dd.DeathDomains('evalue'))
| 31.094737 | 133 | 0.58497 |
from run_hmmer import RunHmmer
from Bio import SearchIO
import pandas as pd
import collections
import random
import tempfile
import argparse
import pprint
import glob
import sys
class FindDeathDomains(RunHmmer):
def __init__(self, seqfile, dd_hmm_path, *hmmersearch_args):
self.dd_hmm_paths = glob.glob(dd_hmm_path)
super().__init__("hmmsearch", None, seqfile, None, None, *hmmersearch_args)
self.deathdomain_hits = {}
self.dd_dict = None
def deathdomains_iter(self):
self.dd_names = []
for hmm_file in self.dd_hmm_paths:
self.hmmfile = hmm_file
tmp1, tmp2 = [ tempfile.NamedTemporaryFile(delete=False) for _ in range(2) ]
self.align_out = tmp1.name
self.domtblout = tmp2.name
std, stderr = self()
deathdomain = self.has_deathdomain(self.domtblout)
if deathdomain:
self.deathdomain_hits[deathdomain[0].id] = deathdomain[0].hits
self.dd_names.append(deathdomain[0].id)
def has_deathdomain(self, domtab):
return list(SearchIO.parse(domtab, "hmmsearch3-domtab"))
def DeathDomains(self, feature):
if not self.deathdomain_hits:
self.deathdomains_iter()
dd_dict = collections.defaultdict(list)
for dd in self.deathdomain_hits:
for hit in self.deathdomain_hits[dd]:
dd_dict[hit.id].append(vars(hit)[feature])
self.deathdomains = pd.DataFrame(columns = ['Seq_ID']+self.dd_names)
for seq_id, values in dd_dict.items():
self.deathdomains = self.deathdomains.append(pd.Series([seq_id]+values, index= ['Seq_ID']+self.dd_names, name = seq_id))
return self.deathdomains
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('seqfile', action='store', type=str)
parser.add_argument('-g','--hmm_glob', default="/opt/DB_REF/Pfam/Ig*hmm")
args = parser.parse_args()
dd = FindDeathDomains(args.seqfile, args.hmm_glob)
dd.deathdomains_iter()
print("\n\n\n\n")
print(dd.DeathDomains('evalue'))
| true | true |
f72d032ec7455ded65fafe668268d74e9cbda5cc | 2,306 | py | Python | models.py | phpwizz/SimpelApi | c2a5f28fff752fb84e99568a3e0dab5c37e03c94 | [
"MIT"
] | 1 | 2018-07-14T08:43:25.000Z | 2018-07-14T08:43:25.000Z | models.py | phpwizz/SimpelApi | c2a5f28fff752fb84e99568a3e0dab5c37e03c94 | [
"MIT"
] | null | null | null | models.py | phpwizz/SimpelApi | c2a5f28fff752fb84e99568a3e0dab5c37e03c94 | [
"MIT"
] | null | null | null | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
import random, string
from itsdangerous import(TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
Base = declarative_base()
secret_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key = True)
username = Column(String)
picture = Column (String)
description = Column(String)
name = Column(String)
password_hash = Column(String(64))
def hash_password(self,password):
self.password_hash = pwd_context.hash(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration = 600):
s = Serializer(secret_key, expires_in = expiration)
return s.dumps({'id': self.id})
#Verify auth tokens
@staticmethod
def verify_auth_token(token):
s = Serializer(secret_key)
try:
data = s.loads(token)
except SignatureExpired:
#Valid but expired
return None
except BadSignature:
#Invalid token
return None
user_id = data['id']
return user_id
@property
def serialize(self):
return {
'id': self.id,
'user_about': self.description,
'username': self.username,
'picture': self.picture,
'name' : self.name
}
class Post(Base):
__tablename__ = 'posts'
id = Column(Integer, primary_key = True)
content = Column(String(250))
likes = Column(Integer)
user_id = Column(Integer,ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return {
'id': self.id,
'zcontent': self.content,
'zlikes': self.likes,
'zauthor': self.user_id
}
engine = create_engine('sqlite:///simpelapi.db')
Base.metadata.create_all(engine) | 26.813953 | 103 | 0.641804 | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
import random, string
from itsdangerous import(TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
Base = declarative_base()
secret_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key = True)
username = Column(String)
picture = Column (String)
description = Column(String)
name = Column(String)
password_hash = Column(String(64))
def hash_password(self,password):
self.password_hash = pwd_context.hash(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration = 600):
s = Serializer(secret_key, expires_in = expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(secret_key)
try:
data = s.loads(token)
except SignatureExpired:
return None
except BadSignature:
return None
user_id = data['id']
return user_id
@property
def serialize(self):
return {
'id': self.id,
'user_about': self.description,
'username': self.username,
'picture': self.picture,
'name' : self.name
}
class Post(Base):
__tablename__ = 'posts'
id = Column(Integer, primary_key = True)
content = Column(String(250))
likes = Column(Integer)
user_id = Column(Integer,ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return {
'id': self.id,
'zcontent': self.content,
'zlikes': self.likes,
'zauthor': self.user_id
}
engine = create_engine('sqlite:///simpelapi.db')
Base.metadata.create_all(engine) | true | true |
f72d04129209f13907fb9ece50d3696c445c1bc3 | 11,522 | py | Python | gammapy/catalog/tests/test_hess.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | gammapy/catalog/tests/test_hess.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | gammapy/catalog/tests/test_hess.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import Counter
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import SkyCoord, Angle
from astropy.table import Table
from ...utils.testing import assert_quantity_allclose
from ...utils.testing import requires_data, requires_dependency
from ...spectrum.models import PowerLaw, ExponentialCutoffPowerLaw
from ..hess import SourceCatalogHGPS, SourceCatalogLargeScaleHGPS
@pytest.fixture(scope="session")
def cat():
return SourceCatalogHGPS("$GAMMAPY_DATA/catalogs/hgps_catalog_v1.fits.gz")
@requires_data("gammapy-data")
class TestSourceCatalogHGPS:
@staticmethod
def test_source_table(cat):
assert cat.name == "hgps"
assert len(cat.table) == 78
@staticmethod
def test_table_components(cat):
assert len(cat.table_components) == 98
@staticmethod
def test_table_associations(cat):
assert len(cat.table_associations) == 223
@staticmethod
def test_table_identifications(cat):
assert len(cat.table_identifications) == 31
@staticmethod
def test_gaussian_component(cat):
# Row index starts at 0, component numbers at 1
# Thus we expect `HGPSC 084` at row 83
c = cat.gaussian_component(83)
assert c.name == "HGPSC 084"
@staticmethod
def test_large_scale_component(cat):
assert isinstance(cat.large_scale_component, SourceCatalogLargeScaleHGPS)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPS:
@pytest.fixture(scope="class")
def source(self, cat):
return cat["HESS J1843-033"]
@staticmethod
@pytest.mark.slow
def test_all_sources(cat):
"""Check that properties and methods work for all sources,
i.e. don't raise an error."""
for source in cat:
str(source)
source.energy_range
source.spectral_model_type
source.spectral_model()
source.spatial_model_type
source.is_pointlike
source.sky_model()
source.flux_points
@staticmethod
def test_basics(source):
assert source.name == "HESS J1843-033"
assert source.index == 64
data = source.data
assert data["Source_Class"] == "Unid"
assert "SourceCatalogObjectHGPS" in repr(source)
ss = str(source)
assert "Source name : HESS J1843-033" in ss
assert "Component HGPSC 083:" in ss
@staticmethod
def test_str(cat):
source = cat["HESS J1930+188"]
assert source.data["Spatial_Model"] == "Gaussian"
assert "Spatial components : HGPSC 097" in str(source)
source = cat["HESS J1825-137"]
assert source.data["Spatial_Model"] == "3-Gaussian"
assert "Spatial components : HGPSC 065, HGPSC 066, HGPSC 067" in str(source)
source = cat["HESS J1713-397"]
assert source.data["Spatial_Model"] == "Shell"
assert "Source name : HESS J1713-397" in str(source)
@staticmethod
def test_components(source):
components = source.components
assert len(components) == 2
c = components[1]
assert c.name == "HGPSC 084"
@staticmethod
def test_energy_range(source):
energy_range = source.energy_range
assert energy_range.unit == "TeV"
assert_allclose(energy_range.value, [0.21544346, 61.89658356])
@staticmethod
def test_spectral_model_type(cat):
spec_types = Counter([_.spectral_model_type for _ in cat])
assert spec_types == {"pl": 66, "ecpl": 12}
@staticmethod
@requires_dependency("uncertainties")
def test_spectral_model_pl(cat):
source = cat["HESS J1843-033"]
model = source.spectral_model()
assert isinstance(model, PowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 9.140179932365378e-13)
assert_allclose(pars["index"].value, 2.1513476371765137)
assert_allclose(pars["reference"].value, 1.867810606956482)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)
@staticmethod
@requires_dependency("uncertainties")
def test_spectral_model_ecpl(cat):
source = cat["HESS J0835-455"]
model = source.spectral_model()
assert isinstance(model, ExponentialCutoffPowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 6.408420542586617e-12)
assert_allclose(pars["index"].value, 1.3543991614920847)
assert_allclose(pars["reference"].value, 1.696938754239)
assert_allclose(pars["lambda_"].value, 0.081517637)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)
model = source.spectral_model("pl")
assert isinstance(model, PowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 1.833056926733856e-12)
assert_allclose(pars["index"].value, 1.8913707)
assert_allclose(pars["reference"].value, 3.0176312923431396)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_PL_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_PL_Int_1TeV_Err"].value, rtol=0.01)
@staticmethod
def test_spatial_model_type(cat):
morph_types = Counter([_.spatial_model_type for _ in cat])
assert morph_types == {
"gaussian": 52,
"2-gaussian": 8,
"shell": 7,
"point-like": 6,
"3-gaussian": 5,
}
@staticmethod
def test_sky_model_point(cat):
model = cat["HESS J1826-148"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 9.815771242691063e-13)
assert_allclose(p["lon_0"].value, 16.882482528686523)
assert_allclose(p["lat_0"].value, -1.2889292240142822)
@staticmethod
def test_sky_model_gaussian(cat):
model = cat["HESS J1119-614"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 7.959899015960725e-13)
assert_allclose(p["lon_0"].value, 292.1280822753906)
assert_allclose(p["lat_0"].value, -0.5332353711128235)
assert_allclose(p["sigma"].value, 0.09785966575145721)
@staticmethod
def test_sky_model_gaussian2(cat):
model = cat["HESS J1843-033"].sky_model()
p = model.skymodels[0].parameters
assert_allclose(p["amplitude"].value, 4.259815e-13, rtol=1e-5)
assert_allclose(p["lon_0"].value, 29.047216415405273)
assert_allclose(p["lat_0"].value, 0.24389676749706268)
assert_allclose(p["sigma"].value, 0.12499100714921951)
p = model.skymodels[1].parameters
assert_allclose(p["amplitude"].value, 4.880365e-13, rtol=1e-5)
assert_allclose(p["lon_0"].value, 28.77037811279297)
assert_allclose(p["lat_0"].value, -0.0727819949388504)
assert_allclose(p["sigma"].value, 0.2294706553220749)
@staticmethod
def test_sky_model_gaussian3(cat):
model = cat["HESS J1825-137"].sky_model()
p = model.skymodels[0].parameters
assert_allclose(p["amplitude"].value, 1.8952104218765842e-11)
assert_allclose(p["lon_0"].value, 16.988601684570312)
assert_allclose(p["lat_0"].value, -0.4913068115711212)
assert_allclose(p["sigma"].value, 0.47650089859962463)
p = model.skymodels[1].parameters
assert_allclose(p["amplitude"].value, 4.4639763971527836e-11)
assert_allclose(p["lon_0"].value, 17.71169090270996)
assert_allclose(p["lat_0"].value, -0.6598004102706909)
assert_allclose(p["sigma"].value, 0.3910967707633972)
p = model.skymodels[2].parameters
assert_allclose(p["amplitude"].value, 5.870712920658374e-12)
assert_allclose(p["lon_0"].value, 17.840524673461914)
assert_allclose(p["lat_0"].value, -0.7057178020477295)
assert_allclose(p["sigma"].value, 0.10932201147079468)
@staticmethod
def test_sky_model_gaussian_extern(cat):
# special test for the only extern source with a gaussian morphology
model = cat["HESS J1801-233"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 7.499999970031479e-13)
assert_allclose(p["lon_0"].value, 6.656888961791992)
assert_allclose(p["lat_0"].value, -0.267688125371933)
assert_allclose(p["sigma"].value, 0.17)
@staticmethod
def test_sky_model_shell(cat):
model = cat["Vela Junior"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 3.2163001428830995e-11)
assert_allclose(p["lon_0"].value, 266.2873840332031)
assert_allclose(p["lat_0"].value, -1.243260383605957)
assert_allclose(p["radius"].value, 0.95)
assert_allclose(p["width"].value, 0.05)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPSComponent:
@pytest.fixture(scope="class")
def component(self, cat):
return cat.gaussian_component(83)
@staticmethod
def test_repr(component):
assert "SourceCatalogObjectHGPSComponent" in repr(component)
@staticmethod
def test_str(component):
assert "Component HGPSC 084" in str(component)
@staticmethod
def test_name(component):
assert component.name == "HGPSC 084"
@staticmethod
def test_index(component):
assert component.index == 83
@staticmethod
def test_spatial_model(component):
model = component.spatial_model
p = model.parameters
assert_allclose(p["lon_0"].value, 28.77037811279297)
assert_allclose(p.error("lon_0"), 0.058748625218868256)
assert_allclose(p["lat_0"].value, -0.0727819949388504)
assert_allclose(p.error("lat_0"), 0.06880396604537964)
assert_allclose(p["sigma"].value, 0.2294706553220749)
assert_allclose(p.error("sigma"), 0.04618723690509796)
class TestSourceCatalogLargeScaleHGPS:
def setup(self):
table = Table()
table["GLON"] = [-30, -10, 10, 20] * u.deg
table["Surface_Brightness"] = [0, 1, 10, 0] * u.Unit("cm-2 s-1 sr-1")
table["GLAT"] = [-1, 0, 1, 0] * u.deg
table["Width"] = [0.4, 0.5, 0.3, 1.0] * u.deg
self.table = table
self.model = SourceCatalogLargeScaleHGPS(table)
def test_evaluate(self):
x = np.linspace(-100, 20, 5)
y = np.linspace(-2, 2, 7)
x, y = np.meshgrid(x, y)
coords = SkyCoord(x, y, unit="deg", frame="galactic")
image = self.model.evaluate(coords)
desired = 1.223962643740966 * u.Unit("cm-2 s-1 sr-1")
assert_quantity_allclose(image.sum(), desired)
def test_parvals(self):
glon = Angle(10, unit="deg")
assert_quantity_allclose(
self.model.peak_brightness(glon), 10 * u.Unit("cm-2 s-1 sr-1")
)
assert_quantity_allclose(self.model.peak_latitude(glon), 1 * u.deg)
assert_quantity_allclose(self.model.width(glon), 0.3 * u.deg)
| 37.167742 | 87 | 0.661083 |
from collections import Counter
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import SkyCoord, Angle
from astropy.table import Table
from ...utils.testing import assert_quantity_allclose
from ...utils.testing import requires_data, requires_dependency
from ...spectrum.models import PowerLaw, ExponentialCutoffPowerLaw
from ..hess import SourceCatalogHGPS, SourceCatalogLargeScaleHGPS
@pytest.fixture(scope="session")
def cat():
return SourceCatalogHGPS("$GAMMAPY_DATA/catalogs/hgps_catalog_v1.fits.gz")
@requires_data("gammapy-data")
class TestSourceCatalogHGPS:
@staticmethod
def test_source_table(cat):
assert cat.name == "hgps"
assert len(cat.table) == 78
@staticmethod
def test_table_components(cat):
assert len(cat.table_components) == 98
@staticmethod
def test_table_associations(cat):
assert len(cat.table_associations) == 223
@staticmethod
def test_table_identifications(cat):
assert len(cat.table_identifications) == 31
@staticmethod
def test_gaussian_component(cat):
c = cat.gaussian_component(83)
assert c.name == "HGPSC 084"
@staticmethod
def test_large_scale_component(cat):
assert isinstance(cat.large_scale_component, SourceCatalogLargeScaleHGPS)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPS:
@pytest.fixture(scope="class")
def source(self, cat):
return cat["HESS J1843-033"]
@staticmethod
@pytest.mark.slow
def test_all_sources(cat):
for source in cat:
str(source)
source.energy_range
source.spectral_model_type
source.spectral_model()
source.spatial_model_type
source.is_pointlike
source.sky_model()
source.flux_points
@staticmethod
def test_basics(source):
assert source.name == "HESS J1843-033"
assert source.index == 64
data = source.data
assert data["Source_Class"] == "Unid"
assert "SourceCatalogObjectHGPS" in repr(source)
ss = str(source)
assert "Source name : HESS J1843-033" in ss
assert "Component HGPSC 083:" in ss
@staticmethod
def test_str(cat):
source = cat["HESS J1930+188"]
assert source.data["Spatial_Model"] == "Gaussian"
assert "Spatial components : HGPSC 097" in str(source)
source = cat["HESS J1825-137"]
assert source.data["Spatial_Model"] == "3-Gaussian"
assert "Spatial components : HGPSC 065, HGPSC 066, HGPSC 067" in str(source)
source = cat["HESS J1713-397"]
assert source.data["Spatial_Model"] == "Shell"
assert "Source name : HESS J1713-397" in str(source)
@staticmethod
def test_components(source):
components = source.components
assert len(components) == 2
c = components[1]
assert c.name == "HGPSC 084"
@staticmethod
def test_energy_range(source):
energy_range = source.energy_range
assert energy_range.unit == "TeV"
assert_allclose(energy_range.value, [0.21544346, 61.89658356])
    @staticmethod
    def test_spectral_model_type(cat):
        """Catalog-wide breakdown: 66 power-law and 12 exp-cutoff power-law fits."""
        spec_types = Counter([_.spectral_model_type for _ in cat])
        assert spec_types == {"pl": 66, "ecpl": 12}

    @staticmethod
    @requires_dependency("uncertainties")
    def test_spectral_model_pl(cat):
        """Power-law spectral model: parameter values and integral flux + error."""
        source = cat["HESS J1843-033"]

        model = source.spectral_model()

        assert isinstance(model, PowerLaw)
        pars = model.parameters
        assert_allclose(pars["amplitude"].value, 9.140179932365378e-13)
        assert_allclose(pars["index"].value, 2.1513476371765137)
        assert_allclose(pars["reference"].value, 1.867810606956482)

        # Integral flux above 1 TeV (value and propagated error) must agree
        # with the pre-computed catalog columns to 1%.
        val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
        assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
        assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)

    @staticmethod
    @requires_dependency("uncertainties")
    def test_spectral_model_ecpl(cat):
        """ECPL source: checks both the default ECPL fit and the "pl" fit."""
        source = cat["HESS J0835-455"]

        # Default spectral model for this source is the exp-cutoff power law.
        model = source.spectral_model()
        assert isinstance(model, ExponentialCutoffPowerLaw)

        pars = model.parameters
        assert_allclose(pars["amplitude"].value, 6.408420542586617e-12)
        assert_allclose(pars["index"].value, 1.3543991614920847)
        assert_allclose(pars["reference"].value, 1.696938754239)
        assert_allclose(pars["lambda_"].value, 0.081517637)

        val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
        assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
        assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)

        # The alternative power-law fit is selected explicitly and compared
        # against the *_PL_* catalog columns.
        model = source.spectral_model("pl")
        assert isinstance(model, PowerLaw)

        pars = model.parameters
        assert_allclose(pars["amplitude"].value, 1.833056926733856e-12)
        assert_allclose(pars["index"].value, 1.8913707)
        assert_allclose(pars["reference"].value, 3.0176312923431396)

        val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
        assert_allclose(val, source.data["Flux_Spec_PL_Int_1TeV"].value, rtol=0.01)
        assert_allclose(err, source.data["Flux_Spec_PL_Int_1TeV_Err"].value, rtol=0.01)
    @staticmethod
    def test_spatial_model_type(cat):
        """Expected breakdown of morphology types over the whole catalog."""
        morph_types = Counter([_.spatial_model_type for _ in cat])
        assert morph_types == {
            "gaussian": 52,
            "2-gaussian": 8,
            "shell": 7,
            "point-like": 6,
            "3-gaussian": 5,
        }

    @staticmethod
    def test_sky_model_point(cat):
        """Point-like source: sky model amplitude and position."""
        model = cat["HESS J1826-148"].sky_model()
        p = model.parameters
        assert_allclose(p["amplitude"].value, 9.815771242691063e-13)
        assert_allclose(p["lon_0"].value, 16.882482528686523)
        assert_allclose(p["lat_0"].value, -1.2889292240142822)

    @staticmethod
    def test_sky_model_gaussian(cat):
        """Single-Gaussian source: sky model amplitude, position and sigma."""
        model = cat["HESS J1119-614"].sky_model()
        p = model.parameters
        assert_allclose(p["amplitude"].value, 7.959899015960725e-13)
        assert_allclose(p["lon_0"].value, 292.1280822753906)
        assert_allclose(p["lat_0"].value, -0.5332353711128235)
        assert_allclose(p["sigma"].value, 0.09785966575145721)

    @staticmethod
    def test_sky_model_gaussian2(cat):
        """Two-Gaussian source: parameters of each component sky model."""
        model = cat["HESS J1843-033"].sky_model()

        p = model.skymodels[0].parameters
        assert_allclose(p["amplitude"].value, 4.259815e-13, rtol=1e-5)
        assert_allclose(p["lon_0"].value, 29.047216415405273)
        assert_allclose(p["lat_0"].value, 0.24389676749706268)
        assert_allclose(p["sigma"].value, 0.12499100714921951)

        p = model.skymodels[1].parameters
        assert_allclose(p["amplitude"].value, 4.880365e-13, rtol=1e-5)
        assert_allclose(p["lon_0"].value, 28.77037811279297)
        assert_allclose(p["lat_0"].value, -0.0727819949388504)
        assert_allclose(p["sigma"].value, 0.2294706553220749)

    @staticmethod
    def test_sky_model_gaussian3(cat):
        """Three-Gaussian source: parameters of each component sky model."""
        model = cat["HESS J1825-137"].sky_model()

        p = model.skymodels[0].parameters
        assert_allclose(p["amplitude"].value, 1.8952104218765842e-11)
        assert_allclose(p["lon_0"].value, 16.988601684570312)
        assert_allclose(p["lat_0"].value, -0.4913068115711212)
        assert_allclose(p["sigma"].value, 0.47650089859962463)

        p = model.skymodels[1].parameters
        assert_allclose(p["amplitude"].value, 4.4639763971527836e-11)
        assert_allclose(p["lon_0"].value, 17.71169090270996)
        assert_allclose(p["lat_0"].value, -0.6598004102706909)
        assert_allclose(p["sigma"].value, 0.3910967707633972)

        p = model.skymodels[2].parameters
        assert_allclose(p["amplitude"].value, 5.870712920658374e-12)
        assert_allclose(p["lon_0"].value, 17.840524673461914)
        assert_allclose(p["lat_0"].value, -0.7057178020477295)
        assert_allclose(p["sigma"].value, 0.10932201147079468)

    @staticmethod
    def test_sky_model_gaussian_extern(cat):
        """Gaussian source with a fixed sigma of 0.17 deg.

        NOTE(review): "extern" presumably refers to an externally-provided
        morphology -- confirm against the HGPS catalog definition.
        """
        model = cat["HESS J1801-233"].sky_model()
        p = model.parameters
        assert_allclose(p["amplitude"].value, 7.499999970031479e-13)
        assert_allclose(p["lon_0"].value, 6.656888961791992)
        assert_allclose(p["lat_0"].value, -0.267688125371933)
        assert_allclose(p["sigma"].value, 0.17)

    @staticmethod
    def test_sky_model_shell(cat):
        """Shell-morphology source: amplitude, position, radius and width."""
        model = cat["Vela Junior"].sky_model()
        p = model.parameters
        assert_allclose(p["amplitude"].value, 3.2163001428830995e-11)
        assert_allclose(p["lon_0"].value, 266.2873840332031)
        assert_allclose(p["lat_0"].value, -1.243260383605957)
        assert_allclose(p["radius"].value, 0.95)
        assert_allclose(p["width"].value, 0.05)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPSComponent:
    """Tests for a single Gaussian component object of the HGPS catalog."""

    @pytest.fixture(scope="class")
    def component(self, cat):
        # Component with index 83; per the assertions below its name is
        # "HGPSC 084" (name is index + 1).
        return cat.gaussian_component(83)

    @staticmethod
    def test_repr(component):
        assert "SourceCatalogObjectHGPSComponent" in repr(component)

    @staticmethod
    def test_str(component):
        assert "Component HGPSC 084" in str(component)

    @staticmethod
    def test_name(component):
        assert component.name == "HGPSC 084"

    @staticmethod
    def test_index(component):
        assert component.index == 83

    @staticmethod
    def test_spatial_model(component):
        """Gaussian spatial model: parameter values and their errors."""
        model = component.spatial_model

        p = model.parameters
        assert_allclose(p["lon_0"].value, 28.77037811279297)
        assert_allclose(p.error("lon_0"), 0.058748625218868256)
        assert_allclose(p["lat_0"].value, -0.0727819949388504)
        assert_allclose(p.error("lat_0"), 0.06880396604537964)
        assert_allclose(p["sigma"].value, 0.2294706553220749)
        assert_allclose(p.error("sigma"), 0.04618723690509796)
class TestSourceCatalogLargeScaleHGPS:
    """Tests for the HGPS large-scale emission model built from a table."""

    def setup(self):
        # Four samples of (GLON, surface brightness, GLAT, width); the model
        # evaluates these as a function of longitude (presumably by
        # interpolation between rows -- TODO confirm).
        table = Table()
        table["GLON"] = [-30, -10, 10, 20] * u.deg
        table["Surface_Brightness"] = [0, 1, 10, 0] * u.Unit("cm-2 s-1 sr-1")
        table["GLAT"] = [-1, 0, 1, 0] * u.deg
        table["Width"] = [0.4, 0.5, 0.3, 1.0] * u.deg
        self.table = table
        self.model = SourceCatalogLargeScaleHGPS(table)

    def test_evaluate(self):
        """Model evaluated on a coordinate grid sums to a known reference value."""
        x = np.linspace(-100, 20, 5)
        y = np.linspace(-2, 2, 7)
        x, y = np.meshgrid(x, y)
        coords = SkyCoord(x, y, unit="deg", frame="galactic")
        image = self.model.evaluate(coords)
        desired = 1.223962643740966 * u.Unit("cm-2 s-1 sr-1")
        assert_quantity_allclose(image.sum(), desired)

    def test_parvals(self):
        # At GLON = 10 deg the model must reproduce the table row exactly.
        glon = Angle(10, unit="deg")
        assert_quantity_allclose(
            self.model.peak_brightness(glon), 10 * u.Unit("cm-2 s-1 sr-1")
        )
        assert_quantity_allclose(self.model.peak_latitude(glon), 1 * u.deg)
        assert_quantity_allclose(self.model.width(glon), 0.3 * u.deg)
| true | true |
f72d04c38d826a7bc9753a2f6f269707d60b54c5 | 2,335 | py | Python | nmmo/entity/player.py | jsuarez5341/neural-mmo | 0828982e8a30641986fdd947ab82f34c008fafde | [
"MIT"
] | 230 | 2019-07-03T06:52:29.000Z | 2021-12-10T18:47:37.000Z | nmmo/entity/player.py | jsuarez5341/neural-mmo | 0828982e8a30641986fdd947ab82f34c008fafde | [
"MIT"
] | 16 | 2019-10-11T16:51:27.000Z | 2021-12-06T14:32:31.000Z | nmmo/entity/player.py | jsuarez5341/neural-mmo | 0828982e8a30641986fdd947ab82f34c008fafde | [
"MIT"
] | 40 | 2019-08-02T19:36:38.000Z | 2021-12-02T09:59:08.000Z | import numpy as np
from pdb import set_trace as T
import nmmo
from nmmo.systems import ai, equipment
from nmmo.lib import material
from nmmo.systems.skill import Skills
from nmmo.systems.achievement import Diary
from nmmo.entity import entity
class Player(entity.Entity):
    """Agent-controlled entity in the NMMO world.

    Wraps an ``agent`` (policy) and wires up the player-specific
    subsystems: combat/harvest skills and the achievement diary.
    NOTE(review): ``self.resources``, ``self.loadout`` and ``self.dataframe``
    are not created here, so they presumably come from ``entity.Entity`` --
    confirm against the base class.
    """

    def __init__(self, realm, pos, agent, color, pop):
        # agent.iden / agent.name identify this entity in the realm.
        super().__init__(realm, pos, agent.iden, agent.name, color, pop)

        self.agent = agent
        self.pop = pop

        # Scripted hooks: slots that scripted agents use to remember their
        # current attack target and resource tiles between ticks.
        self.target = None
        self.food = None
        self.water = None
        self.vision = 7

        # Submodules
        self.skills = Skills(self)
        self.achievements = Diary(realm.config)

        # Register this entity's row in the observation dataframe.
        self.dataframe.init(nmmo.Serialized.Entity, self.entID, self.pos)

    @property
    def serial(self):
        """Unique (population, entity id) identifier tuple."""
        return self.population, self.entID

    @property
    def isPlayer(self) -> bool:
        """Always True: distinguishes players from other entity types."""
        return True

    @property
    def population(self):
        return self.pop

    def applyDamage(self, dmg, style):
        """Hook invoked when this player deals ``dmg`` using combat ``style``.

        Dealing damage *restores* food and water and is forwarded to the
        skill system.
        """
        self.resources.food.increment(dmg)
        self.resources.water.increment(dmg)
        self.skills.applyDamage(dmg, style)

    def receiveDamage(self, source, dmg):
        """Hook invoked when this player takes ``dmg`` from ``source``."""
        # NOTE(review): the base implementation presumably returns falsy when
        # the damage kills this entity -- in that case credit ``source`` with
        # a player kill and skip the resource/skill bookkeeping. Confirm
        # against entity.Entity.receiveDamage.
        if not super().receiveDamage(source, dmg):
            if source:
                source.history.playerKills += 1
            return

        self.resources.food.decrement(dmg)
        self.resources.water.decrement(dmg)
        self.skills.receiveDamage(dmg)

    def receiveLoot(self, loadout):
        """Absorb dropped equipment, keeping only strictly higher-level pieces."""
        if loadout.chestplate.level > self.loadout.chestplate.level:
            self.loadout.chestplate = loadout.chestplate
        if loadout.platelegs.level > self.loadout.platelegs.level:
            self.loadout.platelegs = loadout.platelegs

    def packet(self):
        """Serialize this player for the client/renderer."""
        data = super().packet()

        data['entID'] = self.entID
        data['annID'] = self.population

        data['base'] = self.base.packet()
        data['resource'] = self.resources.packet()
        data['skills'] = self.skills.packet()

        return data

    def update(self, realm, actions):
        '''Post-action update. Do not include history'''
        super().update(realm, actions)

        if not self.alive:
            return

        # Order matters here: resources tick before skills and achievements.
        self.resources.update(realm, self, actions)
        self.skills.update(realm, self, actions)
        self.achievements.update(realm, self)
        #self.inventory.update(world, actions)  # TODO: inventory system not wired up
| 26.83908 | 71 | 0.651392 | import numpy as np
from pdb import set_trace as T
import nmmo
from nmmo.systems import ai, equipment
from nmmo.lib import material
from nmmo.systems.skill import Skills
from nmmo.systems.achievement import Diary
from nmmo.entity import entity
class Player(entity.Entity):
def __init__(self, realm, pos, agent, color, pop):
super().__init__(realm, pos, agent.iden, agent.name, color, pop)
self.agent = agent
self.pop = pop
self.target = None
self.food = None
self.water = None
self.vision = 7
self.skills = Skills(self)
self.achievements = Diary(realm.config)
self.dataframe.init(nmmo.Serialized.Entity, self.entID, self.pos)
@property
def serial(self):
return self.population, self.entID
@property
def isPlayer(self) -> bool:
return True
@property
def population(self):
return self.pop
def applyDamage(self, dmg, style):
self.resources.food.increment(dmg)
self.resources.water.increment(dmg)
self.skills.applyDamage(dmg, style)
def receiveDamage(self, source, dmg):
if not super().receiveDamage(source, dmg):
if source:
source.history.playerKills += 1
return
self.resources.food.decrement(dmg)
self.resources.water.decrement(dmg)
self.skills.receiveDamage(dmg)
def receiveLoot(self, loadout):
if loadout.chestplate.level > self.loadout.chestplate.level:
self.loadout.chestplate = loadout.chestplate
if loadout.platelegs.level > self.loadout.platelegs.level:
self.loadout.platelegs = loadout.platelegs
def packet(self):
data = super().packet()
data['entID'] = self.entID
data['annID'] = self.population
data['base'] = self.base.packet()
data['resource'] = self.resources.packet()
data['skills'] = self.skills.packet()
return data
def update(self, realm, actions):
super().update(realm, actions)
if not self.alive:
return
self.resources.update(realm, self, actions)
self.skills.update(realm, self, actions)
self.achievements.update(realm, self)
| true | true |
f72d0563aa56e9c5a65d9965f487213c64d35e84 | 3,423 | py | Python | newchain_keys/main.py | newtonproject/newchain-keys.py | d8a053d78787dfc4403b57e60d54d0472d59787c | [
"MIT"
] | null | null | null | newchain_keys/main.py | newtonproject/newchain-keys.py | d8a053d78787dfc4403b57e60d54d0472d59787c | [
"MIT"
] | null | null | null | newchain_keys/main.py | newtonproject/newchain-keys.py | d8a053d78787dfc4403b57e60d54d0472d59787c | [
"MIT"
] | null | null | null | from typing import (Any, Union, Type) # noqa: F401
from newchain_keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from newchain_keys.exceptions import (
ValidationError,
)
from newchain_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
# The aliases are used in the KeyAPI method annotations below, where the
# class-level shortcut attributes of the same names would otherwise be
# picked up instead of the module-level types.
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
    """High-level ECDSA key API.

    A thin validation layer over the configured backend: every public method
    validates its arguments are the expected ``newchain_keys.datatypes``
    types, delegates the actual cryptography to ``self.backend``, and
    validates the backend's return type before handing it to the caller.
    """

    #
    # datatype shortcuts
    #
    PublicKey = PublicKey  # type: Type[_PublicKey]
    PrivateKey = PrivateKey  # type: Type[_PrivateKey]
    Signature = Signature  # type: Type[_Signature]

    #
    # Shared validation helpers (these isinstance checks were previously
    # duplicated inline in every public method; the error messages they
    # produce are unchanged).
    #
    @staticmethod
    def _validate_arg(value: Any, expected: type, name: str) -> None:
        """Raise ``ValidationError`` unless ``value`` is an ``expected`` instance."""
        if not isinstance(value, expected):
            raise ValidationError(
                "The `{0}` must be an instance of "
                "`newchain_keys.datatypes.{1}`".format(name, expected.__name__)
            )

    @staticmethod
    def _validate_backend_return(value: Any, expected: type, name: str) -> None:
        """Raise ``ValidationError`` unless the backend returned an ``expected``."""
        if not isinstance(value, expected):
            raise ValidationError(
                "Backend returned an invalid {0}. Return value must be "
                "an instance of `newchain_keys.datatypes.{1}`".format(
                    name, expected.__name__
                )
            )

    #
    # Proxy method calls to the backends
    #
    def ecdsa_sign(self,
                   message_hash: bytes,
                   private_key: _PrivateKey) -> _Signature:
        """Sign ``message_hash`` with ``private_key`` via the backend."""
        validate_message_hash(message_hash)
        self._validate_arg(private_key, PrivateKey, "private_key")
        signature = self.backend.ecdsa_sign(message_hash, private_key)
        self._validate_backend_return(signature, Signature, "signature")
        return signature

    def ecdsa_verify(self,
                     message_hash: bytes,
                     signature: _Signature,
                     public_key: _PublicKey) -> bool:
        """Return True iff ``signature`` over ``message_hash`` recovers ``public_key``.

        Verification is implemented by public-key recovery, so
        ``message_hash`` and ``signature`` are validated by
        :meth:`ecdsa_recover`.
        """
        self._validate_arg(public_key, PublicKey, "public_key")
        return self.ecdsa_recover(message_hash, signature) == public_key

    def ecdsa_recover(self,
                      message_hash: bytes,
                      signature: _Signature) -> _PublicKey:
        """Recover the public key that produced ``signature`` over ``message_hash``."""
        validate_message_hash(message_hash)
        self._validate_arg(signature, Signature, "signature")
        public_key = self.backend.ecdsa_recover(message_hash, signature)
        self._validate_backend_return(public_key, _PublicKey, "public_key")
        return public_key

    def private_key_to_public_key(self, private_key: _PrivateKey) -> _PublicKey:
        """Derive the public key corresponding to ``private_key``."""
        self._validate_arg(private_key, PrivateKey, "private_key")
        public_key = self.backend.private_key_to_public_key(private_key)
        self._validate_backend_return(public_key, PublicKey, "public_key")
        return public_key
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
# backend=None defers backend selection to LazyBackend (resolution logic not
# visible in this module).
lazy_key_api = KeyAPI(backend=None)
| 36.414894 | 95 | 0.643003 | from typing import (Any, Union, Type)
from newchain_keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from newchain_keys.exceptions import (
ValidationError,
)
from newchain_keys.validation import (
validate_message_hash,
)
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
PublicKey = PublicKey
PrivateKey = PrivateKey
Signature = Signature
def ecdsa_sign(self,
message_hash: bytes,
private_key: _PrivateKey) -> _Signature:
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `newchain_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash: bytes,
signature: _Signature,
public_key: _PublicKey) -> bool:
if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `newchain_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash: bytes,
signature: _Signature) -> _PublicKey:
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `newchain_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key: _PrivateKey) -> _PublicKey:
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
lazy_key_api = KeyAPI(backend=None)
| true | true |
f72d059384ed8eef053b45a327b0c27c29814ac1 | 11,111 | py | Python | opentelemetry-sdk/tests/metrics/test_metrics.py | 3tilley/opentelemetry-python | 4ed4fd08db67de69369f87862e43562c2e43fed5 | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_metrics.py | 3tilley/opentelemetry-python | 4ed4fd08db67de69369f87862e43562c2e43fed5 | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_metrics.py | 3tilley/opentelemetry-python | 4ed4fd08db67de69369f87862e43562c2e43fed5 | [
"Apache-2.0"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import WARNING
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from opentelemetry._metrics import NoOpMeter
from opentelemetry.sdk._metrics import Meter, MeterProvider
from opentelemetry.sdk._metrics.instrument import (
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk._metrics.metric_reader import MetricReader
from opentelemetry.sdk._metrics.point import AggregationTemporality
from opentelemetry.sdk.resources import Resource
from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc
class DummyMetricReader(MetricReader):
    """Minimal cumulative MetricReader stub used by the tests below."""

    def __init__(self):
        super().__init__(AggregationTemporality.CUMULATIVE)

    def _receive_metrics(self, metrics):
        # Intentionally discard metrics: tests only care that collection
        # and shutdown are triggered, not what is exported.
        pass

    def shutdown(self):
        # Always report a successful shutdown.
        return True
class TestMeterProvider(ConcurrencyTestBase):
    """Tests for the SDK ``MeterProvider``: resource handling, meter caching,
    shutdown semantics and measurement-consumer wiring."""

    def test_resource(self):
        """
        `MeterProvider` provides a way to allow a `Resource` to be specified.
        """

        meter_provider_0 = MeterProvider()
        meter_provider_1 = MeterProvider()

        # The default resource is shared between providers.
        self.assertIs(
            meter_provider_0._sdk_config.resource,
            meter_provider_1._sdk_config.resource,
        )
        self.assertIsInstance(meter_provider_0._sdk_config.resource, Resource)
        self.assertIsInstance(meter_provider_1._sdk_config.resource, Resource)

        # An explicitly passed resource is stored as-is.
        resource = Resource({"key": "value"})
        self.assertIs(
            MeterProvider(resource=resource)._sdk_config.resource, resource
        )

    def test_get_meter(self):
        """
        `MeterProvider.get_meter` arguments are used to create an
        `InstrumentationInfo` object on the created `Meter`.
        """

        meter = MeterProvider().get_meter(
            "name",
            version="version",
            schema_url="schema_url",
        )

        self.assertEqual(meter._instrumentation_info.name, "name")
        self.assertEqual(meter._instrumentation_info.version, "version")
        self.assertEqual(meter._instrumentation_info.schema_url, "schema_url")

    def test_get_meter_empty(self):
        """
        `MeterProvider.get_meter` called with None or empty string as name
        should return a NoOpMeter.
        """

        meter = MeterProvider().get_meter(
            None,
            version="version",
            schema_url="schema_url",
        )
        self.assertIsInstance(meter, NoOpMeter)
        self.assertEqual(meter._name, None)

        meter = MeterProvider().get_meter(
            "",
            version="version",
            schema_url="schema_url",
        )
        self.assertIsInstance(meter, NoOpMeter)
        self.assertEqual(meter._name, "")

    def test_get_meter_duplicate(self):
        """
        Subsequent calls to `MeterProvider.get_meter` with the same arguments
        should return the same `Meter` instance.
        """
        mp = MeterProvider()
        meter1 = mp.get_meter(
            "name",
            version="version",
            schema_url="schema_url",
        )
        meter2 = mp.get_meter(
            "name",
            version="version",
            schema_url="schema_url",
        )
        # Different name -> different meter.
        meter3 = mp.get_meter(
            "name2",
            version="version",
            schema_url="schema_url",
        )
        self.assertIs(meter1, meter2)
        self.assertIsNot(meter1, meter3)

    def test_shutdown(self):
        """shutdown() returns False and logs if any reader fails to shut down."""

        # First reader reports a failed shutdown; MagicMock is needed so that
        # __str__ can be mocked for the log-message assertion.
        mock_metric_reader_0 = MagicMock(
            **{
                "shutdown.return_value": False,
                "__str__.return_value": "mock_metric_reader_0",
            }
        )
        mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})

        meter_provider = MeterProvider(
            metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
        )

        with self.assertLogs(level=WARNING) as log:
            self.assertFalse(meter_provider.shutdown())

        # The failing reader is named in the warning; both readers are still
        # asked to shut down exactly once.
        self.assertEqual(
            log.records[0].getMessage(),
            "MetricReader mock_metric_reader_0 failed to shutdown",
        )
        mock_metric_reader_0.shutdown.assert_called_once()
        mock_metric_reader_1.shutdown.assert_called_once()

        # When every reader shuts down cleanly, shutdown() returns True.
        mock_metric_reader_0 = Mock(**{"shutdown.return_value": True})
        mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})

        meter_provider = MeterProvider(
            metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
        )

        self.assertTrue(meter_provider.shutdown())
        mock_metric_reader_0.shutdown.assert_called_once()
        mock_metric_reader_1.shutdown.assert_called_once()

    def test_shutdown_subsequent_calls(self):
        """
        No subsequent attempts to get a `Meter` are allowed after calling
        `MeterProvider.shutdown`
        """

        meter_provider = MeterProvider()

        # First shutdown emits no warning, so assertLogs itself fails.
        with self.assertRaises(AssertionError):
            with self.assertLogs(level=WARNING):
                meter_provider.shutdown()

        # Second shutdown must log a warning about the repeated call.
        with self.assertLogs(level=WARNING):
            meter_provider.shutdown()

    @patch("opentelemetry.sdk._metrics._logger")
    def test_shutdown_race(self, mock_logger):
        """Only one of N concurrent shutdown() calls wins; the rest warn."""
        mock_logger.warning = MockFunc()
        meter_provider = MeterProvider()
        num_threads = 70
        self.run_with_many_threads(
            meter_provider.shutdown, num_threads=num_threads
        )
        self.assertEqual(mock_logger.warning.call_count, num_threads - 1)

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_measurement_collect_callback(
        self, mock_sync_measurement_consumer
    ):
        """Each reader's collect() is routed to the measurement consumer."""
        # NOTE(review): [DummyMetricReader()] * 5 repeats the *same* reader
        # instance five times; the count assertion still holds because
        # collect() is invoked once per list entry.
        metric_readers = [DummyMetricReader()] * 5
        sync_consumer_instance = mock_sync_measurement_consumer()
        sync_consumer_instance.collect = MockFunc()
        MeterProvider(metric_readers=metric_readers)

        for reader in metric_readers:
            reader.collect()

        self.assertEqual(
            sync_consumer_instance.collect.call_count, len(metric_readers)
        )

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_creates_sync_measurement_consumer(
        self, mock_sync_measurement_consumer
    ):
        """Constructing a provider instantiates the measurement consumer."""
        MeterProvider()
        mock_sync_measurement_consumer.assert_called()

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_register_asynchronous_instrument(
        self, mock_sync_measurement_consumer
    ):
        """Every asynchronous instrument type registers with the consumer."""

        meter_provider = MeterProvider()

        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_counter(
                "name", Mock()
            )
        )
        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_up_down_counter(
                "name", Mock()
            )
        )
        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_gauge(
                "name", Mock()
            )
        )

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_counter(self, mock_sync_measurement_consumer):
        """Counter.add() feeds a measurement into the consumer."""
        sync_consumer_instance = mock_sync_measurement_consumer()
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_counter("name")

        counter.add(1)

        sync_consumer_instance.consume_measurement.assert_called()

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_up_down_counter(
        self, mock_sync_measurement_consumer
    ):
        """UpDownCounter.add() feeds a measurement into the consumer."""
        sync_consumer_instance = mock_sync_measurement_consumer()
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_up_down_counter(
            "name"
        )

        counter.add(1)

        sync_consumer_instance.consume_measurement.assert_called()

    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_histogram(
        self, mock_sync_measurement_consumer
    ):
        """Histogram.record() feeds a measurement into the consumer."""
        sync_consumer_instance = mock_sync_measurement_consumer()
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_histogram("name")

        counter.record(1)

        sync_consumer_instance.consume_measurement.assert_called()
class TestMeter(TestCase):
    """Tests that ``Meter.create_*`` return the matching SDK instrument types."""

    def setUp(self):
        # Meter's constructor arguments (instrumentation info and
        # measurement consumer) are irrelevant here, so plain mocks suffice.
        self.meter = Meter(Mock(), Mock())

    def test_create_counter(self):
        counter = self.meter.create_counter(
            "name", unit="unit", description="description"
        )

        self.assertIsInstance(counter, Counter)
        self.assertEqual(counter.name, "name")

    def test_create_up_down_counter(self):
        up_down_counter = self.meter.create_up_down_counter(
            "name", unit="unit", description="description"
        )

        self.assertIsInstance(up_down_counter, UpDownCounter)
        self.assertEqual(up_down_counter.name, "name")

    def test_create_observable_counter(self):
        observable_counter = self.meter.create_observable_counter(
            "name", Mock(), unit="unit", description="description"
        )

        self.assertIsInstance(observable_counter, ObservableCounter)
        self.assertEqual(observable_counter.name, "name")

    def test_create_histogram(self):
        histogram = self.meter.create_histogram(
            "name", unit="unit", description="description"
        )

        self.assertIsInstance(histogram, Histogram)
        self.assertEqual(histogram.name, "name")

    def test_create_observable_gauge(self):
        observable_gauge = self.meter.create_observable_gauge(
            "name", Mock(), unit="unit", description="description"
        )

        self.assertIsInstance(observable_gauge, ObservableGauge)
        self.assertEqual(observable_gauge.name, "name")

    def test_create_observable_up_down_counter(self):
        observable_up_down_counter = (
            self.meter.create_observable_up_down_counter(
                "name", Mock(), unit="unit", description="description"
            )
        )
        self.assertIsInstance(
            observable_up_down_counter, ObservableUpDownCounter
        )
        self.assertEqual(observable_up_down_counter.name, "name")
| 34.187692 | 97 | 0.671587 |
from logging import WARNING
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from opentelemetry._metrics import NoOpMeter
from opentelemetry.sdk._metrics import Meter, MeterProvider
from opentelemetry.sdk._metrics.instrument import (
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk._metrics.metric_reader import MetricReader
from opentelemetry.sdk._metrics.point import AggregationTemporality
from opentelemetry.sdk.resources import Resource
from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc
class DummyMetricReader(MetricReader):
def __init__(self):
super().__init__(AggregationTemporality.CUMULATIVE)
def _receive_metrics(self, metrics):
pass
def shutdown(self):
return True
class TestMeterProvider(ConcurrencyTestBase):
def test_resource(self):
meter_provider_0 = MeterProvider()
meter_provider_1 = MeterProvider()
self.assertIs(
meter_provider_0._sdk_config.resource,
meter_provider_1._sdk_config.resource,
)
self.assertIsInstance(meter_provider_0._sdk_config.resource, Resource)
self.assertIsInstance(meter_provider_1._sdk_config.resource, Resource)
resource = Resource({"key": "value"})
self.assertIs(
MeterProvider(resource=resource)._sdk_config.resource, resource
)
def test_get_meter(self):
meter = MeterProvider().get_meter(
"name",
version="version",
schema_url="schema_url",
)
self.assertEqual(meter._instrumentation_info.name, "name")
self.assertEqual(meter._instrumentation_info.version, "version")
self.assertEqual(meter._instrumentation_info.schema_url, "schema_url")
def test_get_meter_empty(self):
meter = MeterProvider().get_meter(
None,
version="version",
schema_url="schema_url",
)
self.assertIsInstance(meter, NoOpMeter)
self.assertEqual(meter._name, None)
meter = MeterProvider().get_meter(
"",
version="version",
schema_url="schema_url",
)
self.assertIsInstance(meter, NoOpMeter)
self.assertEqual(meter._name, "")
def test_get_meter_duplicate(self):
mp = MeterProvider()
meter1 = mp.get_meter(
"name",
version="version",
schema_url="schema_url",
)
meter2 = mp.get_meter(
"name",
version="version",
schema_url="schema_url",
)
meter3 = mp.get_meter(
"name2",
version="version",
schema_url="schema_url",
)
self.assertIs(meter1, meter2)
self.assertIsNot(meter1, meter3)
def test_shutdown(self):
mock_metric_reader_0 = MagicMock(
**{
"shutdown.return_value": False,
"__str__.return_value": "mock_metric_reader_0",
}
)
mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})
meter_provider = MeterProvider(
metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
)
with self.assertLogs(level=WARNING) as log:
self.assertFalse(meter_provider.shutdown())
self.assertEqual(
log.records[0].getMessage(),
"MetricReader mock_metric_reader_0 failed to shutdown",
)
mock_metric_reader_0.shutdown.assert_called_once()
mock_metric_reader_1.shutdown.assert_called_once()
mock_metric_reader_0 = Mock(**{"shutdown.return_value": True})
mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})
meter_provider = MeterProvider(
metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
)
self.assertTrue(meter_provider.shutdown())
mock_metric_reader_0.shutdown.assert_called_once()
mock_metric_reader_1.shutdown.assert_called_once()
def test_shutdown_subsequent_calls(self):
meter_provider = MeterProvider()
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
meter_provider.shutdown()
with self.assertLogs(level=WARNING):
meter_provider.shutdown()
@patch("opentelemetry.sdk._metrics._logger")
def test_shutdown_race(self, mock_logger):
mock_logger.warning = MockFunc()
meter_provider = MeterProvider()
num_threads = 70
self.run_with_many_threads(
meter_provider.shutdown, num_threads=num_threads
)
self.assertEqual(mock_logger.warning.call_count, num_threads - 1)
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_measurement_collect_callback(
self, mock_sync_measurement_consumer
):
metric_readers = [DummyMetricReader()] * 5
sync_consumer_instance = mock_sync_measurement_consumer()
sync_consumer_instance.collect = MockFunc()
MeterProvider(metric_readers=metric_readers)
for reader in metric_readers:
reader.collect()
self.assertEqual(
sync_consumer_instance.collect.call_count, len(metric_readers)
)
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_creates_sync_measurement_consumer(
self, mock_sync_measurement_consumer
):
MeterProvider()
mock_sync_measurement_consumer.assert_called()
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_register_asynchronous_instrument(
self, mock_sync_measurement_consumer
):
meter_provider = MeterProvider()
meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
meter_provider.get_meter("name").create_observable_counter(
"name", Mock()
)
)
meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
meter_provider.get_meter("name").create_observable_up_down_counter(
"name", Mock()
)
)
meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
meter_provider.get_meter("name").create_observable_gauge(
"name", Mock()
)
)
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_consume_measurement_counter(self, mock_sync_measurement_consumer):
sync_consumer_instance = mock_sync_measurement_consumer()
meter_provider = MeterProvider()
counter = meter_provider.get_meter("name").create_counter("name")
counter.add(1)
sync_consumer_instance.consume_measurement.assert_called()
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_consume_measurement_up_down_counter(
self, mock_sync_measurement_consumer
):
sync_consumer_instance = mock_sync_measurement_consumer()
meter_provider = MeterProvider()
counter = meter_provider.get_meter("name").create_up_down_counter(
"name"
)
counter.add(1)
sync_consumer_instance.consume_measurement.assert_called()
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_consume_measurement_histogram(
self, mock_sync_measurement_consumer
):
sync_consumer_instance = mock_sync_measurement_consumer()
meter_provider = MeterProvider()
counter = meter_provider.get_meter("name").create_histogram("name")
counter.record(1)
sync_consumer_instance.consume_measurement.assert_called()
class TestMeter(TestCase):
def setUp(self):
self.meter = Meter(Mock(), Mock())
def test_create_counter(self):
counter = self.meter.create_counter(
"name", unit="unit", description="description"
)
self.assertIsInstance(counter, Counter)
self.assertEqual(counter.name, "name")
def test_create_up_down_counter(self):
up_down_counter = self.meter.create_up_down_counter(
"name", unit="unit", description="description"
)
self.assertIsInstance(up_down_counter, UpDownCounter)
self.assertEqual(up_down_counter.name, "name")
def test_create_observable_counter(self):
observable_counter = self.meter.create_observable_counter(
"name", Mock(), unit="unit", description="description"
)
self.assertIsInstance(observable_counter, ObservableCounter)
self.assertEqual(observable_counter.name, "name")
def test_create_histogram(self):
histogram = self.meter.create_histogram(
"name", unit="unit", description="description"
)
self.assertIsInstance(histogram, Histogram)
self.assertEqual(histogram.name, "name")
def test_create_observable_gauge(self):
observable_gauge = self.meter.create_observable_gauge(
"name", Mock(), unit="unit", description="description"
)
self.assertIsInstance(observable_gauge, ObservableGauge)
self.assertEqual(observable_gauge.name, "name")
def test_create_observable_up_down_counter(self):
observable_up_down_counter = (
self.meter.create_observable_up_down_counter(
"name", Mock(), unit="unit", description="description"
)
)
self.assertIsInstance(
observable_up_down_counter, ObservableUpDownCounter
)
self.assertEqual(observable_up_down_counter.name, "name")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.