signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
    """Record an invalid transaction against its containing batch.

    Adds the batch id to the invalid cache together with the id of the
    rejected transaction and any error message or extended data, and
    removes that batch id from the pending set. The cache is temporary;
    batch info is purged after one hour.

    Args:
        txn_id (str): The id of the invalid transaction.
        message (str, optional): Message explaining why the batch is invalid.
        extended_data (bytes, optional): Additional error data.
    """
    invalid_txn_info = {'id': txn_id}
    if message is not None:
        invalid_txn_info['message'] = message
    if extended_data is not None:
        invalid_txn_info['extended_data'] = extended_data
    with self._lock:
        for batch_id, txn_ids in self._batch_info.items():
            if txn_id not in txn_ids:
                continue
            # Append to an existing entry or start a new one for this batch.
            self._invalid.setdefault(batch_id, []).append(invalid_txn_info)
            self._pending.discard(batch_id)
            self._update_observers(batch_id, ClientBatchStatus.INVALID)
            return
def import_medusa_data(mat_filename, config_file):
    """Import measurement data (a .mat file) of the FZJ EIT160 system.

    This data format is identified as 'FZJ-EZ-2017'.

    Parameters
    ----------
    mat_filename : string
        Filename of the .mat data file. Note that only MNU0
        single-potentials are supported!
    config_file : string or numpy.ndarray
        Filename of the configuration file (N rows with 4 columns each:
        a, b, m, n), or an equivalent numpy array.

    Returns
    -------
    dfn, df_md
    """
    df_emd, df_md = _read_mat_mnu0(mat_filename)
    # 'config_file' can be a numpy array or a filename
    if isinstance(config_file, np.ndarray):
        configs = config_file
    else:
        configs = np.loadtxt(config_file).astype(int)
    # construct four-point measurements via superposition
    print('constructing four-point measurements')
    quadpole_list = []
    if df_emd is not None:
        index = 0
        for Ar, Br, M, N in configs:
            # the order of A and B doesn't concern us
            A = np.min((Ar, Br))
            B = np.max((Ar, Br))
            # first choice: correct ordering
            query_M = df_emd.query(
                'a=={0} and b=={1} and p=={2}'.format(A, B, M))
            query_N = df_emd.query(
                'a=={0} and b=={1} and p=={2}'.format(A, B, N))
            if query_M.size == 0 or query_N.size == 0:
                continue
            index += 1
            # keep these columns as they are (no subtracting)
            keep_cols = ['datetime', 'frequency', 'a', 'b',
                         'Zg1', 'Zg2', 'Zg3', 'Is', 'Il', 'Zg', 'Iab', ]
            diff_cols = ['Zt', ]
            df4 = pd.DataFrame()
            df4[keep_cols] = query_M[keep_cols]
            for col in diff_cols:
                df4[col] = query_M[col].values - query_N[col].values
            df4['m'] = query_M['p'].values
            df4['n'] = query_N['p'].values
            quadpole_list.append(df4)
    if quadpole_list:
        dfn = pd.concat(quadpole_list)
        Rsign = np.sign(dfn['Zt'].real)
        dfn['r'] = Rsign * np.abs(dfn['Zt'])
        dfn['Vmn'] = dfn['r'] * dfn['Iab']
        dfn['rpha'] = np.arctan2(
            np.imag(dfn['Zt'].values),
            np.real(dfn['Zt'].values),
        ) * 1e3
    else:
        dfn = pd.DataFrame()
    return dfn, df_md
def remote_run(cmd, instance_name, detach=False, retries=1):
    """Run a command on a GCS instance via ssh, optionally detached.

    Retries the ssh invocation up to ``retries`` extra times; the last
    failure is re-raised.
    """
    if detach:
        cmd = SCREEN.format(command=cmd)
    args = SSH.format(instance_name=instance_name).split()
    args.append(cmd)
    for attempt in range(retries + 1):
        try:
            if attempt > 0:
                tf.logging.info("Retry %d for %s", attempt, args)
            return sp.check_call(args)
        except sp.CalledProcessError as e:
            if attempt == retries:
                raise e
def is_openmp_supported():
    """Determine whether the build compiler has OpenMP support.

    The distutils log threshold is raised to FATAL while probing so the
    compiler check does not spam the build output. The threshold is now
    restored in a ``finally`` block, so it is reset even if the probe
    raises (the original leaked the FATAL threshold on error).
    """
    log_threshold = log.set_threshold(log.FATAL)
    try:
        return check_openmp_support()
    finally:
        log.set_threshold(log_threshold)
def _scaleTo8bit(self, img):
    """Scale *img* to 8 bit for the pattern comparator.

    Finds the signal range of the image, records it in
    ``self.signal_ranges``, and returns the image scaled to uint8.
    """
    signal_range = scaleSignalCutParams(img, 0.02)  # , nSigma=3)
    self.signal_ranges.append(signal_range)
    return toUIntArray(img, dtype=np.uint8, range=signal_range)
def value_loss(value_net_apply, value_net_params, observations, rewards,
               reward_mask, gamma=0.99):
    """Compute the value loss.

    Args:
        value_net_apply: value net apply function with signature
            (params, ndarray of shape (B, T+1) + OBS) -> ndarray (B, T+1, 1).
        value_net_params: params of value_net_apply.
        observations: np.ndarray of shape (B, T+1) + OBS.
        rewards: np.ndarray of shape (B, T) of rewards.
        reward_mask: np.ndarray of shape (B, T), the mask over rewards.
        gamma: float, discount factor.

    Returns:
        The average L2 value loss, averaged over instances where
        reward_mask is 1.
    """
    B, T = rewards.shape  # pylint: disable=invalid-name
    assert observations.shape[:2] == (B, T + 1)
    # observations is (B, T+1) + OBS; value_prediction is (B, T+1, 1).
    value_prediction = value_net_apply(observations, value_net_params)
    assert value_prediction.shape == (B, T + 1, 1)
    return value_loss_given_predictions(
        value_prediction, rewards, reward_mask, gamma)
def venv_metadata_extension(extraction_fce):
    """Decorator: merge virtualenv-derived metadata into the data returned
    by the wrapped extraction method.

    Skips extraction when the virtualenv module is unavailable or venv
    usage is disabled; always cleans up the temporary directory.
    """
    def inner(self):
        data = extraction_fce(self)
        if virtualenv is None or not self.venv:
            logger.debug("Skipping virtualenv metadata extraction.")
            return data
        temp_dir = tempfile.mkdtemp()
        try:
            extractor = virtualenv.VirtualEnv(
                self.name, temp_dir, self.name_convertor,
                self.base_python_version)
            data.set_from(extractor.get_venv_data, update=True)
        except exc.VirtualenvFailException as err:
            logger.error(
                "{}, skipping virtualenv metadata extraction.".format(err))
        finally:
            shutil.rmtree(temp_dir)
        return data
    return inner
def read(self, vals):
    """Read values.

    Args:
        vals (list): list of strings representing values; fields are
            consumed in declaration order and an empty string is stored
            as None.
    """
    # The original repeated the same read-or-None stanza four times;
    # drive the identical logic from a field-name list instead.
    field_names = [
        'typical_or_extreme_period_name',
        'typical_or_extreme_period_type',
        'period_start_day',
        'period_end_day',
    ]
    for i, name in enumerate(field_names):
        setattr(self, name, vals[i] if len(vals[i]) != 0 else None)
def p_if_statement_1(self, p):
    """if_statement : IF LPAREN expr RPAREN statement"""
    # NOTE: the docstring above is the PLY grammar production and must
    # not be edited.
    node = self.asttypes.If(predicate=p[3], consequent=p[5])
    node.setpos(p)
    p[0] = node
def _histogram_summary(self, tf_name, value, step=None):
    """Build a TensorFlow histogram ``Summary`` proto.

    Args:
        tf_name (str): name of the tensorflow variable.
        value (tuple or list): either a tuple of (bin_edges, bincounts)
            or a list of raw values to histogram.

    References:
        https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/04-utils/tensorboard/logger.py#L45

    Returns:
        A ``summary_pb2.Summary`` with a single histogram value tagged
        ``tf_name``.
    """
    if isinstance(value, tuple):
        # Pre-binned input: (bin_edges, bincounts).
        bin_edges, bincounts = value
        assert len(bin_edges) == len(bincounts) + 1, (
            'must have one more edge than count')
        hist = summary_pb2.HistogramProto()
        hist.min = float(min(bin_edges))
        hist.max = float(max(bin_edges))
    else:
        # Raw values: bin them with numpy's default histogram.
        values = np.array(value)
        bincounts, bin_edges = np.histogram(values)
        hist = summary_pb2.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values ** 2))
    # Add bin edges and counts; the first edge is dropped because TF
    # buckets are described by their upper limits.
    for edge in bin_edges[1:]:
        hist.bucket_limit.append(edge)
    for count in bincounts:
        hist.bucket.append(count)
    summary = summary_pb2.Summary()
    summary.value.add(tag=tf_name, histo=hist)
    return summary
def round_sig(x, sig):
    """Round ``x`` to the specified number of significant figures.

    Args:
        x: the number to round.
        sig: number of significant figures (>= 1).

    Returns:
        ``x`` rounded to ``sig`` significant digits. Zero is returned
        unchanged — the original raised ``ValueError`` from ``log10(0)``.
    """
    if x == 0:
        return x
    return round(x, sig - int(floor(log10(abs(x)))) - 1)
def add_boot_info_table(self, boot_info_table):
    # type: (eltorito.EltoritoBootInfoTable) -> None
    """Add a boot info table to this Inode.

    Parameters:
     boot_info_table - The Boot Info Table object to add to this Inode.
    Returns:
     Nothing.
    """
    # The Inode must have been initialized before it can carry a table.
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError(
            'Inode is not yet initialized')
    self.boot_info_table = boot_info_table
def verbosity_level(self, value):
    """Setter for **self.__verbosity_level** attribute.

    :param value: Attribute value; must be an int between 0 and 4, or
        None to unset.
    :type value: int
    """
    if value is not None:
        assert type(value) is int, \
            "'{0}' attribute: '{1}' type is not 'int'!".format(
                "verbosity_level", value)
        # Fixed user-facing typo: "beetween" -> "between".
        assert value >= 0 and value <= 4, \
            "'{0}' attribute: Value need to be exactly between 0 and 4!".format(
                "verbosity_level")
    self.__verbosity_level = value
def disaggregate_temperature(self, method='sine_min_max', min_max_time='fix',
                             mod_nighttime=False):
    """Disaggregate daily air temperature to sub-daily values.

    Parameters
    ----------
    method : str, optional
        Disaggregation method:
        ``sine_min_max`` (default) or ``sine``: sine function preserving
        daily minimum and maximum values.
        ``sine_mean``: sine function preserving the daily mean value and
        the diurnal temperature range.
        ``mean_course_min_max``: observed average course (per month),
        preserving daily minimum and maximum values.
        ``mean_course_mean``: observed average course (per month),
        preserving the daily mean value and the diurnal range.
    min_max_time : str, optional
        When the daily extremes occur:
        ``fix``: at 07:00/14:00 local time.
        ``sun_loc``: at sunrise / solar noon + 2 h.
        ``sun_loc_shift``: at sunrise / solar noon + monthly mean shift.
    mod_nighttime : bool, optional
        Use linear interpolation between minimum and maximum temperature.
    """
    self.data_disagg.temp = melodist.disaggregate_temperature(
        self.data_daily,
        method=method,
        min_max_time=min_max_time,
        max_delta=self.statistics.temp.max_delta,
        mean_course=self.statistics.temp.mean_course,
        sun_times=self.sun_times,
        mod_nighttime=mod_nighttime,
    )
def send_and_wait(self, path, message, timeout=0, responder=None):
    """Send a message and block until a response is received.

    Returns the response message. Raises ``exceptions.TimeoutError`` if
    ``timeout`` seconds pass without a response (timeout <= 0 waits
    forever).
    """
    message.on("response", lambda x, event_origin, source: None, once=True)
    # NOTE(review): this is a busy-wait loop with no sleep — presumably
    # the response flag is set by another thread; confirm CPU impact.
    deadline_start = time.time() if timeout > 0 else 0
    sent = False
    while not message.response_received:
        if not sent:
            self.send(path, message)
            sent = True
        if deadline_start and time.time() - deadline_start > timeout:
            raise exceptions.TimeoutError("send_and_wait(%s)" % path, timeout)
    return message.response_message
def p_program_line_label(p):
    """label_line : LABEL statements
                  | LABEL co_statements
    """
    # NOTE: the docstring above is the PLY grammar production; don't edit it.
    lbl = make_label(p[1], p.lineno(1))
    if len(p) == 3:
        p[0] = make_block(lbl, p[2])
    else:
        p[0] = lbl
def check_rst(code, ignore):
    """Yield errors found in nested RST code."""
    yield from check(code, filename='<string>', ignore=ignore)
def iter_configurations(kafka_topology_base_path=None):
    """Cluster topology iterator.

    Iterate over all the topologies available in config. The first
    directory containing a given cluster type wins; types that fail to
    parse are skipped.
    """
    if kafka_topology_base_path:
        config_dirs = [kafka_topology_base_path]
    else:
        config_dirs = get_conf_dirs()
    seen_types = set()
    for config_dir in config_dirs:
        for yaml_path in glob.glob('{0}/*.yaml'.format(config_dir)):
            cluster_type = os.path.basename(yaml_path)[:-5]
            if cluster_type in seen_types:
                continue
            try:
                topology = TopologyConfiguration(cluster_type, config_dir)
            except ConfigurationError:
                continue
            seen_types.add(cluster_type)
            yield topology
def sorted_files_from_bucket(bucket, keys=None):
    """Return files from bucket sorted by given keys.

    :param bucket: :class:`~invenio_files_rest.models.Bucket` containing
        the files.
    :param keys: Keys order to be used; files with unlisted keys sort last.
    :returns: Sorted list of bucket items.
    """
    keys = keys or []
    total = len(keys)
    rank = {key: position for position, key in enumerate(keys)}
    objects = ObjectVersion.get_by_bucket(bucket).all()
    return sorted(objects, key=lambda obj: rank.get(obj.key, total))
def add_answer_for_student(student_item, vote, rationale):
    """Add an answer for a student to the backend.

    Args:
        student_item (dict): The location of the problem this submission
            is associated with, as defined by a course, student, and item.
        vote (int): the option that the student voted for.
        rationale (str): the reason why the student voted for the option.
    """
    answers = get_answers_for_student(student_item)
    answers.add_answer(vote, rationale)
    payload = {ANSWER_LIST_KEY: answers.get_answers_as_list()}
    sub_api.create_submission(student_item, payload)
def pop(self):
    """Remove the last traversal path node from this traversal path."""
    removed = self.nodes.pop()
    self.__keys.remove(removed.key)
async def get_password_tty(device, options):
    """Prompt on the terminal for the password to unlock a device."""
    # TODO: make this a TRUE async — getpass blocks the event loop.
    prompt = _('Enter password for {0.device_presentation}: ', device)
    try:
        return getpass.getpass(prompt)
    except EOFError:
        print("")
        return None
def _read_stdin():
    """Generator for reading from standard input line by line.

    Other ways of reading ``stdin`` in python wait until the buffer is
    big enough, or until an EOF character is sent. This yields
    immediately after each line; ``readline`` returns '' at EOF, which
    the sentinel stops on.
    """
    yield from iter(sys.stdin.readline, '')
def _get_oplog_timestamp(self, newest_entry):
    """Return the timestamp of the latest or earliest entry in the oplog.

    Args:
        newest_entry: True for the newest entry, False for the oldest.

    Returns:
        The BSON timestamp, or None when the oplog is empty.
    """
    sort_order = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING
    curr = (self.oplog.find({"op": {"$ne": "n"}})
            .sort("$natural", sort_order)
            .limit(-1))
    try:
        ts = next(curr)["ts"]
    except StopIteration:
        LOG.debug("OplogThread: oplog is empty.")
        return None
    # Use lazy %-style logging args instead of eager "%" formatting so
    # the message is only built when DEBUG logging is enabled.
    LOG.debug("OplogThread: %s oplog entry has timestamp %s.",
              "Newest" if newest_entry else "Oldest", ts)
    return ts
def decode_field(self, field, value):
    """Decode a JSON value to a python value.

    Args:
        field: A ProtoRPC field instance.
        value: A serialized JSON value.

    Returns:
        A Python value compatible with field.
    """
    # Override BytesField handling. Client libraries typically use a
    # url-safe encoding; b64decode doesn't handle these gracefully while
    # urlsafe_b64decode handles both cases safely. Also add padding if
    # the padding is incorrect.
    if isinstance(field, messages.BytesField):
        try:
            # Need to call str(value) because ProtoRPC likes to pass
            # values as unicode, and urlsafe_b64decode can only handle
            # bytes.
            padded_value = self.__pad_value(str(value), 4, '=')
            return base64.urlsafe_b64decode(padded_value)
        except (TypeError, UnicodeEncodeError) as err:
            # ``except (...), err`` was Python-2-only syntax; ``as err``
            # is valid on Python 2.6+ and Python 3.
            raise messages.DecodeError('Base64 decoding error: %s' % err)
    return super(EndpointsProtoJson, self).decode_field(field, value)
def edge_length_sum(self, terminal=True, internal=True):
    """Compute the sum of all selected edge lengths in this ``Tree``.

    Args:
        ``terminal`` (``bool``): ``True`` to include terminal branches,
            otherwise ``False``.
        ``internal`` (``bool``): ``True`` to include internal branches,
            otherwise ``False``.

    Returns:
        ``float``: Sum of all selected edge lengths in this ``Tree``.
    """
    # Fixed error message: it previously said "leaves must be a bool"
    # although the parameter is named ``terminal``.
    if not isinstance(terminal, bool):
        raise TypeError("terminal must be a bool")
    if not isinstance(internal, bool):
        raise TypeError("internal must be a bool")
    return sum(
        node.edge_length
        for node in self.traverse_preorder()
        if node.edge_length is not None
        and ((terminal and node.is_leaf())
             or (internal and not node.is_leaf()))
    )
def setup_psd_workflow(workflow, science_segs, datafind_outs,
                       output_dir=None, tags=None):
    """Setup static psd section of CBC workflow.

    At present this only supports pregenerated psd files; in the future
    these could be created within the workflow.

    Parameters
    ----------
    workflow : pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    science_segs : Keyed dictionary of glue.segmentlist objects
        scienceSegs[ifo] holds the science segments to be analysed for
        each ifo.
    datafind_outs : pycbc.workflow.core.FileList
        The file list containing the datafind files.
    output_dir : path string
        The directory where data products will be placed.
    tags : list of strings
        If given these tags are used to uniquely name and identify output
        files that would be produced in multiple calls to this function.

    Returns
    -------
    psd_files : pycbc.workflow.core.FileList
        The FileList holding the psd files, 0 or 1 per ifo.
    """
    if tags is None:
        tags = []
    logging.info("Entering static psd module.")
    make_analysis_dir(output_dir)
    cp = workflow.cp
    # Parse for options in ini file.
    try:
        psd_method = cp.get_opt_tags("workflow-psd", "psd-method", tags)
    except Exception:
        # Pre-generated PSDs are optional; return an empty list when the
        # option is absent. (Was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return FileList([])
    if psd_method == "PREGENERATED_FILE":
        logging.info("Setting psd from pre-generated file(s).")
        psd_files = setup_psd_pregenerated(workflow, tags=tags)
    else:
        errMsg = "PSD method not recognized. Only "
        errMsg += "PREGENERATED_FILE is currently supported."
        raise ValueError(errMsg)
    logging.info("Leaving psd module.")
    return psd_files
def save(self, *objs, condition=None, atomic=False):
    """Save one or more objects.

    :param objs: objects to save.
    :param condition: only perform each save if this condition holds.
    :param bool atomic: only perform each save if the local and DynamoDB
        versions of the object match.
    :raises bloop.exceptions.ConstraintViolation: if the condition (or
        atomic) is not met.
    """
    objs = set(objs)
    validate_not_abstract(*objs)
    for obj in objs:
        item = {
            "TableName": self._compute_table_name(obj.__class__),
            "Key": dump_key(self, obj),
            **render(self, obj=obj, atomic=atomic,
                     condition=condition, update=True),
        }
        self.session.save_item(item)
        object_saved.send(self, engine=self, obj=obj)
    logger.info("successfully saved {} objects".format(len(objs)))
def discard_local_changes(cwd, path='.', user=None, password=None,
                          ignore_retcode=False, output_encoding=None):
    '''
    .. versionadded:: 2019.2.0

    Runs a ``git checkout -- <path>`` from the directory specified by
    ``cwd``.

    cwd
        The path to the git checkout

    path
        path relative to cwd (defaults to ``.``)

    user
        User under which to run the git command. By default, the command
        is run by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter
        will be ignored on non-Windows platforms.

    ignore_retcode : False
        If ``True``, do not log an error to the minion log if the git
        command returns a nonzero exit status.

    output_encoding
        Use this option to specify which encoding to use to decode the
        output from any git commands which are run. This should not be
        needed in most cases.

        .. note::
            This should only be needed if the files in the repository
            were created with filenames using an encoding other than
            UTF-8 to handle Unicode characters.

    CLI Example:

    .. code-block:: bash

        salt myminion git.discard_local_changes /path/to/repo
        salt myminion git.discard_local_changes /path/to/repo path=foo
    '''
    cwd = _expand_path(cwd, user)
    command = ['git', 'checkout', '--', path]
    # The checkout message goes to stderr, so fold it into stdout.
    return _git_run(command,
                    cwd=cwd,
                    user=user,
                    password=password,
                    ignore_retcode=ignore_retcode,
                    redirect_stderr=True,
                    output_encoding=output_encoding)['stdout']
def gen_row_lines(self, row, style, inner_widths, height):
    """Combine cells in a row and group them into lines with vertical
    borders.

    The caller is expected to pass yielded lines to ''.join() to combine
    them into a printable line, and must append the newline character.

    For example, ['Row One Column One', 'Two', 'Three'] yields
    ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|');
    multi-line cells yield one component tuple per line, padded to the
    row height.

    :param iter row: One row in the table. List of cells.
    :param str style: Type of border characters to use
        ('heading', 'footing', or anything else for a plain row).
    :param iter inner_widths: List of widths (no padding) per column.
    :param int height: Inner height (no padding; number of lines) to
        expand the row to.
    :return: Yields lines split into components in a list.
    """
    padded_cells = list()
    # Resize row if it doesn't have enough cells.
    if len(row) != len(inner_widths):
        row = row + [''] * (len(inner_widths) - len(row))
    # Pad and align each cell; split each cell into lines to support
    # multi-line cells.
    for i, cell in enumerate(row):
        align = (self.justify_columns.get(i),)
        inner_dimensions = (inner_widths[i], height)
        padding = (self.padding_left, self.padding_right, 0, 0)
        padded_cells.append(
            align_and_pad_cell(cell, align, inner_dimensions, padding))
    # Determine border characters for the requested row style.
    if style == 'heading':
        chars = (self.CHAR_H_OUTER_LEFT_VERTICAL,
                 self.CHAR_H_INNER_VERTICAL,
                 self.CHAR_H_OUTER_RIGHT_VERTICAL)
    elif style == 'footing':
        chars = (self.CHAR_F_OUTER_LEFT_VERTICAL,
                 self.CHAR_F_INNER_VERTICAL,
                 self.CHAR_F_OUTER_RIGHT_VERTICAL)
    else:
        chars = (self.CHAR_OUTER_LEFT_VERTICAL,
                 self.CHAR_INNER_VERTICAL,
                 self.CHAR_OUTER_RIGHT_VERTICAL)
    left = chars[0] if self.outer_border else ''
    center = chars[1] if self.inner_column_border else ''
    right = chars[2] if self.outer_border else ''
    # Yield each line.
    for line in build_row(padded_cells, left, center, right):
        yield line
def Field(self, field, Value=None):
    '''Get or set a field in the bitmap.

    With ``Value`` omitted (None), return the current value of ``field``
    or None if it is unset. With ``Value`` equal to 0 or 1, store it.

    Raises:
        ValueError: if ``Value`` is neither 0 nor 1.
    '''
    # ``is None`` replaces the original ``== None`` equality test.
    if Value is None:
        try:
            return self.__Bitmap[field]
        except KeyError:
            return None
    if Value in (0, 1):
        self.__Bitmap[field] = Value
    else:
        # Give the ValueError a message (it was previously bare).
        raise ValueError('bitmap field value must be 0 or 1, got %r' % (Value,))
def coarsen(self, windows, func, boundary='exact', side='left'):
    """Apply a coarsening reduction over the given windows."""
    # Only windows over dimensions this variable actually has apply.
    applicable = {dim: size for dim, size in windows.items()
                  if dim in self.dims}
    if not applicable:
        return self.copy()
    reshaped, axes = self._coarsen_reshape(applicable, boundary, side)
    if isinstance(func, str):
        name = func
        func = getattr(duck_array_ops, name, None)
        if func is None:
            raise NameError('{} is not a valid method.'.format(name))
    return type(self)(self.dims, func(reshaped, axis=axes), self._attrs)
def create_target_group(name, protocol, port, vpc_id,
                        region=None, key=None, keyid=None, profile=None,
                        health_check_protocol='HTTP',
                        health_check_port='traffic-port',
                        health_check_path='/',
                        health_check_interval_seconds=30,
                        health_check_timeout_seconds=5,
                        healthy_threshold_count=5,
                        unhealthy_threshold_count=2,
                        **kwargs):
    '''
    .. versionadded:: 2017.11.0

    Create target group if not present.

    name
        (string) - The name of the target group.
    protocol
        (string) - The protocol to use for routing traffic to the targets.
    port
        (int) - The port on which the targets receive traffic. This port
        is used unless you specify a port override when registering the
        traffic.
    vpc_id
        (string) - The identifier of the virtual private cloud (VPC).
    health_check_protocol
        (string) - The protocol the load balancer uses when performing
        health checks on targets. The default is the HTTP protocol.
    health_check_port
        (string) - The port the load balancer uses when performing health
        checks on targets. The default is 'traffic-port', which indicates
        the port on which each target receives traffic from the load
        balancer.
    health_check_path
        (string) - The ping path that is the destination on the targets
        for health checks. The default is /.
    health_check_interval_seconds
        (integer) - The approximate amount of time, in seconds, between
        health checks of an individual target. The default is 30 seconds.
    health_check_timeout_seconds
        (integer) - The amount of time, in seconds, during which no
        response from a target means a failed health check. The default
        is 5 seconds.
    healthy_threshold_count
        (integer) - The number of consecutive health check successes
        required before considering an unhealthy target healthy. The
        default is 5.
    unhealthy_threshold_count
        (integer) - The number of consecutive health check failures
        required before considering a target unhealthy. The default is 2.

    returns
        (bool) - True on success, False on failure.

    CLI example:

    .. code-block:: yaml

        create-target:
          boto_elb2.create_targets_group:
            - name: myALB
            - protocol: https
            - port: 443
            - vpc_id: myVPC
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    exists = __salt__['boto_elbv2.target_group_exists'](
        name, region, key, keyid, profile)
    if exists:
        ret['result'] = True
        ret['comment'] = 'Target Group {0} already exists'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Target Group {0} will be created'.format(name)
        return ret
    created = __salt__['boto_elbv2.create_target_group'](
        name, protocol, port, vpc_id,
        region=region, key=key, keyid=keyid, profile=profile,
        health_check_protocol=health_check_protocol,
        health_check_port=health_check_port,
        health_check_path=health_check_path,
        health_check_interval_seconds=health_check_interval_seconds,
        health_check_timeout_seconds=health_check_timeout_seconds,
        healthy_threshold_count=healthy_threshold_count,
        unhealthy_threshold_count=unhealthy_threshold_count,
        **kwargs)
    if created:
        ret['changes']['target_group'] = name
        ret['result'] = True
        ret['comment'] = 'Target Group {0} created'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Target Group {0} creation failed'.format(name)
    return ret
def execute_django(self, soql, args=()):
    """Fixed execute for queries coming from Django query compilers."""
    sqltype = soql.split(None, 1)[0].upper()
    # Dispatch write queries by type; order matters because RawQuery and
    # Query are checked after the subquery classes.
    write_handlers = (
        (subqueries.InsertQuery, self.execute_insert),
        (subqueries.UpdateQuery, self.execute_update),
        (subqueries.DeleteQuery, self.execute_delete),
    )
    for query_class, handler in write_handlers:
        if isinstance(self.query, query_class):
            return handler(self.query)
    if isinstance(self.query, RawQuery):
        self.execute_select(soql, args)
        return None
    if sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'):
        log.info("Ignored SQL command '%s'", sqltype)
        return None
    if isinstance(self.query, Query):
        self.execute_select(soql, args)
        return None
    raise DatabaseError("Unsupported query: type %s: %s"
                        % (type(self.query), self.query))
def fetch_datatype(self, bucket, key, r=None, pr=None, basic_quorum=None,
                   notfound_ok=None, timeout=None, include_context=None):
    """Fetch a Riak Datatype.

    Not implemented by this transport; subclasses must override.
    """
    raise NotImplementedError
def max_pairs(shape):
    """[DEPRECATED] Compute the maximum number of record pairs possible."""
    if isinstance(shape, (tuple, list)):
        if len(shape) == 1:
            # Deduplication of a single frame: n * (n - 1) / 2 pairs.
            x = get_length(shape[0])
            return int(x * (x - 1) / 2)
        # Linking across frames: product of the frame lengths.
        return numpy.prod([get_length(xi) for xi in shape])
    x = get_length(shape)
    return int(x * (x - 1) / 2)
def clear():
    """Clear all data on the local server. Useful for debugging purposes."""
    utils.check_for_local_server()
    click.confirm(
        "Are you sure you want to do this? It will delete all of your data",
        abort=True)
    server = Server(config["local_server"]["url"])
    for db_name in all_dbs:
        del server[db_name]
def set(self, key, value, expire=0, noreply=None):
    """The memcached "set" command.

    Args:
        key: str, see class docs for details.
        value: str, see class docs for details.
        expire: optional int, number of seconds until the item is expired
            from the cache, or zero for no expiry (the default).
        noreply: optional bool, True to not wait for the reply (defaults
            to self.default_noreply).

    Returns:
        If no exception is raised, always returns True. If an exception
        is raised, the set may or may not have occurred. If noreply is
        True, then a successful return does not guarantee a successful
        set.
    """
    if noreply is None:
        noreply = self.default_noreply
    results = self._store_cmd(b'set', {key: value}, expire, noreply)
    return results[key]
def ot_find_studies(arg_dict, exact=True, verbose=False, oti_wrapper=None):
    """Use a peyotl wrapper around an Open Tree web service to get a list
    of studies including values `value` for a given searched property.

    The oti_wrapper can be None (in which case the default wrapper from
    peyotl.sugar will be used). All other arguments correspond to the
    arguments of the web-service call.
    """
    if oti_wrapper is None:
        # Fall back to the default wrapper provided by peyotl.sugar.
        from peyotl.sugar import oti as oti_wrapper
    return oti_wrapper.find_studies(
        arg_dict, exact=exact, verbose=verbose, wrap_response=True)
def pair(self):
    """Return a sender callable and a receiver iterable for a PAIR socket.

    The callable transmits a message and the iterable yields incoming
    messages sent by a pair socket. The iterable returns as many parts as
    sent by the pair; the sender has a ``print``-like signature, each
    argument being one part of the complete message.

    :rtype: (function, generator)
    """
    sock = self.__sock(zmq.PAIR)
    sender = self.__send_function(sock)
    receiver = self.__recv_generator(sock)
    return sender, receiver
def expand_expression(self, pattern, hosts, services, hostgroups, servicegroups, running=False):  # pylint: disable=too-many-locals
    """Expand a host or service expression into a dependency node tree
    using (host|service) group membership, regex, or labels as item selector.

    :param pattern: pattern to parse; a comma separates the host part from
        an optional service part
    :type pattern: str
    :param hosts: hosts list, used to find a specific host
    :type hosts: alignak.objects.host.Host
    :param services: services list, used to find a specific service
    :type services: alignak.objects.service.Service
    :param running: rules are evaluated at run time and parsing. True means runtime
    :type running: bool
    :return: root node of parsed tree
    :rtype: alignak.dependencynode.DependencyNode
    """
    error = None
    # The wrapping node AND-combines every matched item.
    node = DependencyNode()
    node.operand = '&'
    elts = [e.strip() for e in pattern.split(',')]
    # If host_name is empty, use the host_name the business rule is bound to
    if not elts[0]:
        elts[0] = self.bound_item.host_name
    filters = []
    # Looks for hosts/services using appropriate filters
    try:
        all_items = {"hosts": hosts, "hostgroups": hostgroups, "servicegroups": servicegroups}
        if len(elts) > 1:
            # We got a service expression
            # NOTE(review): this unpack assumes exactly two comma-separated
            # parts; a pattern with more commas raises ValueError here —
            # confirm whether that is caught upstream.
            host_expr, service_expr = elts
            filters.extend(self.get_srv_host_filters(host_expr))
            filters.extend(self.get_srv_service_filters(service_expr))
            items = services.find_by_filter(filters, all_items)
        else:
            # We got a host expression
            host_expr = elts[0]
            filters.extend(self.get_host_filters(host_expr))
            items = hosts.find_by_filter(filters, all_items)
    except re.error as regerr:
        error = "Business rule uses invalid regex %s: %s" % (pattern, regerr)
    else:
        if not items:
            error = "Business rule got an empty result for pattern %s" % pattern
    # Checks if we got result
    if error:
        if running is False:
            # Parse time: record the problem as a configuration error.
            node.configuration_errors.append(error)
        else:
            # As business rules are re-evaluated at run time on
            # each scheduling loop, if the rule becomes invalid
            # because of a badly written macro modulation, it
            # should be notified upper for the error to be
            # displayed in the check output.
            raise Exception(error)
        return node
    # Creates dependency node subtree
    # here we have Alignak SchedulingItem object (Host/Service)
    for item in items:
        # Creates a host/service node
        son = DependencyNode()
        son.operand = item.__class__.my_type
        # Only store the uuid, not the full object.
        son.sons.append(item.uuid)
        # Appends it to wrapping node
        node.sons.append(son)
    node.switch_zeros_of_values()
    return node
def register(cls, function=None, name=None):
    """Registers a new math function *function* with *name* and returns an
    :py:class:`Operation` instance.  A math function expects a
    :py:class:`Number` as its first argument, followed by optional (keyword)
    arguments.  When *name* is *None*, the name of the *function* is used.
    The returned object can be used to set the derivative (similar to
    *property*).  Example:

    .. code-block:: python

        @ops.register
        def my_op(x):
            return x * 2 + 1

        @my_op.derive
        def my_op(x):
            return 2

        num = Number(5, 2)
        print(num)  # -> 5.00 (+2.00, -2.00)

        num = ops.my_op(num)
        print(num)  # -> 11.00 (+4.00, -4.00)

    Please note that there is no need to register *simple* functions as in
    the particular example above as most of them are just composite
    operations whose derivatives are already known.
    """
    def register(function):
        op = Operation(function, name=name)

        @functools.wraps(function)
        def wrapper(num, *args, **kwargs):
            if op.derivative is None:
                raise Exception("cannot run operation '{}', no derivative registered".format(op.name))
            # ensure we deal with a number instance
            num = ensure_number(num)
            # apply to the nominal value
            nominal = op.function(num.nominal, *args, **kwargs)
            # apply to all uncertainties via
            #   unc_f = derivative_f(x) * unc_x
            x = abs(op.derivative(num.nominal, *args, **kwargs))
            uncertainties = {}
            # FIX: the loop variable used to be called ``name``, shadowing
            # the enclosing ``name`` parameter of register().
            for unc_name in num.uncertainties.keys():
                up, down = num.get_uncertainty(unc_name)
                uncertainties[unc_name] = (x * up, x * down)
            # create and return the new number
            return num.__class__(nominal, uncertainties)

        # actual registration
        cls._instances[op.name] = op
        setattr(cls, op.name, staticmethod(wrapper))
        return op

    # Support use both as ``@register`` and ``@register(name=...)``.
    if function is None:
        return register
    else:
        return register(function)
def _extract_assignment_text(self, element_id):
    """Extract assignment text (instructions).

    @param element_id: Element id to extract assignment instructions from.
    @type element_id: str

    @return: List of assignment text (instructions).
    @rtype: [str]
    """
    dom = get_page(self._session,
                   OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
                   json=True,
                   course_id=self._course_id,
                   element_id=element_id)
    instructions = []
    for element in dom['elements']:
        definition = element['submissionLearnerSchema']['definition']
        instructions.append(
            definition['assignmentInstructions']['definition']['value'])
    return instructions
def register(self, widget, basename, **parameters):
    """Register a widget, a URL basename and any optional URL parameters.

    Parameters are passed as keyword arguments, i.e.

    >>> router.register(MyWidget, 'mywidget', my_parameter="[A-Z0-9]+")

    This would be the equivalent of manually adding the following
    to urlpatterns:

    >>> url(r"^widgets/mywidget/(P<my_parameter>[A-Z0-9]+)/?",
            MyWidget.as_view(), "widget_mywidget")
    """
    entry = (widget, basename, parameters)
    self.registry.append(entry)
def coherence_spectrogram(self, other, stride, fftlength=None, overlap=None, window='hann', nproc=1):
    """Calculate the coherence spectrogram between this `TimeSeries`
    and ``other``.

    Parameters
    ----------
    other : `TimeSeries`
        the second `TimeSeries` in this CSD calculation
    stride : `float`
        number of seconds in single PSD (column of spectrogram)
    fftlength : `float`
        number of seconds in single FFT
    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0
    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats
    nproc : `int`
        number of parallel processes to use when calculating
        individual coherence spectra.

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        time-frequency coherence spectrogram as generated from the
        input time-series.
    """
    # Delegate the heavy lifting to the spectrogram sub-package.
    from ..spectrogram.coherence import from_timeseries
    return from_timeseries(self, other, stride,
                           fftlength=fftlength,
                           overlap=overlap,
                           window=window,
                           nproc=nproc)
def generate_local_port():
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L63."""
    global _PREVIOUS_LOCAL_PORT
    if _PREVIOUS_LOCAL_PORT is None:
        # Discover a usable local netlink port by binding an auto-assigned
        # socket once; the kernel-chosen address is cached for later calls.
        try:
            family = getattr(socket, 'AF_NETLINK', -1)
            with contextlib.closing(socket.socket(family, socket.SOCK_RAW)) as sock:
                sock.bind((0, 0))
                _PREVIOUS_LOCAL_PORT = int(sock.getsockname()[0])
        except OSError:
            # Netlink unavailable (e.g. non-Linux): fall back to UINT32_MAX.
            _PREVIOUS_LOCAL_PORT = 4294967295
    return int(_PREVIOUS_LOCAL_PORT)
def kernel_command_line(self, kernel_command_line):
    """Sets the kernel command line for this QEMU VM.

    :param kernel_command_line: QEMU kernel command line
    """
    template = 'QEMU VM "{name}" [{id}] has set the QEMU kernel command line to {kernel_command_line}'
    log.info(template.format(name=self._name,
                             id=self._id,
                             kernel_command_line=kernel_command_line))
    self._kernel_command_line = kernel_command_line
def template_chooser_clicked(self):
    """Slot activated when report file tool button is clicked.

    Opens a file dialog for choosing a QGIS composer template (*.qpt),
    starting in the directory of the currently-set template (or the last
    custom template stored in settings), and writes the chosen path back
    into the template path line edit.

    .. versionadded: 4.3.0
    """
    path = self.template_path.text()
    if not path:
        # Fall back to the last custom template remembered in settings.
        path = setting('lastCustomTemplate', '', str)
    if path:
        directory = dirname(path)
    else:
        directory = ''
    # noinspection PyCallByClass,PyTypeChecker
    # NOTE(review): in PyQt5, QFileDialog.getOpenFileName returns a
    # (filename, selected_filter) tuple rather than a plain string —
    # confirm which binding is in use, otherwise setText receives a tuple.
    # Also, cancelling the dialog yields an empty name, which clears the
    # field — confirm that is intended.
    file_name = QFileDialog.getOpenFileName(self, tr('Select report'), directory, tr('QGIS composer templates (*.qpt *.QPT)'))
    self.template_path.setText(file_name)
def request_data(cls, time, site_id, derived=False):
    """Retrieve IGRA version 2 data for one station.

    Parameters
    ----------
    time : datetime.datetime or list of two datetimes
        The date and time of the desired observation. If a list of two
        times is given, dataframes for all dates within the two dates
        will be returned.
    site_id : str
        11-character IGRA2 station identifier.
    derived : bool, optional
        When True, request the derived-parameter product instead of the
        raw sounding data.

    Returns
    -------
    tuple
        ``(df, headers)`` — the :class:`pandas.DataFrame` containing the
        data, and the header information.
    """
    igra2 = cls()
    # Set parameters for data query
    if derived:
        igra2.ftpsite = igra2.ftpsite + 'derived/derived-por/'
        igra2.suffix = igra2.suffix + '-drvd.txt'
    else:
        igra2.ftpsite = igra2.ftpsite + 'data/data-por/'
        igra2.suffix = igra2.suffix + '-data.txt'
    # isinstance (rather than the old ``type(time) == datetime.datetime``)
    # also accepts datetime subclasses such as pandas.Timestamp.
    if isinstance(time, datetime.datetime):
        igra2.begin_date = time
        igra2.end_date = time
    else:
        # Otherwise expect an iterable of exactly two datetimes.
        igra2.begin_date, igra2.end_date = time
    igra2.site_id = site_id
    df, headers = igra2._get_data()
    return df, headers
async def on_raw_401(self, message):
    """No such nick/channel.

    Handler for IRC numeric 401: resolves any WHOIS request we have
    pending for the unknown nickname.
    """
    # params[1] is the nickname the server could not find.
    nickname = message.params[1]
    # Remove nickname from whois requests if it involves one of ours.
    if nickname in self._pending['whois']:
        future = self._pending['whois'].pop(nickname)
        # Resolve the waiting WHOIS future with None (no such nick).
        future.set_result(None)
        # NOTE(review): assumes _whois_info always holds an entry for a
        # pending whois; a missing key would raise KeyError — confirm.
        del self._whois_info[nickname]
def generate_vectored_io_stripe_metadata(local_path, metadata):
    # type: (blobxfer.models.upload.LocalPath, dict) -> dict
    """Generate vectored io stripe metadata dict

    :param blobxfer.models.upload.LocalPath local_path: local path
    :param dict metadata: existing metadata dict
    :rtype: dict
    :return: merged metadata dictionary
    """
    view = local_path.view
    # Per-slice stripe description for this local path.
    stripe = {
        _JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE: local_path.total_size,
        _JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START: view.fd_start,
        _JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES: view.total_slices,
        _JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID: view.slice_num,
        _JSON_KEY_VECTORED_IO_STRIPE_NEXT: view.next,
    }
    md = {
        _JSON_KEY_VECTORED_IO: {
            _JSON_KEY_VECTORED_IO_MODE: _JSON_KEY_VECTORED_IO_STRIPE,
            _JSON_KEY_VECTORED_IO_STRIPE: stripe,
        }
    }
    return blobxfer.util.merge_dict(metadata, md)
def is_subdomain(self, other):
    """Is self a subdomain of other?

    The notion of subdomain includes equality.
    @rtype: bool
    """
    relation, _order, _nlabels = self.fullcompare(other)
    return relation in (NAMERELN_SUBDOMAIN, NAMERELN_EQUAL)
def _transform(self, df):
    """Private transform method of a Transformer. This serves as
    batch-prediction method for our purposes.

    Adds a prediction column (the configured output column) to ``df`` by
    running the broadcast Keras model over the collected feature vectors,
    then casts both the output and label columns to doubles.
    """
    output_col = self.getOutputCol()
    label_col = self.getLabelCol()
    # Extend the incoming schema with the (initially string-typed)
    # prediction column.
    new_schema = copy.deepcopy(df.schema)
    new_schema.add(StructField(output_col, StringType(), True))
    # Single partition so that row order is stable for the zip() below.
    rdd = df.rdd.coalesce(1)
    features = np.asarray(rdd.map(lambda x: from_vector(x.features)).collect())
    # Note that we collect, since executing this on the rdd would require
    # model serialization once again
    model = model_from_yaml(self.get_keras_model_config())
    model.set_weights(self.weights.value)
    predictions = rdd.ctx.parallelize(model.predict_classes(features)).coalesce(1)
    predictions = predictions.map(lambda x: tuple(str(x)))
    # zip() requires both RDDs to have the same partitioning and counts —
    # guaranteed by the coalesce(1) calls above.
    results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
    results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
    results_df = results_df.withColumn(output_col, results_df[output_col].cast(DoubleType()))
    results_df = results_df.withColumn(label_col, results_df[label_col].cast(DoubleType()))
    return results_df
def _parse_date_default_value(property_name, default_value_string):
    """Parse and return the default value for a date property."""
    # OrientDB doesn't use ISO-8601 datetime format, so we have to parse it
    # manually with strptime() — which raises if the provided value cannot
    # be parsed — and then turn it into a python date object.
    parsed = time.strptime(default_value_string, ORIENTDB_DATE_FORMAT)
    # struct_time starts with (tm_year, tm_mon, tm_mday).
    return datetime.date(*parsed[:3])
def samples(self):
    """Access the samples

    :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleList
    :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleList
    """
    # Lazily build the SampleList once and cache it on the instance.
    if self._samples is not None:
        return self._samples
    self._samples = SampleList(
        self._version,
        assistant_sid=self._solution['assistant_sid'],
        task_sid=self._solution['sid'],
    )
    return self._samples
def earth_rates(ATTITUDE):
    '''return angular velocities in earth frame'''
    from math import sin, cos, tan, fabs

    p, q, r = ATTITUDE.rollspeed, ATTITUDE.pitchspeed, ATTITUDE.yawspeed
    phi, theta = ATTITUDE.roll, ATTITUDE.pitch
    psi = ATTITUDE.yaw  # read for parity with the body rates; unused below

    sin_phi, cos_phi = sin(phi), cos(phi)
    # Standard body-to-earth Euler rate transformation.
    phi_dot = p + tan(theta) * (q * sin_phi + r * cos_phi)
    theta_dot = q * cos_phi - r * sin_phi
    # Nudge theta away from the gimbal singularity to avoid division by zero.
    if fabs(cos(theta)) < 1.0e-20:
        theta += 1.0e-10
    psi_dot = (q * sin_phi + r * cos_phi) / cos(theta)
    return (phi_dot, theta_dot, psi_dot)
def transform_content(tags, content_transformer):
    """Transform content in all `tags` using result of
    `content_transformer(tag)` call.

    Args:
        tags (obj/list): HTMLElement instance, or list of HTMLElement
            instances.
        content_transformer (function): Function which is called as
            ``content_transformer(tag)``.
    """
    # isinstance (rather than the old ``type(tags) not in [tuple, list]``)
    # also accepts list/tuple subclasses.
    if not isinstance(tags, (tuple, list)):
        tags = [tags]
    for tag in tags:
        new_child = dhtmlparser.HTMLElement(content_transformer(tag))
        # don't forget to add parent if the list is double-linked
        if hasattr(tag, "parent"):
            new_child.parent = tag
        # Replace the tag's children with the single transformed element.
        tag.childs = [new_child]
def runInParallel(*fns):
    """Runs multiple processes in parallel.

    :type: fns: def
    """
    # Spawn one worker process per callable, then wait for all to finish.
    workers = []
    for fn in fns:
        worker = Process(target=fn)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
def get_files(file_tokens, cwd=None):
    """Given a list of parser file tokens, return a list of input objects
    for them.

    Note: ``file_tokens`` is consumed from the end (the list is mutated),
    so the returned Inputs are in reverse token order.  Tokens without a
    ``filename`` attribute get an empty filename.

    :param file_tokens: list of parser file tokens
    :param cwd: optional working directory forwarded to every Input
    :return: list of Input objects
    """
    if not file_tokens:
        return []
    token = file_tokens.pop()
    try:
        filename = token.filename
    except AttributeError:
        filename = ''
    if cwd:
        input = Input(token.alias, filename, cwd=cwd)
    else:
        input = Input(token.alias, filename)
    # BUG FIX: propagate cwd through the recursion; previously only the
    # first (last-popped) token received the cwd argument.
    return [input] + get_files(file_tokens, cwd=cwd)
def upgrade_nrml(directory, dry_run, multipoint):
    """Upgrade all the NRML files contained in the given directory to the
    latest NRML version. Works by walking all subdirectories.

    WARNING: there is no downgrade!

    :param directory: root directory to walk for .xml files
    :param dry_run: when True, only report which files would be upgraded
    :param multipoint: when True, also convert already-upgraded source
        models to multiPointSources
    """
    for cwd, dirs, files in os.walk(directory):
        for f in files:
            path = os.path.join(cwd, f)
            if f.endswith('.xml'):
                ip = iterparse(path, events=('start',))
                next(ip)  # read node zero
                try:
                    fulltag = next(ip)[1].tag  # tag of the first node
                    xmlns, tag = fulltag.split('}')
                except Exception:  # not a NRML file
                    xmlns, tag = '', ''
                if xmlns[1:] == NRML05:  # already upgraded
                    if 'sourceModel' in tag and multipoint:
                        print('upgrading to multiPointSources', path)
                        node0 = nrml.read(path)[0]
                        sourceconverter.update_source_model(node0, path)
                        # renamed from ``f`` to avoid shadowing the loop var
                        with open(path, 'wb') as out:
                            nrml.write([node0], out, gml=True)
                elif 'nrml/0.4' in xmlns and ('vulnerability' in tag or 'fragility' in tag or 'sourceModel' in tag):
                    if not dry_run:
                        print('Upgrading', path)
                        # FIX: the old try/except re-raised immediately and
                        # had an unreachable print(exc) after the raise; the
                        # dead handler is removed, behavior is unchanged.
                        upgrade_file(path, multipoint)
                    else:
                        print('Not upgrading', path)
def find_annotations(data, retriever=None):
    """Find annotation configuration files for vcfanno, using pre-installed inputs.

    Creates absolute paths for user specified inputs and finds locally
    installed defaults.

    Default annotations:
      - gemini for variant pipelines
      - somatic for variant tumor pipelines
      - rnaedit for RNA-seq variant calling

    :param data: bcbio sample data dictionary
    :param retriever: optional remote-file retriever; when set, config
        files are resolved remotely instead of from the local install
    :return: list of existing .conf (and matching .lua) file paths
    """
    conf_files = dd.get_vcfanno(data)
    # Normalize a single configured value to a list.
    if not isinstance(conf_files, (list, tuple)):
        conf_files = [conf_files]
    # Merge in pipeline defaults, avoiding duplicates.
    for c in _default_conf_files(data, retriever):
        if c not in conf_files:
            conf_files.append(c)
    # Optional per-name checks that all required inputs exist.
    conf_checkers = {"gemini": annotate_gemini, "somatic": _annotate_somatic}
    out = []
    annodir = os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)), os.pardir, "config", "vcfanno"))
    if not retriever:
        annodir = os.path.abspath(annodir)
    for conf_file in conf_files:
        # A full path (remote or existing local file) is used as-is;
        # otherwise treat the value as a short name inside annodir.
        if objectstore.is_remote(conf_file) or (os.path.exists(conf_file) and os.path.isfile(conf_file)):
            conffn = conf_file
        elif not retriever:
            conffn = os.path.join(annodir, conf_file + ".conf")
        else:
            conffn = conf_file + ".conf"
        luafn = "%s.lua" % utils.splitext_plus(conffn)[0]
        if retriever:
            # Only keep paths the retriever can actually resolve remotely.
            conffn, luafn = [(x if objectstore.is_remote(x) else None) for x in retriever.add_remotes([conffn, luafn], data["config"])]
        if not conffn:
            pass
        elif conf_file in conf_checkers and not conf_checkers[conf_file](data, retriever):
            logger.warn("Skipping vcfanno configuration: %s. Not all input files found." % conf_file)
        elif not objectstore.file_exists_or_remote(conffn):
            build = dd.get_genome_build(data)
            CONF_NOT_FOUND = ("The vcfanno configuration {conffn} was not found for {build}, skipping.")
            logger.warn(CONF_NOT_FOUND.format(**locals()))
        else:
            out.append(conffn)
            # Ship the companion lua file along when present.
            if luafn and objectstore.file_exists_or_remote(luafn):
                out.append(luafn)
    return out
def get_property_example(cls, property_, nested=None, **kw):
    """Get example for property

    :param dict property_: property description (type, type_format, ...)
    :param set nested: set of schema ids already visited (loop guard)
    :return: example value
    """
    paths = kw.get('paths', [])
    name = kw.get('name', '')
    result = None
    if name and paths:
        # Extend each candidate path with the property name and look for
        # a user-supplied custom example.
        paths = list(map(lambda path: '.'.join((path, name)), paths))
        result, path = cls._get_custom_example(paths)
        if result is not None and property_['type'] in PRIMITIVE_TYPES:
            cls._example_validate(path, result, property_['type'], property_['type_format'])
            return result
    if SchemaObjects.contains(property_['type']):
        schema = SchemaObjects.get(property_['type'])
        if result is not None:
            # Custom example found for a schema-typed property: coerce it
            # to the expected container shape.
            if schema.is_array:
                if not isinstance(result, list):
                    result = [result] * cls.EXAMPLE_ARRAY_ITEMS_COUNT
            else:
                if isinstance(result, list):
                    cls.logger.warning('Example type mismatch in path {}'.format(schema.ref_path))
        else:
            result = cls.get_example_by_schema(schema, **kw)
        if (not result) and schema.nested_schemas:
            # NOTE(review): this fills result by item assignment, which
            # presumes result is an (empty) dict here rather than None or
            # a list — confirm get_example_by_schema's falsy return type.
            for _schema_id in schema.nested_schemas:
                _schema = SchemaObjects.get(_schema_id)
                if _schema:
                    if isinstance(_schema, SchemaMapWrapper):
                        result[_schema.name] = cls.get_example_by_schema(_schema, **kw)
                    elif _schema.nested_schemas:
                        # Descend one more level for wrapped map schemas.
                        for _schema__id in _schema.nested_schemas:
                            _schema_ = SchemaObjects.get(_schema__id)
                            if isinstance(_schema_, SchemaMapWrapper):
                                result[_schema.name] = cls.get_example_by_schema(_schema_, **kw)
    else:
        result = cls.get_example_value_for_primitive_type(property_['type'], property_['type_properties'], property_['type_format'], **kw)
    return result
def vignettingEq(xxx_todo_changeme, f=100, alpha=0, Xi=0, tau=0, cx=50, cy=50):
    '''Vignetting equation using the KARL-WEISS-MODEL
    see http://research.microsoft.com/en-us/people/stevelin/vignetting.pdf

    f - focal length
    alpha - coefficient in the geometric vignetting factor
    Xi - tilt factor of a planar scene
    tau - rotation angle of a planar scene
    cx - image center, x
    cy - image center, y
    '''
    (x, y) = xxx_todo_changeme
    # radial distance from the image centre
    dx = x - cx
    dy = y - cy
    dist = (dx ** 2 + dy ** 2) ** 0.5
    # off-axis illumination falloff
    A = old_div(1.0, (1 + (old_div(dist, f)) ** 2) ** 2)
    # geometric vignetting factor
    G = 1 - alpha * dist
    # tilt factor for a planar scene
    T = np.cos(tau) * (1 + (old_div(np.tan(tau), f)) * (x * np.sin(Xi) - y * np.cos(Xi))) ** 3
    return A * G * T
def hs_join(ls_hsi, hso):
    """[Many-to-one] Synchronizes (joins) a list of input handshake
    interfaces: output is ready when ALL inputs are ready

        ls_hsi - (i) list of input handshake tuples (ready, valid)
        hso    - (o) an output handshake tuple (ready, valid)

    Returns a MyHDL combinational generator implementing the join.
    """
    N = len(ls_hsi)
    # Split the (ready, valid) tuples into parallel signal lists.
    ls_hsi_rdy, ls_hsi_vld = zip(*ls_hsi)
    ls_hsi_rdy, ls_hsi_vld = list(ls_hsi_rdy), list(ls_hsi_vld)
    hso_rdy, hso_vld = hso

    @always_comb
    def _hsjoin():
        # Output is valid only when every input is valid.
        all_vld = True
        for i in range(N):
            all_vld = all_vld and ls_hsi_vld[i]
        hso_vld.next = all_vld
        # Each input is acknowledged only when all are valid AND the
        # consumer is ready, so no producer advances ahead of the others.
        for i in range(N):
            ls_hsi_rdy[i].next = all_vld and hso_rdy
    return _hsjoin
async def configure_reporting(self, attribute, min_interval, max_interval, reportable_change):
    """Configure reporting.

    NOTE(review): the provided arguments are deliberately ignored —
    reporting is always configured for the battery-voltage attribute
    using the class-level FREQUENCY/MINIMUM_CHANGE constants. This looks
    like a device-quirk override; confirm the override is intentional.
    """
    result = await super().configure_reporting(PowerConfigurationCluster.BATTERY_VOLTAGE_ATTR, self.FREQUENCY, self.FREQUENCY, self.MINIMUM_CHANGE)
    return result
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    # We are using the existing logic from the auditview
    logview = api.get_view("auditlog", context=obj, request=self.request)
    # get the last snapshot
    snapshot = get_last_snapshot(obj)
    # get the metadata of the last snapshot
    metadata = get_snapshot_metadata(snapshot)
    title = obj.Title()
    url = obj.absolute_url()
    auditlog_url = "{}/@@auditlog".format(url)
    # Title
    item["title"] = title
    # Link the title to the auditlog of the object
    item["replace"]["title"] = get_link(auditlog_url, value=title)
    # Version
    version = get_snapshot_version(obj, snapshot)
    item["version"] = version
    # Modification Date
    m_date = metadata.get("modified")
    item["modified"] = logview.to_localized_time(m_date)
    # Actor
    actor = metadata.get("actor")
    item["actor"] = actor
    # Fullname
    properties = api.get_user_properties(actor)
    item["fullname"] = properties.get("fullname", actor)
    # Roles
    roles = metadata.get("roles", [])
    item["roles"] = ", ".join(roles)
    # Remote Address
    remote_address = metadata.get("remote_address")
    item["remote_address"] = remote_address
    # Action
    action = metadata.get("action")
    item["action"] = logview.translate_state(action)
    # Review State
    review_state = metadata.get("review_state")
    item["review_state"] = logview.translate_state(review_state)
    # get the previous snapshot
    prev_snapshot = get_snapshot_by_version(obj, version - 1)
    if prev_snapshot:
        prev_metadata = get_snapshot_metadata(prev_snapshot)
        prev_review_state = prev_metadata.get("review_state")
        # Show a transition arrow when the workflow state changed between
        # the previous and the current snapshot.
        if prev_review_state != review_state:
            item["replace"]["review_state"] = "{} → {}".format(logview.translate_state(prev_review_state), logview.translate_state(review_state))
        # Rendered Diff
        diff = compare_snapshots(snapshot, prev_snapshot)
        item["diff"] = logview.render_diff(diff)
    return item
def update_permissions(self, grp_name, resource, permissions):
    """Update permissions for the group associated with the given resource.

    Args:
        grp_name (string): Name of group.
        resource (intern.resource.boss.Resource): Identifies which data
            model object to operate on
        permissions (list): List of permissions to add to the given resource

    Raises:
        requests.HTTPError on failure.
    """
    # Authenticate against the project service, then delegate the update.
    service = self.project_service
    service.set_auth(self._token_project)
    service.update_permissions(grp_name, resource, permissions)
def sem(inlist):
    """Returns the estimated standard error of the mean (sx-bar) of the
    values in the passed list.  sem = stdev / sqrt(n)

    Usage:   lsem(inlist)
    """
    count = len(inlist)
    return stdev(inlist) / math.sqrt(count)
def connect(self):
    """Connect to the MQTT broker, retrying until successful, then run
    the client's network loop forever (this call blocks)."""
    _logger.debug("Start connecting to broker")
    connected = False
    while not connected:
        try:
            self.client.connect(self.broker_host, self.broker_port, self.broker_keepalive)
            connected = True
        except Exception:
            # Broker unreachable: back off and retry indefinitely.
            _logger.debug("Connect failed. wait %s sec" % self.connect_delay)
            sleep(self.connect_delay)
    self.client.loop_forever()
def add_fast_step(self, fastsim):
    """Add the fastsim context to the trace.

    Appends the current value of every traced wire from the fast
    simulation's context onto that wire's history list.
    """
    for wire_name, history in self.trace.items():
        history.append(fastsim.context[wire_name])
def symmetric_difference_update(self, that):
    """Update the set, keeping only elements found in either *self* or
    *that*, but not in both.

    Returns *self* to allow chaining.
    """
    backing_set = self._set
    backing_list = self._list
    # Update the hash set first, then rebuild the ordered list from it.
    backing_set.symmetric_difference_update(that)
    backing_list.clear()
    backing_list.update(backing_set)
    return self
def split_extension(path):
    """A extension splitter that checks for compound extensions such as
    'file.nii.gz'

    Parameters
    ----------
    path : str
        A filename to split into base and extension

    Returns
    -------
    base : str
        The base part of the string, i.e. 'file' of 'file.nii.gz'
    ext : str or None
        The extension part of the string, i.e. 'nii.gz' of 'file.nii.gz',
        or None when the filename contains no dot
    """
    # Known compound extensions win over a plain last-dot split.
    for compound_ext in double_exts:
        if path.endswith(compound_ext):
            return path[:-len(compound_ext)], compound_ext
    dirname, filename = os.path.split(path)
    pieces = filename.split('.')
    if len(pieces) == 1:
        # No dot at all: the whole filename is the base.
        base, ext = filename, None
    else:
        base = '.'.join(pieces[:-1])
        ext = '.' + pieces[-1]
    return os.path.join(dirname, base), ext
def retrieve_table_records(self, table, query_column, field_list, ids_to_retrieve):
    """Responsys.retrieveTableRecords call

    Accepts:
        InteractObject table
        string query_column
            possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER'
        list field_list
        list ids_to_retrieve

    Returns a RecordData
    """
    soap_table = table.get_soap_object(self.client)
    response = self.call('retrieveTableRecords', soap_table, query_column, field_list, ids_to_retrieve)
    return RecordData.from_soap_type(response)
def from_JSON(oauth_json, type="service"):
    '''At the time of writing, keys include:
    client_secret, client_email, redirect_uris (list), client_x509_cert_url,
    client_id, javascript_origins (list), auth_provider_x509_cert_url,
    auth_uri, token_uri.
    '''
    assert type in ("service", "web")
    parsed = json.loads(oauth_json)
    return NestedBunch(parsed[type])
def decode_unicode(data, replace_boo=True):
    """Replace literal unicode escape sequences in *data* with readable text.

    The input is expected to contain textual ``\\uXXXX`` / ``\\UXXXXXXXX``
    escape sequences (e.g. raw API output).  Emoticon code points become
    category markers (`` *happy* ``, `` *sad* ``, `` *sexual* ``,
    `` *hearts* ``, `` *baseball* ``), accented Latin and Greek letters are
    folded to their closest ASCII capital, Roman/circled numerals become
    digits, and anything unrecognised becomes `` *misc* ``.

    Ported from Python-2-only code: the removed ``string.replace(s, a, b)``
    function form is now ``str.replace``, and the final ``str.decode`` call
    is done via an explicit encode/decode round trip.  Two behavioural fixes:
    baseball emoticons no longer *also* emit `` *misc* `` (the original
    ``if``/``elif`` chain was broken), and malformed trailing hex digits now
    consistently map to `` *misc* `` instead of relying on Python 2's
    str/int comparison order (or crashing, for the ``246`` prefix).

    Parameters
    ----------
    data : str
        Text that may contain literal backslash escape sequences.
    replace_boo : bool, optional
        When False, unrecognised sequences are dropped (and most emoticon
        markers are replaced by a space in the text) instead of inserting
        marker strings.

    Returns
    -------
    tuple of (str, str)
        ``(retval, data)``: ``retval`` concatenates every substitution made
        (or an apology message when no escapes were found); ``data`` is the
        input text with the substitutions applied.
    """
    # Direct map: 4-digit unicode value -> replacement text.
    dictionary = {
        '0030': '0', '0031': '1', '0032': '2', '0033': '3', '0034': '4',
        '0035': '5', '0036': '6', '0037': '7', '0038': '8', '0039': '9',
        '0024': '$', '0040': '@', '00A2': 'cents', '00A3': 'pounds',
        '00A5': 'yen', '00C7': 'C', '00D0': 'D', '00D1': 'N', '00DD': 'Y',
        '00E7': 'c', '00F1': 'n', '00FD': 'y', '00FF': 'y', '010E': 'D',
        '010F': 'F', '0110': 'D', '0111': 'D', '0130': 'I', '0134': 'J',
        '0135': 'J', '0136': 'K', '0137': 'K', '0138': 'K', '0160': 'S',
        '0161': 'S', '0191': 'F', '0192': 'F', '0193': 'G', '0198': 'K',
        '0199': 'K', '019D': 'N', '019E': 'N', '01A4': 'P', '01A5': 'P',
        '01AC': 'T', '01AF': 'U', '01B5': 'Z', '01CD': 'A', '01CE': 'A',
        '01CF': 'I', '01D0': 'I', '01D1': 'O', '01D2': 'O', '01DE': 'A',
        '01DF': 'A', '01E0': 'A', '01E1': 'A', '01F4': 'G', '01F5': 'G',
        '01F8': 'N', '01F9': 'N', '01FA': 'A', '01FB': 'A', '021E': 'H',
        '021F': 'H', '0224': 'Z', '2113': 'L', '2718': 'X', '0225': 'Z',
        '2134': 'O', '0226': 'A', '0227': 'A', '0228': 'E', '0229': 'E',
        '0386': 'A', '0388': 'E', '0389': 'H', '038A': 'I', '0391': 'A',
        '0392': 'B', '0395': 'E', '0396': 'Z', '0397': 'H', '0399': 'I',
        '039A': 'K', '039C': 'M', '039D': 'N', '039F': 'O', '03A1': 'P',
        '03A4': 'T', '03A5': 'Y', '03A7': 'X', '03AA': 'I', '03AB': 'B',
        '1E10': 'D', '1E11': 'D', '1E12': 'D', '1E13': 'D', '1E1E': 'F',
        '1E1F': 'F', '1E20': 'G', '1E21': 'H', '1E2C': 'I', '1E2D': 'I',
        '1E2E': 'I', '1E2F': 'I', '1E3E': 'M', '1E3F': 'M', '1E70': 'T',
        '1E71': 'T', '1E8E': 'Y', '1E8F': 'Y', '1EE0': 'O', '1EE1': 'O',
        '1EE2': 'O', '1EE3': 'O', '1EE4': 'O', '1EF0': 'U', '1EF1': 'U'}
    # Prefix (first three hex digits) -> candidate letters; the last hex
    # digit selects among them via the `ranges` table below.  Single-char
    # values apply directly, independent of the digit.
    pattern_dict = {
        '00C': 'AEI', '00D': 'OU', '00E': 'AEI', '00F': 'OU', '010': 'AC',
        '011': 'EG', '012': 'GHI', '013': 'L', '014': 'LNO', '015': 'RS',
        '016': 'TU', '017': 'UWYZ', '018': 'BCD', '01D': 'U', '01E': 'GKO',
        '020': 'AEIO', '021': 'RUST', '022': 'O', '1E0': 'ABCD', '1E1': 'E',
        '1E3': 'KL', '1E4': 'MNO', '1E5': 'OPR', '1E6': 'ST', '1E7': 'UV',
        '1E8': 'WX', '1E9': 'Z', '1EB': 'A', '1EC': 'EIO', '1ED': 'O',
        '1EE': 'U', '1EF': 'Y', '216': 'greeknum', '217': 'greeknum',
        '246': 'consecnum', '247': 'numfrom17'}
    # Hex digit letter -> decimal value (both cases).
    hex_dict = {'A': '10', 'B': '11', 'C': '12', 'D': '13', 'E': '14',
                'F': '15', 'a': '10', 'b': '11', 'c': '12', 'd': '13',
                'e': '14', 'f': '15'}
    # Emoticon code points by category (membership lookups -> sets).
    happy_dict = {'1F600', '263A', '1F601', '1F602', '1F603', '1F604',
                  '1F605', '1F606', '1F60A', '1F642', '1F607', '1F60C',
                  '1F643', '1F62C', '1F63A', '1F638', '1F639'}
    sad_dict = {'1F610', '1F611', '1F623', '1F494', '1F625', '1F62B',
                '1F613', '1F614', '1F615', '2639', '1F641', '1F616',
                '1F61E', '1F61F', '1F624', '1F622', '1F62D', '1F629',
                '1F630', '1F620'}
    sexual_dict = {'1F609', '1F6C0', '2B50', '1F445', '1F525', '1F36D',
                   '2606', '1F60D', '1F460', '1F618', '1F617', '1F61A',
                   '1F917', '1F60F', '1F63B', '1F63D', '1F483', '1F46F',
                   '1F48F', '1F444', '1F48B', '1F459', '1F484', '1F34C',
                   '1F4AF', '264B'}
    hearts = {'1F498', '2664', '2764', '2661', '2665', '1F493', '1F495',
              '1F496', '1F497', '1F499', '1F49A', '1F49B', '1F49C',
              '1F49D', '1F49E', '1F49F', '2763'}
    baseball_dict = {'26BE', '1F3C0', '1F3CF'}
    # Per-prefix decision table replacing the original wall of if/elif
    # chains: ([(low, high, letter_index), ...], fallback).  ``low`` of None
    # mirrors the original bare ``elif last <= high``; ``fallback`` is a
    # letter index, or None for *misc*.
    ranges = {
        '00C': ([(0, 5, 0), (None, 11, 1)], 2),
        '00D': ([(2, 6, 0), (9, 12, 1)], None),
        '00E': ([(0, 5, 0), (None, 11, 1)], 2),
        '00F': ([(2, 6, 0), (9, 12, 1)], None),
        '010': ([(0, 5, 0), (6, 13, 1)], None),
        '011': ([(2, 11, 0), (12, 15, 1)], None),
        '012': ([(0, 3, 0), (None, 7, 1), (None, 15, 2)], None),
        '014': ([(0, 2, 0), (None, 11, 1), (None, 15, 2)], None),
        '015': ([(4, 9, 0), (10, 15, 1)], None),
        '016': ([(2, 7, 0), (8, 15, 1)], None),
        '017': ([(0, 3, 0), (None, 5, 1), (None, 8, 2), (None, 14, 3)], None),
        '018': ([(0, 5, 0), (None, 8, 1), (None, 12, 2)], None),
        '01E': ([(4, 7, 0), (None, 9, 1), (None, 13, 2)], None),
        '020': ([(0, 3, 0), (None, 7, 1), (None, 11, 2), (None, 15, 3)], None),
        '021': ([(0, 3, 0), (None, 7, 1), (None, 9, 2), (None, 11, 3)], None),
        '1E0': ([(0, 1, 0), (None, 7, 1), (None, 9, 2), (None, 15, 3)], None),
        '1E3': ([(0, 5, 0), (None, 13, 1)], None),
        '1E4': ([(0, 3, 0), (None, 11, 1), (None, 15, 2)], None),
        '1E5': ([(0, 3, 0), (None, 7, 1), (None, 15, 2)], None),
        '1E6': ([(0, 9, 0), (10, 15, 1)], None),
        '1E7': ([(2, 11, 0), (12, 15, 1)], None),
        '1E8': ([(0, 9, 0), (10, 13, 1)], None),
        '1EC': ([(0, 7, 0), (None, 11, 1), (None, 15, 2)], None)}
    misc_code = ' *misc* ' if replace_boo else ''
    retval = ''
    # Escape the text so embedded backslash sequences survive as literal
    # characters, then drop raw hex-byte escapes entirely.
    data = data.encode('unicode-escape').decode('utf-8')
    data = re.sub(r'(?:\\x(?:[0-9]|[a-f]){2})+', ' ', data, flags=re.IGNORECASE)
    short_escape = re.compile(r'\\u[0-9ABCDEFabcdef]{1,4}')
    long_escape = re.compile(r'\\U[0-9ABCDEFabcdef]{1,8}')
    found = {m.group() for m in short_escape.finditer(data)}
    for m in long_escape.finditer(data):
        # Normalise the long form to a pseudo-\u key on its trailing digits,
        # exactly as the original did (`'\u' + match[5:]`).
        found.add('\\u' + m.group()[5:])

    def _last_value(digits):
        """Trailing hex digit(s) of a code as an int, or None when invalid."""
        if digits in hex_dict:
            digits = hex_dict[digits]
        try:
            return int(digits)
        except ValueError:
            return None

    def _emit(old, new):
        """Append *new* to the transcript and substitute it into the text."""
        nonlocal data, retval
        retval += new
        data = data.replace(old, new)

    for unicode_str in found:
        uni = unicode_str[2:]
        if unicode_str not in data:
            # Entry came from a \UXXXXXXXX match: rebuild the spelling that
            # actually occurs in the text so replacements can find it.
            unicode_str = '\\U000' + uni
        # NOTE(review): the original lower-cased the needle before replacing,
        # which makes replacements of upper-case-hex escapes no-ops in the
        # returned text; kept for behavioural parity -- confirm if intended.
        for c in 'ABCDEF':
            unicode_str = unicode_str.replace(c, c.lower())
        prefix = uni[0:3]
        if uni in baseball_dict:
            # Always replaced with the marker, even when replace_boo is
            # False (as before); no longer *also* falls through to *misc*.
            _emit(unicode_str, ' *baseball* ')
        elif uni in happy_dict:
            retval += ' *happy* '
            data = data.replace(unicode_str, ' *happy* ' if replace_boo else ' ')
        elif uni in sad_dict:
            retval += ' *sad* '
            data = data.replace(unicode_str, ' *sad* ' if replace_boo else ' ')
        elif uni in sexual_dict:
            retval += ' *sexual* '
            data = data.replace(unicode_str, ' *sexual* ' if replace_boo else ' ')
        elif uni in hearts:
            retval += ' *hearts* '
            data = data.replace(unicode_str, ' *hearts* ' if replace_boo else ' ')
        elif uni in dictionary:
            _emit(unicode_str, dictionary[uni])
        elif prefix in ('004', '005', '006', '007'):
            # Basic Latin letters: recover the ASCII letter from the code
            # point, folding lower case (006x/007x) to upper case.
            last = _last_value(uni[3:])
            if last is None:
                _emit(unicode_str, misc_code)
            else:
                base = 4 if prefix in ('004', '005') else 6
                num = (int(uni[2:3]) - base) * 16 + last
                _emit(unicode_str, chr(64 + num))
        elif prefix in pattern_dict:
            choices = pattern_dict[prefix]
            last = _last_value(uni[3:])
            if len(choices) == 1:
                # Unambiguous prefix: the letter does not depend on the digit.
                _emit(unicode_str, choices)
            elif last is None:
                _emit(unicode_str, misc_code)
            elif prefix in ('216', '217'):
                # Roman numerals.
                _emit(unicode_str, str(last + 1) if last <= 12 else misc_code)
            elif prefix == '246':
                # Circled numbers 1..16.
                _emit(unicode_str, str(last + 1))
            elif prefix == '247':
                # Circled numbers 17..20.
                _emit(unicode_str, str(last + 17) if last <= 3 else misc_code)
            else:
                spans, fallback = ranges[prefix]
                for low, high, idx in spans:
                    if (low is None or last >= low) and last <= high:
                        _emit(unicode_str, choices[idx])
                        break
                else:
                    _emit(unicode_str,
                          choices[fallback] if fallback is not None else misc_code)
        else:
            # Unknown sequence.
            _emit(unicode_str, misc_code)
    if len(retval) == 0:
        retval = "Sorry, no unicode strings were present"
    try:
        # Fold any surviving escape sequences back into real characters
        # (Python 3 equivalent of the original `data.decode('unicode-escape')`;
        # data is pure ASCII at this point).
        data = data.encode('latin-1', 'backslashreplace').decode('unicode-escape')
    except UnicodeDecodeError:
        pass
    retval = retval.encode('unicode-escape').decode('unicode-escape')
    return (retval, data)
def sam_parse_reply(line):
    """parse a reply line into a dict

    The first whitespace token names the command; key=value options start
    at the third token and are parsed via ``split_kv``.
    """
    tokens = line.split(' ')
    options = dict(split_kv(tokens[2:]))
    return SAMReply(tokens[0], options)
def QA_indicator_SKDJ(DataFrame, N=9, M=3):
    """Slow stochastic (SKDJ) indicator.

    Interpretation notes (translated from the original Chinese):
    1. Readings above 80 suggest a pullback is likely; below 20, a rebound.
    2. K crossing above D around 20 is a tentative buy signal.
    3. K crossing below D around 80 is a tentative sell signal.
    4. Crossings around the 50 level carry little meaning.
    """
    close = DataFrame['close']
    lowest = LLV(DataFrame['low'], N)
    highest = HHV(DataFrame['high'], N)
    # Smoothed raw stochastic value, scaled to 0-100.
    rsv = EMA((close - lowest) / (highest - lowest) * 100, M)
    k = EMA(rsv, M)
    d = MA(k, M)
    return pd.DataFrame({'RSV': rsv, 'SKDJ_K': k, 'SKDJ_D': d})
def _scobit_utility_transform(systematic_utilities, alt_IDs, rows_to_alts, shape_params, intercept_params, intercept_ref_pos=None, *args, **kwargs):
    """Transform systematic utilities for the Scobit model.

    Parameters
    ----------
    systematic_utilities : 1D ndarray.
        All elements should be ints, floats, or longs. Should contain the
        systematic utilities of each observation per available alternative.
        Note that this vector is formed by the dot product of the design
        matrix with the vector of utility coefficients.
    alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation
        per available alternative for the given observation. Elements denote
        the alternative corresponding to the given row of the design matrix.
    rows_to_alts : 2D scipy sparse matrix.
        There should be one row per observation per available alternative and
        one column per possible alternative. This matrix maps the rows of the
        design matrix to the possible alternatives for this dataset. All
        elements should be zeros or ones.
    shape_params : None or 1D ndarray.
        If an array, each element should be an int, float, or long. There
        should be one value per shape parameter of the model being used.
    intercept_params : None or 1D ndarray.
        If an array, each element should be an int, float, or long. If J is
        the total number of possible alternatives for the dataset being
        modeled, there should be J - 1 elements in the array.
    intercept_ref_pos : int, or None, optional.
        Specifies the index of the alternative, in the ordered array of
        unique alternatives, that is not having its intercept parameter
        estimated (in order to ensure identifiability). Should only be None
        if `intercept_params` is None.

    Returns
    -------
    transformations : 2D ndarray.
        Should have shape `(systematic_utilities.shape[0], 1)`. The returned
        array contains the transformed utility values for this model. All
        elements should be ints, floats, or longs.
    """
    # Figure out what indices are to be filled in
    if intercept_ref_pos is not None and intercept_params is not None:
        # list() is required: Python 3 range objects are immutable and have
        # no .remove() method (the original py2 code relied on a list here).
        needed_idxs = list(range(intercept_params.shape[0] + 1))
        needed_idxs.remove(intercept_ref_pos)
        if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1:
            # Get an array of zeros with shape
            # (num_possible_alternatives, num_parameter_samples)
            all_intercepts = np.zeros((rows_to_alts.shape[1],
                                       intercept_params.shape[1]))
            # For alternatives having their intercept estimated, replace the
            # zeros with the current value of the estimated intercepts
            all_intercepts[needed_idxs, :] = intercept_params
        else:
            # Get an array of zeros with shape (num_possible_alternatives,)
            all_intercepts = np.zeros(rows_to_alts.shape[1])
            # For alternatives having their intercept estimated, replace the
            # zeros with the current value of the estimated intercepts
            all_intercepts[needed_idxs] = intercept_params
    else:
        # Create a full set of intercept parameters including the intercept
        # constrained to zero
        all_intercepts = np.zeros(rows_to_alts.shape[1])
    # Figure out what intercept values correspond to each row of the
    # systematic utilities
    long_intercepts = rows_to_alts.dot(all_intercepts)
    # Convert the shape parameters back into their 'natural parametrization'
    natural_shapes = np.exp(shape_params)
    natural_shapes[np.isposinf(natural_shapes)] = max_comp_value
    # Figure out what shape values correspond to each row of the
    # systematic utilities
    long_natural_shapes = rows_to_alts.dot(natural_shapes)
    # Calculate the data dependent part of the transformation
    # Also, along the way, guard against numeric underflow or overflow
    exp_neg_v = np.exp(-1 * systematic_utilities)
    exp_neg_v[np.isposinf(exp_neg_v)] = max_comp_value
    powered_term = np.power(1 + exp_neg_v, long_natural_shapes)
    powered_term[np.isposinf(powered_term)] = max_comp_value
    term_2 = np.log(powered_term - 1)
    # Guard against overflow.
    # NOTE(review): powered_term was already clamped above, so this mask is
    # always all-False at this point; kept for parity with the original --
    # confirm whether the clamp should instead happen after this check.
    too_big_idx = np.isposinf(powered_term)
    term_2[too_big_idx] = (-1 * long_natural_shapes[too_big_idx]
                           * systematic_utilities[too_big_idx])
    transformations = long_intercepts - term_2
    # Clamp infinities so downstream optimizers only ever see finite values.
    transformations[np.isposinf(transformations)] = max_comp_value
    transformations[np.isneginf(transformations)] = -1 * max_comp_value
    # Be sure to return a 2D array since other functions will be expecting
    # that
    if len(transformations.shape) == 1:
        transformations = transformations[:, np.newaxis]
    return transformations
def tracker(port=22222):
    """Start a tracker web application to register running models.

    Blocks forever serving the tornado IOLoop.

    :param port: TCP port to listen on; defaults to 22222 for backward
        compatibility with the previous hard-coded value.
    """
    application = mmi.tracker.app()
    application.listen(port)
    # Lazy %-style args so formatting is skipped when INFO is disabled.
    logger.info('serving at port %d', port)
    tornado.ioloop.IOLoop.instance().start()
def cook_layout(layout, ajax):
    """Return main_template compatible layout.

    Parses the XHTML *layout* source, wraps every node carrying a
    ``data-slots`` attribute in ``metal:fill-slot`` tags, injects default
    slots when none were declared, appends the implicit head slots (unless
    *ajax* is truthy), and interpolates the result into the module-level
    TEMPLATE.

    :param layout: XHTML layout source (text or bytes)
    :param ajax: when truthy, the implicit head slots are not appended
    :returns: the cooked layout as a string
    """
    # Fix XHTML layouts with CR[+LF] line endings
    layout = re.sub('\r', '\n', re.sub('\r\n', '\n', layout))
    # Parse layout; text input must be encoded before feeding the serializer
    if isinstance(layout, six.text_type):
        result = getHTMLSerializer([layout.encode('utf-8')], encoding='utf-8')
    else:
        result = getHTMLSerializer([layout], encoding='utf-8')
    # Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[)
    if '<![CDATA[' in layout:
        result.serializer = html.tostring
    # Wrap all panels with a metal:fill-slot-tag:
    all_slots = []
    for layoutPanelNode in slotsXPath(result.tree):
        data_slots = layoutPanelNode.attrib['data-slots']
        all_slots += wrap_append_prepend_slots(layoutPanelNode, data_slots)
        # data-slots is consumed here; it must not leak into the output
        del layoutPanelNode.attrib['data-slots']
    # When no slots are explicitly defined, try to inject the very default
    # slots
    if len(all_slots) == 0:
        for node in result.tree.xpath('//*[@data-panel="content"]'):
            wrap_append_prepend_slots(node, 'content > body header main * content-core')
    # Append implicit slots
    head = result.tree.getroot().find('head')
    if not ajax and head is not None:
        for name in ['top_slot', 'head_slot', 'style_slot', 'javascript_head_slot']:
            # Builds e.g. <metal:... define-slot="head_slot"/> in the metal
            # namespace ('{{{0:s}}}' renders a literal '{ns}' Clark name)
            slot = etree.Element('{{{0:s}}}{1:s}'.format(NSMAP['metal'], name), nsmap=NSMAP)
            slot.attrib['define-slot'] = name
            head.append(slot)
    template = TEMPLATE
    metal = 'xmlns:metal="http://namespaces.zope.org/metal"'
    # NOTE(review): the metal xmlns declaration is stripped from the final
    # output -- presumably the surrounding main_template declares it; confirm.
    return (template % ''.join(result)).replace(metal, '')
def get_common_session_key(self, premaster_secret):
    """K = H(S).

    Special implementation for Apple TV: the session key is the
    concatenation of two hashes of the premaster secret, salted with the
    4-byte big-endian counters 0 and 1.
    """
    counters = (b'\x00\x00\x00\x00', b'\x00\x00\x00\x01')
    parts = [self.hash(premaster_secret, counter, as_bytes=True)
             for counter in counters]
    return parts[0] + parts[1]
def _parse_relation(self, tag):
    """Parses the chunk tag, role and relation id from the token relation tag.

    - VP => VP, [], []
    - VP-1 => VP, [1], [None]
    - ADJP-PRD => ADJP, [None], [PRD]
    - NP-SBJ-1 => NP, [1], [SBJ]
    - NP-OBJ-1*NP-OBJ-2 => NP, [1,2], [OBJ, OBJ]
    - NP-SBJ;NP-OBJ-1 => NP, [1,1], [SBJ, OBJ]
    """
    chunk = None
    relation = []
    role = []
    if ";" in tag:
        # NP-SBJ;NP-OBJ-1 => the trailing id relates to every joined part,
        # so rewrite the ';' as '<id>*' before splitting.
        shared_id = tag.split("*")[0][-2:]
        if not shared_id.startswith("-"):
            shared_id = ""
        tag = tag.replace(";", shared_id + "*")
    parts = tag.split("*") if "*" in tag else [tag]
    for part in parts:
        pieces = part.split("-")
        count = len(pieces)
        chunk = pieces[0]
        if count == 2:
            relation.append(pieces[1])
            role.append(None)
        elif count >= 3:
            relation.append(pieces[2])
            role.append(pieces[1])
        if count > 1:
            last = relation[-1]
            if last.isdigit():
                relation[-1] = int(last)
            else:
                # Correct "ADJP-PRD":
                # (ADJP, [PRD], [None]) => (ADJP, [None], [PRD])
                relation[-1], role[-1] = None, last
    return chunk, relation, role
def _compare_suffix(self, other):
    """Return false if suffixes are mutually exclusive"""
    # A missing suffix on either side is treated as compatible.
    if not (self.suffix and other.suffix):
        return True
    # Gather the distinguishing suffixes used by both names ...
    combined = set(self.suffix_list) | set(other.suffix_list)
    unique_suffixes = combined & UNIQUE_SUFFIXES
    # ... and collapse equivalent spellings to one canonical form.
    for alias in EQUIVALENT_SUFFIXES:
        if alias in unique_suffixes:
            unique_suffixes.discard(alias)
            unique_suffixes.add(EQUIVALENT_SUFFIXES[alias])
    # More than one distinct unique suffix means the names conflict.
    return len(unique_suffixes) < 2
def _maybe_replace_path(self, match):
    """Regex replacement callback that substitutes matched paths when needed."""
    candidate = match.group(0)
    if not self._should_replace(candidate):
        # Leave non-qualifying paths untouched.
        return candidate
    return self._replace_path(candidate)
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
    """Add the file `name' to the archive. `name' may be any type of file
    (directory, fifo, symbolic link, etc.). If given, `arcname'
    specifies an alternative name for the file in the archive.
    Directories are added recursively by default. This can be avoided by
    setting `recursive' to False. `exclude' is a function that should
    return True for each filename to be excluded. `filter' is a function
    that expects a TarInfo object argument and returns the changed
    TarInfo object, if it returns None the TarInfo object will be
    excluded from the archive.
    """
    # Writing requires the archive to be open in append or write mode.
    self._check("aw")
    if arcname is None:
        arcname = name
    # Exclude pathnames. `exclude' is deprecated in favour of `filter'.
    if exclude is not None:
        import warnings
        warnings.warn("use the filter argument instead", DeprecationWarning, 2)
        if exclude(name):
            self._dbg(2, "tarfile: Excluded %r" % name)
            return
    # Skip if somebody tries to archive the archive...
    if self.name is not None and os.path.abspath(name) == self.name:
        self._dbg(2, "tarfile: Skipped %r" % name)
        return
    self._dbg(1, name)
    # Create a TarInfo object from the file.
    tarinfo = self.gettarinfo(name, arcname)
    if tarinfo is None:
        # e.g. sockets and other unsupported file types yield no TarInfo.
        self._dbg(1, "tarfile: Unsupported type %r" % name)
        return
    # Change or exclude the TarInfo object via the user-supplied filter.
    if filter is not None:
        tarinfo = filter(tarinfo)
        if tarinfo is None:
            self._dbg(2, "tarfile: Excluded %r" % name)
            return
    # Append the tar header and data to the archive.
    if tarinfo.isreg():
        # Regular file: stream its contents after the header.
        with tarfile.bltn_open(name, "rb") as f:
            self.addfile(tarinfo, f)
    elif tarinfo.isdir():
        self.addfile(tarinfo)
        if recursive:
            # Recurse into the directory, keeping archive names in sync
            # with the on-disk names and propagating all options.
            for f in os.listdir(name):
                self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude, filter)
    else:
        # Links, fifos, devices etc.: header only, no payload.
        self.addfile(tarinfo)
def add_tags(self, tags, afterwards=None, remove_rest=False):
    """Add `tags` to all messages in this thread.

    .. note::
        This only enqueues the requested operation on this object's
        :class:`DBManager's <alot.db.DBManager>` write queue.
        You need to call :meth:`DBManager.flush <alot.db.DBManager.flush>`
        to actually write out.

    :param tags: a list of tags to be added
    :type tags: list of str
    :param afterwards: callback that gets called after successful
                       application of this tagging operation
    :type afterwards: callable
    :param remove_rest: remove all other tags
    :type remove_rest: bool
    """
    def _on_success():
        # Keep the locally cached tag set in sync with what the backend
        # just applied, then forward to the caller's callback.
        if remove_rest:
            self._tags = set(tags)
        else:
            self._tags = self._tags.union(tags)
        if callable(afterwards):
            afterwards()

    self._dbman.tag('thread:' + self._id, tags,
                    afterwards=_on_success, remove_rest=remove_rest)
def get_all_published_ships_basic(db_connection):
    """Gets a list of all published ships and their basic information.

    The query result is memoized on the function object, so the stored
    procedure runs at most once per process.

    :return: Each result has a tuple of (typeID, typeName, groupID,
        groupName, categoryID, and categoryName).
    :rtype: list
    """
    memo = get_all_published_ships_basic
    # Populate the cache on first use only.
    if not hasattr(memo, '_results'):
        memo._results = execute_sql('CALL get_all_published_ships_basic();', db_connection)
    return memo._results
def _delete_extraneous_files(self):
    # type: (Uploader) -> None
    """Delete extraneous files on the remote
    :param Uploader self: this
    """
    # No-op unless the upload spec explicitly opted into deletion.
    if not self._spec.options.delete_extraneous_destination:
        return
    # list blobs for all destinations
    checked = set()
    deleted = 0
    for sa, container, vpath, dpath in self._get_destination_paths():
        # De-duplicate destinations by storage account, endpoint and path so
        # each remote location is scanned at most once.
        key = ';'.join((sa.name, sa.endpoint, str(dpath)))
        if key in checked:
            continue
        logger.debug('attempting to delete extraneous blobs/files from: {}'.format(key))
        if (self._spec.options.mode == blobxfer.models.azure.StorageModes.File):
            # Azure File share destination.
            files = blobxfer.operations.azure.file.list_all_files(sa.file_client, container)
            for file in files:
                try:
                    # Only consider entries under the destination virtual path;
                    # relative_to raises ValueError for anything outside it.
                    pathlib.Path(file).relative_to(vpath)
                except ValueError:
                    continue
                # NOTE(review): `id` shadows the builtin of the same name.
                id = blobxfer.operations.upload.Uploader.create_destination_id(sa.file_client, container, file)
                # Entries in the exclude set were part of this upload; keep them.
                if id not in self._delete_exclude:
                    if self._general_options.dry_run:
                        logger.info('[DRY RUN] deleting file: {}'.format(file))
                    else:
                        if self._general_options.verbose:
                            logger.debug('deleting file: {}'.format(file))
                        blobxfer.operations.azure.file.delete_file(sa.file_client, container, file)
                        deleted += 1
        else:
            # Blob container destination (block/page/append modes).
            blobs = blobxfer.operations.azure.blob.list_all_blobs(sa.block_blob_client, container)
            for blob in blobs:
                try:
                    # Only consider blobs under the destination virtual path.
                    pathlib.Path(blob.name).relative_to(vpath)
                except ValueError:
                    continue
                id = blobxfer.operations.upload.Uploader.create_destination_id(sa.block_blob_client, container, blob.name)
                if id not in self._delete_exclude:
                    if self._general_options.dry_run:
                        logger.info('[DRY RUN] deleting blob: {}'.format(blob.name))
                    else:
                        if self._general_options.verbose:
                            logger.debug('deleting blob: {}'.format(blob.name))
                        blobxfer.operations.azure.blob.delete_blob(sa.block_blob_client, container, blob.name)
                        deleted += 1
        checked.add(key)
    logger.info('deleted {} extraneous blobs/files'.format(deleted))
def _populate(self, json):
    """A helper method that, given a JSON object representing this object,
    assigns values based on the properties dict and the attributes of
    its Properties.
    """
    if not json:
        return
    # hide the raw JSON away in case someone needs it
    self._set('_raw_json', json)
    for key in json:
        # Only handle keys declared in this class's properties dict that are
        # not identifier fields.
        if key in (k for k in type(self).properties.keys() if not type(self).properties[k].identifier):
            if type(self).properties[key].relationship and not json[key] is None:
                if isinstance(json[key], list):
                    # To-many relationship: build one related object per entry.
                    objs = []
                    for d in json[key]:
                        if not 'id' in d:
                            # Entries without an id cannot be resolved; skip.
                            continue
                        new_class = type(self).properties[key].relationship
                        obj = new_class.make_instance(d['id'], getattr(self, '_client'))
                        if obj:
                            # Recursively fill the related object from its dict.
                            obj._populate(d)
                            objs.append(obj)
                    self._set(key, objs)
                else:
                    # To-one relationship: the value is either a dict carrying
                    # an 'id', or the bare id itself.
                    if isinstance(json[key], dict):
                        related_id = json[key]['id']
                    else:
                        related_id = json[key]
                    new_class = type(self).properties[key].relationship
                    obj = new_class.make_instance(related_id, getattr(self, '_client'))
                    if obj and isinstance(json[key], dict):
                        obj._populate(json[key])
                    self._set(key, obj)
            elif type(self).properties[key].slug_relationship and not json[key] is None:
                # create an object of the expected type with the given slug
                self._set(key, type(self).properties[key].slug_relationship(self._client, json[key]))
            elif type(json[key]) is dict:
                # Plain nested dict: wrap in a MappedObject for attribute access.
                self._set(key, MappedObject(**json[key]))
            elif type(json[key]) is list:
                # we're going to use MappedObject's behavior with lists to
                # expand these, then grab the resulting value to set
                mapping = MappedObject(_list=json[key])
                self._set(key, mapping._list)
            # pylint: disable=no-member
            elif type(self).properties[key].is_datetime:
                try:
                    t = time.strptime(json[key], DATE_FORMAT)
                    self._set(key, datetime.fromtimestamp(time.mktime(t)))
                except:
                    # TODO - handle this better (or log it?)
                    # Parsing failed; fall back to storing the raw value.
                    self._set(key, json[key])
            else:
                # Plain scalar value: store as-is.
                self._set(key, json[key])
    # Mark this object as fully loaded and record the refresh time.
    self._set('_populated', True)
    self._set('_last_updated', datetime.now())
def get_xauth_access_token(self, username, password):
    """Get an access token from an username and password combination.

    In order to get this working you need to create an app at
    http://twitter.com/apps, after that send a mail to api@twitter.com
    and request activation of xAuth for it.

    :param username: account screen name
    :param password: account password
    :returns: tuple of (oauth_token, oauth_token_secret)
    :raises TweepError: wrapping any failure (HTTP, parsing, missing keys)
    """
    try:
        url = self._get_oauth_url('access_token')
        oauth = OAuth1(self.consumer_key, client_secret=self.consumer_secret)
        r = requests.post(url=url, auth=oauth,
                          headers={'x_auth_mode': 'client_auth',
                                   'x_auth_username': username,
                                   'x_auth_password': password})
        # r.content is bytes under Python 3; parse_qs on bytes yields bytes
        # keys, so the str lookups below would return None. Decode first.
        credentials = parse_qs(r.content.decode('utf-8'))
        return credentials.get('oauth_token')[0], credentials.get('oauth_token_secret')[0]
    except Exception as e:
        raise TweepError(e)
def _to_unicode_scalar_value ( s ) :
"""Helper function for converting a character or surrogate pair into a Unicode scalar value e . g .
" \ud800 \udc00 " - > 0x10000
The algorithm can be found in older versions of the Unicode Standard .
https : / / unicode . org / versions / Unicode3.0.0 / ch03 . pdf , Section 3.7 , D28
Unicode scalar value : a number N from 0 to 0x10FFFF is defined by applying the following algorithm to a
character sequence S :
If S is a single , non - surrogate value U :
N = U
If S is a surrogate pair H , L :
N = ( H - 0xD800 ) * 0x0400 + ( L - 0xDC00 ) + 0x10000
: param s :
: return :""" | if len ( s ) == 1 :
return ord ( s )
elif len ( s ) == 2 :
return ( ord ( s [ 0 ] ) - 0xD800 ) * 0x0400 + ( ord ( s [ 1 ] ) - 0xDC00 ) + 0x10000
else :
raise ValueError |
def do_plot(args):
    """Create plots of mcmc output"""
    import ugali.utils.plotting
    import pylab as plt
    # Unpack the task tuple: config filename, object name, label, coordinates.
    config, name, label, coord = args
    filenames = make_filenames(config, label)
    srcfile = filenames['srcfile']
    samfile = filenames['samfile']
    memfile = filenames['memfile']
    # Bail out early when required inputs are missing.
    if not exists(srcfile):
        logger.warning("Couldn't find %s; skipping..." % srcfile)
        return
    if not exists(samfile):
        logger.warning("Couldn't find %s; skipping..." % samfile)
        return
    config = ugali.utils.config.Config(config)
    # Total burn-in samples to discard: per-walker burn times walker count.
    burn = config['mcmc']['nburn'] * config['mcmc']['nwalkers']
    source = ugali.analysis.source.Source()
    source.load(srcfile, section='source')
    # Triangle (corner) plot of the posterior samples.
    outfile = samfile.replace('.npy', '.png')
    ugali.utils.plotting.plotTriangle(srcfile, samfile, burn=burn)
    logger.info(" Writing %s..." % outfile)
    plt.savefig(outfile, bbox_inches='tight', dpi=60)
    plt.close()
    plotter = ugali.utils.plotting.SourcePlotter(source, config, radius=0.5)
    # Membership data is optional; the membership/6-panel plots are skipped
    # when the file is absent.
    data = fitsio.read(memfile, trim_strings=True) if exists(memfile) else None
    if data is not None:
        plt.figure()
        kernel, isochrone = source.kernel, source.isochrone
        ugali.utils.plotting.plotMembership(config, data, kernel, isochrone)
        outfile = samfile.replace('.npy', '_mem.png')
        logger.info(" Writing %s..." % outfile)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        plt.close()
        # Six-panel diagnostic plot, saved as both PNG and PDF.
        plotter.plot6(data)
        outfile = samfile.replace('.npy', '_6panel.png')
        logger.info(" Writing %s..." % outfile)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        outfile = samfile.replace('.npy', '_6panel.pdf')
        logger.info(" Writing %s..." % outfile)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        plt.close()
    try:
        title = name
        plotter.plot4()
        outfile = samfile.replace('.npy', '_4panel.png')
        logger.info(" Writing %s..." % outfile)
        plt.suptitle(title)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        plt.close()
    except:
        # NOTE(review): bare except deliberately keeps plotting best-effort,
        # but it hides the underlying error beyond this warning.
        logger.warning(" Failed to create plotter.plot4()")
def get_following(self):
    """:calls: `GET /user/following <http://developer.github.com/v3/users/followers>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
    """
    # Lazily paginated list of the users the authenticated user follows.
    content_class = github.NamedUser.NamedUser
    return github.PaginatedList.PaginatedList(
        content_class,
        self._requester,
        "/user/following",
        None,
    )
def unique_(self, col):
    """Returns unique values in a column.

    On failure the error is routed to ``self.err`` and ``None`` is
    returned implicitly, matching the class's error convention.
    """
    try:
        deduped = self.df.drop_duplicates(subset=[col], inplace=False)
        return list(deduped[col])
    except Exception as e:
        self.err(e, "Can not select unique data")
def from_chord_shorthand(self, shorthand):
    """Empty the container, then fill it with the notes named by *shorthand*.

    See mingus.core.chords.from_shorthand for an up to date list of
    recognized formats.

    Example:
    >>> NoteContainer().from_chord_shorthand('Am')
    ['A-4', 'C-5', 'E-5']
    """
    # Replace any existing contents with the chord's notes, then return
    # self so calls can be chained.
    self.empty()
    notes = chords.from_shorthand(shorthand)
    self.add_notes(notes)
    return self
def get_layers(self, class_: Type[L], became: bool = True) -> List[L]:
    """Returns the list of layers of a given class. If no layers are present
    then the list will be empty.

    :param class_: class of the expected layers
    :param became: allow transformed layers in results
    :return: a fresh list; mutating it does not affect internal state
    """
    # Copy before extending: the original `out += ...` aliased the list
    # stored in self._index and permanently appended transformed layers
    # into the index on every call.
    out = list(self._index.get(class_, []))
    if became:
        out += self._transformed.get(class_, [])
    return out
def start_task(self, method, *args, **kwargs):
    """Start a task in a separate, non-daemon thread and track it.

    Args:
        method: the method to start in a separate thread
        args: Accept args/kwargs arguments, forwarded to *method*
    """
    thread = threading.Thread(target=method, args=args, kwargs=kwargs)
    # Fix: the original set `thread.is_daemon`, which is not a Thread
    # attribute and had no effect; `daemon` is the real flag.
    thread.daemon = False
    thread.start()
    self.threads.append(thread)
def reader(stream):
    """Yield an Item for every line of JSON read from *stream*."""
    for raw_line in stream:
        # Each line is stored verbatim on a fresh Item's `json` attribute.
        record = Item()
        record.json = raw_line
        yield record
def browse(self, path=None):
    """Returns a list of directories matching the path given.

    Args:
        path (str): glob pattern.

    Returns:
        List[str]
    """
    if path:
        assert isinstance(path, string_types)
        query = {'current': path}
    else:
        # Without a pattern the endpoint is queried with no parameters.
        query = None
    return self.get('browse', params=query)
def get_collection_measures(self):
    """Helper function for calculating measurements derived from clusters/chains/collections"""
    # NOTE: this module is Python 2 (statement-form print below).
    if not self.quiet:
        print
        print "Computing duration-independent", self.current_collection_type, "measures..."
    # First pass includes singleton (length == 1) collections.
    self.compute_collection_measures()
    # Second pass excludes singleton collections.
    self.compute_collection_measures(no_singletons=True)
    self.compute_pairwise_similarity_score()
    if not self.quiet:
        # Report only the measures matching the current collection type and
        # similarity measure, in sorted order.
        collection_measures = [x for x in self.measures if x.startswith("COLLECTION_") and self.current_collection_type in x and self.current_similarity_measure in x]
        collection_measures.sort()
        print_table([(k, str(self.measures[k])) for k in collection_measures])
    if not self.quiet:
        print
        print "Computing duration-based clustering measures..."
    self.compute_duration_measures()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.