signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def f_rollup(items, times, freq):
    """Use :func:`groupby_freq` to roll up items into per-period totals.

    :param items: items in the timeseries
    :param times: times corresponding to ``items``
    :param freq: one of the ``dateutil.rrule`` frequency constants
    :type freq: str
    :return: numpy array with one summed value per period
    """
    # BUG FIX: ``np.sum`` over a generator is deprecated and unreliable in
    # NumPy; the builtin ``sum`` is the supported way to fold a generator.
    rollup = [sum(item for __, item in ts) for _, ts in groupby_freq(items, times, freq)]
    return np.array(rollup)
|
def sort_variants(vcf_handle):
    """Sort the variants of a VCF file by chromosome priority.

    Header lines (starting with '#') are skipped. Each variant line is
    prefixed with a numeric chromosome priority, written to a temporary
    file, sorted on disk via ``sort_variant_file`` and yielded back with
    the priority column stripped again.

    Args:
        vcf_handle: iterable of VCF lines

    Yields:
        str: variant lines in sorted order
    """
    logger.debug("Creating temp file")
    temp_file = NamedTemporaryFile(delete=False)
    temp_file.close()
    # BUG FIX: the original additionally opened ``temp_file.name`` here into
    # an unused ``temp_file_handle`` that was never closed (a file-descriptor
    # leak); that dead code has been removed.
    try:
        with codecs.open(temp_file.name, mode='w', encoding='utf-8', errors='replace') as f:
            for line in vcf_handle:
                if not line.startswith('#'):
                    line = line.rstrip().split('\t')
                    chrom = line[0]
                    priority = get_chromosome_priority(chrom)
                    print_line = "{0}\t{1}\n".format(priority, '\t'.join(line))
                    f.write(print_line)
        # Sort the variants
        sort_variant_file(temp_file.name)
        with codecs.open(temp_file.name, mode='r', encoding='utf-8', errors='replace') as f:
            for line in f:
                line = line.rstrip().split('\t')
                # Drop the priority column that was prepended above.
                yield '\t'.join(line[1:])
    except Exception as err:
        logger.error("Something went wrong")
        logger.error(err)
        # Re-raise instead of swallowing: silently stopping here would make
        # callers see a truncated variant stream with no indication of error.
        raise
    finally:
        logger.debug("Deleting temp file")
        os.remove(temp_file.name)
        logger.debug("Temp file deleted")
|
def add(node_name, **kwargs):
    """Create a new node and generate a token for it."""
    result = {}
    opts = dict(kwargs)
    overwrite = opts.pop('overwrite', False)
    existing = nago.core.get_node(node_name)
    # Refuse to clobber an existing node unless overwrite was requested.
    if existing and not overwrite:
        result['status'] = 'error'
        result['message'] = "node %s already exists. add argument overwrite=1 to overwrite it." % (node_name)
        return result
    if existing:
        existing.delete()
    node = nago.core.Node()
    node['host_name'] = node_name
    # Copy any remaining keyword arguments onto the node as attributes.
    for key, value in opts.items():
        node[key] = value
    node.save()
    result['message'] = "node successfully saved"
    result['node_data'] = node.data
    return result
|
def handleMatch(self, m):
    """Handles user input into [magic] tag, processes it, and inserts the
    returned URL into an <img> tag through a Python ElementTree <img>
    Element."""
    raw_text = m.group(3)
    image_url = processString(raw_text)
    image_el = etree.Element('img')
    # The processed URL becomes 'src'; the raw input doubles as alt/title.
    image_el.set('src', image_url)
    image_el.set('alt', raw_text)
    image_el.set('title', raw_text)
    return image_el
|
def pre_process_method_headers(method, headers):
    '''Return the lowered method and the WSGI-normalized headers.

    Each header name has '-' replaced by '_', is prefixed with 'http_'
    unless it is a standard WSGI header, and is upper-cased.

    :param method: HTTP method name
    :param headers: mapping of header name -> value
    :return: tuple of (lowered method, transformed headers dict)
    '''
    method = method.lower()
    # Standard WSGI supported headers; frozenset gives O(1) membership tests
    # instead of the original per-header list scan.
    _wsgi_headers = frozenset([
        "content_length", "content_type", "query_string", "remote_addr",
        "remote_host", "remote_user", "request_method", "server_name",
        "server_port",
    ])

    def _transform(header):
        # Replace - with _, prepend http_ if necessary, convert to upper case.
        header = header.replace("-", "_")
        if header.lower() not in _wsgi_headers:
            header = "http_{header}".format(header=header)
        return header.upper()

    return method, {_transform(header): value for header, value in headers.items()}
|
def custom_scale_mixture_prior_builder(getter, name, *args, **kwargs):
    """A builder for the Gaussian scale-mixture prior of Fortunato et al.

    Please see https://arxiv.org/abs/1704.02798, section 7.1.

    Args:
      getter: The `getter` passed to a `custom_getter`. Please see the
        documentation for `tf.get_variable`.
      name: The `name` argument passed to `tf.get_variable`.
      *args: Positional arguments forwarded by `tf.get_variable`.
      **kwargs: Keyword arguments forwarded by `tf.get_variable`.

    Returns:
      An instance of `tfp.distributions.Distribution` representing the
      prior distribution over the variable in question.
    """
    # This specific prior formulation doesn't need any of the arguments
    # forwarded from `get_variable`.
    del getter, name, args, kwargs
    return CustomScaleMixture(FLAGS.prior_pi, FLAGS.prior_sigma1, FLAGS.prior_sigma2)
|
def gramschmidt(vin, uin):
    """Return the component of the first vector orthogonal to the second.

    The output vector is not normalized.

    Args:
        vin (numpy array): first input vector
        uin (numpy array): second input vector

    Raises:
        ValueError: if ``uin`` has zero (or negative) squared norm.
    """
    projection = np.inner(vin, uin)
    norm_sq = np.inner(uin, uin)
    # A degenerate second vector gives no direction to project away from.
    if norm_sq <= 0.0:
        raise ValueError("Zero or negative inner product!")
    return vin - (projection / norm_sq) * uin
|
def sanitize_label(label: str) -> str:
    """Sanitize a BIO label - this deals with OIE labels sometimes having
    some noise, as parentheses.

    :param label: raw BIO label, e.g. ``"B-(ARG0"``
    :return: cleaned label with parenthesis noise stripped from the suffix
    """
    if "-" in label:
        # BUG FIX: split only on the first '-' so suffixes that themselves
        # contain a dash (e.g. "B-ARG0-of") no longer raise ValueError.
        prefix, suffix = label.split("-", 1)
        # Drop any leading "(" noise from the suffix.
        suffix = suffix.split("(")[-1]
        return f"{prefix}-{suffix}"
    else:
        return label
|
def is_traditional(s):
    """Check if a string's Chinese characters are Traditional.

    This is equivalent to:
        >>> identify('foo') in (TRADITIONAL, BOTH)
    """
    hanzi = _get_hanzi(s)
    # No Chinese characters at all -> cannot be Traditional.
    if not hanzi:
        return False
    # Characters shared by both scripts count as Traditional too.
    return hanzi.issubset(_SHARED_CHARACTERS) or hanzi.issubset(_TRADITIONAL_CHARACTERS)
|
def set_status(self, name, text='', row=0, fg='black', bg='white'):
    '''set a status value'''
    # Silently drop updates once the child process has exited.
    if not self.is_alive():
        return
    self.parent_pipe_send.send(Value(name, text, row, fg, bg))
|
def type_inherits_of_type(inheriting_type, base_type):
    """Checks whether inheriting_type inherits from base_type.

    Only single-inheritance chains are followed: a class with zero or
    more than one direct base terminates the walk with False.

    :param inheriting_type:
    :param base_type:
    :return: True if base_type is a base of inheriting_type
    """
    assert isinstance(inheriting_type, type) or isclass(inheriting_type)
    assert isinstance(base_type, type) or isclass(base_type)
    # Walk up the single-inheritance chain iteratively instead of recursing.
    current = inheriting_type
    while True:
        if current == base_type:
            return True
        bases = current.__bases__
        if len(bases) != 1:
            return False
        current = bases[0]
|
def get_entity(self, entity_id, at=None):
    """Returns entity with given ID, optionally until position."""
    # Try to find a snapshot so we don't replay the full event stream.
    snapshot = None
    if self._snapshot_strategy is not None:
        snapshot = self._snapshot_strategy.get_snapshot(entity_id, lte=at)
    # Derive the starting state and the version after which events apply.
    if snapshot is None:
        initial_state, gt = None, None
    else:
        initial_state = entity_from_snapshot(snapshot)
        gt = snapshot.originator_version
    # Replay the remaining events on top of the initial state.
    return self.get_and_project_events(entity_id, gt=gt, lte=at, initial_state=initial_state)
|
def _grid_widgets(self):
    """Put the widgets in the correct position based on self.__compound."""
    # NOTE(review): the tk-constant comparisons below use ``is`` on string
    # constants such as tk.RIGHT; this relies on comparing against the exact
    # interned constants from the tkinter module -- confirm callers pass those.
    orient = str(self._scale.cget('orient'))
    # The scale always occupies the middle cell (row 2, column 2); padding is
    # applied only on the side facing the entry, chosen by self.__compound.
    self._scale.grid(row=2, column=2, sticky='ew' if orient == tk.HORIZONTAL else 'ns', padx=(0, self.__entryscalepad) if self.__compound is tk.RIGHT else (self.__entryscalepad, 0) if self.__compound is tk.LEFT else 0, pady=(0, self.__entryscalepad) if self.__compound is tk.BOTTOM else (self.__entryscalepad, 0) if self.__compound is tk.TOP else 0)
    # The entry goes above/below (row 1/3) or beside (column 1/3) the scale.
    self._entry.grid(row=1 if self.__compound is tk.TOP else 3 if self.__compound is tk.BOTTOM else 2, column=1 if self.__compound is tk.LEFT else 3 if self.__compound is tk.RIGHT else 2)
    # Along the scale's axis the middle cell stretches (weight 1); across it
    # the outer cells absorb the slack so the widgets stay centered.
    if orient == tk.HORIZONTAL:
        self.columnconfigure(0, weight=0)
        self.columnconfigure(2, weight=1)
        self.columnconfigure(4, weight=0)
        self.rowconfigure(0, weight=1)
        self.rowconfigure(2, weight=0)
        self.rowconfigure(4, weight=1)
    else:
        self.rowconfigure(0, weight=0)
        self.rowconfigure(2, weight=1)
        self.rowconfigure(4, weight=0)
        self.columnconfigure(0, weight=1)
        self.columnconfigure(2, weight=0)
        self.columnconfigure(4, weight=1)
|
def wrap_object(func, before, after):
    '''Wrap ``func`` so ``before()`` runs first and ``after()`` always runs last.

    :param func: the callable to wrap
    :param before: zero-argument callable invoked before ``func``
    :param after: zero-argument callable invoked after ``func`` returns or
        raises (``finally`` semantics)
    :return: the wrapping callable
    '''
    def _wrapper(*args, **kwargs):
        before()
        try:
            return func(*args, **kwargs)
        finally:
            # BUG FIX: the original had ``except Exception as e: raise e``,
            # which was redundant (finally already runs) and re-raised from
            # the wrapper frame, obscuring the traceback; ``finally`` alone
            # guarantees ``after()`` runs while exceptions propagate intact.
            after()
    return _wrapper
|
def smooth_rectangle(x, y, rec_w, rec_h, gaussian_width_x, gaussian_width_y):
    """Rectangle with a solid central region, then Gaussian fall-off at the edges."""
    # Signed distance outside the rectangle edge on each axis (negative inside).
    dist_x = abs(x) - rec_w / 2.0
    dist_y = abs(y) - rec_h / 2.0
    inside_x = np.less(dist_x, 0.0)
    inside_y = np.less(dist_y, 0.0)
    var_x = gaussian_width_x * gaussian_width_x
    var_y = gaussian_width_y * gaussian_width_y
    with float_error_ignore():
        # A zero Gaussian width collapses the fall-off to zero everywhere.
        if var_x == 0.0:
            falloff_x = x * 0.0
        else:
            falloff_x = np.exp(np.divide(-dist_x * dist_x, 2 * var_x))
        if var_y == 0.0:
            falloff_y = y * 0.0
        else:
            falloff_y = np.exp(np.divide(-dist_y * dist_y, 2 * var_y))
    # Inside the box each axis contributes 1; outside, the Gaussian tail.
    return np.minimum(np.maximum(inside_x, falloff_x), np.maximum(inside_y, falloff_y))
|
def determine_module_class(path, class_path):
    """Determine type of module and return deployment module class."""
    # Directory-name suffix -> module class, checked first.
    suffix_map = {
        '.sls': 'runway.module.serverless.Serverless',
        '.tf': 'runway.module.terraform.Terraform',
        '.cdk': 'runway.module.cdk.CloudDevelopmentKit',
        '.cfn': 'runway.module.cloudformation.CloudFormation',
    }
    if not class_path:
        # First check directory name for a type-indicating suffix.
        basename = os.path.basename(path)
        for suffix, candidate in suffix_map.items():
            if basename.endswith(suffix):
                class_path = candidate
                break
    if not class_path:
        # Fallback to autodetection from the module directory's contents.
        if os.path.isfile(os.path.join(path, 'serverless.yml')):
            class_path = 'runway.module.serverless.Serverless'
        elif glob.glob(os.path.join(path, '*.tf')):
            class_path = 'runway.module.terraform.Terraform'
        elif os.path.isfile(os.path.join(path, 'cdk.json')) and os.path.isfile(os.path.join(path, 'package.json')):
            class_path = 'runway.module.cdk.CloudDevelopmentKit'
        elif glob.glob(os.path.join(path, '*.env')) or glob.glob(os.path.join(path, '*.yaml')) or glob.glob(os.path.join(path, '*.yml')):
            class_path = 'runway.module.cloudformation.CloudFormation'
    if not class_path:
        LOGGER.error('No module class found for %s', os.path.basename(path))
        sys.exit(1)
    return load_object_from_string(class_path)
|
async def async_get_state(self, field: str) -> dict:
    """Get state of object in deCONZ.

    Field is a string representing an API endpoint or lower,
    e.g. field = '/lights'.
    See Dresden Elektroniks REST API documentation for details:
    http://dresden-elektronik.github.io/deconz-rest-doc/rest/
    """
    # Delegate to the shared request helper with this session's GET method.
    return await async_request(self.session.get, self.api_url + field)
|
def get(self, type_name, obj_id, base_fields=None, nested_fields=None):
    """Get the resource by resource id.

    :param type_name: resource type, for example pool, lun, nasServer
    :param obj_id: resource id
    :param base_fields: resource fields to return
    :param nested_fields: nested resource fields
    :return: list of tuple [(name, res_inst)]
    """
    fields = self.get_fields(type_name, base_fields, nested_fields)
    # Instance endpoints are addressed as /api/instances/<type>/<id>.
    url = '/api/instances/{}/{}'.format(type_name, obj_id)
    return self.rest_get(url, fields=fields)
|
def assert_unordered_list_eq(expected, actual, message=None):
    """Raise AssertionError unless ``expected`` and ``actual`` contain
    equal objects, without regard to their order.

    Works on unhashable elements, so this takes quadratic time in the
    number of elements in actual; don't use it for very long lists.
    """
    unmatched_expected = []
    remaining = list(actual)
    # Cross off one matching actual element per expected element.
    for item in expected:
        try:
            remaining.remove(item)
        except ValueError:
            unmatched_expected.append(item)
    if not unmatched_expected and not remaining:
        return
    if not message:
        message = ("%r not equal to %r; missing items: %r in expected, %r in actual." % (expected, actual, remaining, unmatched_expected))
    assert False, message
|
def __zipped_files_data(self):
    """Get a dict of all files of interest from the FA release zipfile."""
    collected = {}
    with zipfile.ZipFile(self.__zip_file) as thezip:
        for zipinfo in thezip.infolist():
            member = zipinfo.filename
            if member.endswith('metadata/icons.json'):
                with thezip.open(zipinfo) as compressed_file:
                    collected['icons.json'] = compressed_file.read()
            elif member.endswith('.ttf'):
                # For the record, the paths usually look like this:
                #   webfonts/fa-brands-400.ttf
                #   webfonts/fa-regular-400.ttf
                #   webfonts/fa-solid-900.ttf
                # so the style name is the second dash-separated token.
                style = os.path.basename(member).split('-')[1]
                if style in self.FA_STYLES:
                    with thezip.open(zipinfo) as compressed_file:
                        collected[style] = compressed_file.read()
    # Safety checks:
    assert all(style in collected for style in self.FA_STYLES), 'Not all FA styles found! Update code is broken.'
    assert 'icons.json' in collected, 'icons.json not found! Update code is broken.'
    return collected
|
def _term(self, term):
    """Add a term to the query.

    Arguments:
        term (str): The term to add.

    Returns:
        SearchHelper: Self
    """
    # All terms must be strings for Elasticsearch; coerce first.
    term = str(term)
    # Empty strings would add nothing to the query text; skip them.
    if term:
        self.__query["q"] += term
    return self
|
def _imload(self, filepath, kwds):
    """Load an image file, guessing the format, and return a numpy
    array containing an RGB image. If EXIF keywords can be read
    they are returned in the dict _kwds_.

    Loaders are tried in order: OpenCV (if available), then PIL, then a
    built-in PPM reader; raises ImageError when none can handle the file.
    """
    start_time = time.time()
    # Guess the MIME type from the file name; default to JPEG when unknown.
    typ, enc = mimetypes.guess_type(filepath)
    if not typ:
        typ = 'image/jpeg'
    typ, subtyp = typ.split('/')
    self.logger.debug("MIME type is %s/%s" % (typ, subtyp))
    data_loaded = False
    if have_opencv and subtyp not in ['gif']:
        # First choice is OpenCv, because it supports high-bit depth
        # multiband images
        means = 'opencv'
        data_np = cv2.imread(filepath, cv2.IMREAD_ANYDEPTH + cv2.IMREAD_ANYCOLOR)
        if data_np is not None:
            data_loaded = True
            # funky indexing because opencv returns BGR images,
            # whereas PIL and others return RGB
            if len(data_np.shape) >= 3 and data_np.shape[2] >= 3:
                data_np = data_np[..., ::-1]
            # OpenCv doesn't "do" image metadata, so we punt to piexif
            # library (if installed)
            self.piexif_getexif(filepath, kwds)
            # OpenCv added a feature to do auto-orientation when loading
            # (see https://github.com/opencv/opencv/issues/4344)
            # So reset these values to prevent auto-orientation from
            # happening later
            kwds['Orientation'] = 1
            kwds['Image Orientation'] = 1
            # convert to working color profile, if can
            if self.clr_mgr.can_profile():
                data_np = self.clr_mgr.profile_to_working_numpy(data_np, kwds)
    if not data_loaded and have_pil:
        means = 'PIL'
        image = PILimage.open(filepath)
        try:
            # Prefer PIL's own EXIF reader; fall back to piexif if present.
            if hasattr(image, '_getexif'):
                info = image._getexif()
                if info is not None:
                    for tag, value in info.items():
                        kwd = TAGS.get(tag, tag)
                        kwds[kwd] = value
            elif have_exif:
                self.piexif_getexif(image.info["exif"], kwds)
            else:
                self.logger.warning("Please install 'piexif' module to get image metadata")
        except Exception as e:
            # Metadata failures are non-fatal; the pixel data still loads.
            self.logger.warning("Failed to get image metadata: %s" % (str(e)))
        # convert to working color profile, if can
        if self.clr_mgr.can_profile():
            image = self.clr_mgr.profile_to_working_pil(image, kwds)
        # convert from PIL to numpy
        data_np = np.array(image)
        if data_np is not None:
            data_loaded = True
    if (not data_loaded and (typ == 'image') and (subtyp in ('x-portable-pixmap', 'x-portable-greymap'))):
        # Special opener for PPM files, preserves high bit depth
        means = 'built-in'
        data_np = open_ppm(filepath)
        if data_np is not None:
            data_loaded = True
    if not data_loaded:
        raise ImageError("No way to load image format '%s/%s'" % (typ, subtyp))
    end_time = time.time()
    self.logger.debug("loading (%s) time %.4f sec" % (means, end_time - start_time))
    return data_np
|
def put_settings(self, sensors=None, actuators=None, auth_token=None, endpoint=None, blink=None, discovery=None, dht_sensors=None, ds18b20_sensors=None):
    """Sync settings to the Konnected device.

    :param sensors: sensor configuration list (defaults to an empty list)
    :param actuators: actuator configuration list (defaults to an empty list)
    :param auth_token: token sent to the device as "token"
    :param endpoint: API URL sent to the device as "apiUrl"
    :param blink: optional flag, included in the payload only when set
    :param discovery: optional flag, included in the payload only when set
    :param dht_sensors: DHT sensor configuration list (defaults to empty)
    :param ds18b20_sensors: DS18B20 sensor configuration list (defaults to empty)
    :return: True if the device responded with a success status code
    :raises Client.ClientError: when the HTTP request fails
    """
    # BUG FIX: the defaults were mutable lists ([]), which Python shares
    # across calls; use None sentinels and build fresh lists instead.
    sensors = [] if sensors is None else sensors
    actuators = [] if actuators is None else actuators
    dht_sensors = [] if dht_sensors is None else dht_sensors
    ds18b20_sensors = [] if ds18b20_sensors is None else ds18b20_sensors
    url = self.base_url + '/settings'
    payload = {"sensors": sensors, "actuators": actuators, "dht_sensors": dht_sensors, "ds18b20_sensors": ds18b20_sensors, "token": auth_token, "apiUrl": endpoint}
    # Only include the optional toggles when explicitly provided.
    if blink is not None:
        payload['blink'] = blink
    if discovery is not None:
        payload['discovery'] = discovery
    try:
        r = requests.put(url, json=payload, timeout=10)
        return r.ok
    except RequestException as err:
        raise Client.ClientError(err)
|
def text(self, quantity: int = 5) -> str:
    """Generate the text.

    :param quantity: Quantity of sentences.
    :return: Text.
    """
    # Join `quantity` randomly chosen sentences with single spaces.
    sentences = [self.random.choice(self._data['text']) for _ in range(quantity)]
    return ' '.join(sentences).strip()
|
def draw_circle(self, x, y, r, color):
    """Draw a circle.

    Args:
        x (int): The x coordinate of the center of the circle.
        y (int): The y coordinate of the center of the circle.
        r (int): The radius of the circle.
        color (Tuple[int, int, int, int]): The color of the circle.

    Raises:
        SDLError: If an error is encountered.
    """
    red, green, blue, alpha = color[0], color[1], color[2], color[3]
    # check_int_err turns an SDL error return code into SDLError.
    check_int_err(lib.circleRGBA(self._ptr, x, y, r, red, green, blue, alpha))
|
def random_color ( dtype = np . uint8 ) :
"""Return a random RGB color using datatype specified .
Parameters
dtype : numpy dtype of result
Returns
color : ( 4 , ) dtype , random color that looks OK"""
|
hue = np . random . random ( ) + .61803
hue %= 1.0
color = np . array ( colorsys . hsv_to_rgb ( hue , .99 , .99 ) )
if np . dtype ( dtype ) . kind in 'iu' :
max_value = ( 2 ** ( np . dtype ( dtype ) . itemsize * 8 ) ) - 1
color *= max_value
color = np . append ( color , max_value ) . astype ( dtype )
return color
|
def group(self, p_todos):
    """Groups the todos according to the given group string."""
    # preorder todos for the group sort
    p_todos = _apply_sort_functions(p_todos, self.pregroupfunctions)
    # initialize result with a single group containing all todos
    result = OrderedDict([((), p_todos)])
    # Apply each grouping function in turn, subdividing every existing
    # group; group keys grow into tuples of "label: value" strings.
    for (function, label), _ in self.groupfunctions:
        oldresult = result
        result = OrderedDict()
        for oldkey, oldgroup in oldresult.items():
            for key, _group in groupby(oldgroup, function):
                newgroup = list(_group)
                # A grouping function may return a list of keys for one
                # todo; the group is then filed under each of them.
                if not isinstance(key, list):
                    key = [key]
                for subkey in key:
                    subkey = "{}: {}".format(label, subkey)
                    newkey = oldkey + (subkey,)
                    # Merge with an existing group of the same key (groupby
                    # only merges adjacent runs, so keys can repeat).
                    if newkey in result:
                        result[newkey] = result[newkey] + newgroup
                    else:
                        result[newkey] = newgroup
    # sort all groups
    for key, _group in result.items():
        result[key] = self.sort(_group)
    return result
|
def parse_cookie(self, string):
    '''Parses a cookie string like returned in a Set-Cookie header.

    @param string: The cookie string
    @return: the cookie dict
    '''
    # BUG FIX: use a raw string for the regex. The original relied on the
    # invalid escape sequences '\;' and '\s' inside a normal string literal,
    # which Python 3.12+ flags with SyntaxWarning. '[^;]' is equivalent to
    # the original '[^\;]' character class.
    results = re.findall(r'([^=]+)=([^;]+);?\s?', string)
    return {name: value for name, value in results}
|
def getNeighbors(self, id, depth=1, blankNodes='false', relationshipType=None, direction='BOTH', project='*', callback=None, output='application/json'):
    """Get neighbors from: /graph/neighbors/{id}

    Arguments:
        id: This ID should be either a CURIE or an IRI
        depth: How far to traverse neighbors
        blankNodes: Traverse blank nodes
        relationshipType: Which relationship to traverse
        direction: Which direction to traverse: INCOMING, OUTGOING,
            BOTH (default). Only used if relationshipType is specified.
        project: Which properties to project. Defaults to '*'.
        callback: Name of the JSONP callback ('fn' by default). Supplying
            this parameter or requesting a javascript media type will cause
            a JSONP response to be rendered.

    outputs:
        application/json
        application/graphson
        application/xml
        application/graphml+xml
        application/xgmml
        text/gml
        text/csv
        text/tab-separated-values
        image/jpeg
        image/png
    """
    kwargs = {'id': id, 'depth': depth, 'blankNodes': blankNodes, 'relationshipType': relationshipType, 'direction': direction, 'project': project, 'callback': callback}
    # dict-valued parameters are JSON-serialized before being sent.
    kwargs = {k: dumps(v) if type(v) is dict else v for k, v in kwargs.items()}
    # NOTE(review): param_rest is computed but never used below -- confirm
    # whether _make_rest has required side effects before removing it.
    param_rest = self._make_rest('id', **kwargs)
    url = self._basePath + ('/graph/neighbors/{id}').format(**kwargs)
    # 'id' is interpolated into the path; everything else goes in the query.
    requests_params = {k: v for k, v in kwargs.items() if k != 'id'}
    return self._get('GET', url, requests_params, output)
|
def get(self, name=None):
    """Returns the plugin object with the given name.

    Or if a name is not given, the complete plugin dictionary is returned.

    :param name: Name of a plugin
    :return: None, single plugin or dictionary of plugins
    """
    if name is None:
        return self._plugins
    # dict.get yields None for unknown plugins, matching the contract above.
    return self._plugins.get(name)
|
def get_diff(source, dest):
    """Get the diff between two records lists.

    Returns a 3-tuple of record lists: (to_create, to_update, to_delete).
    """
    # Index both sides by record ID for O(1) lookups.
    source_by_id = {record['id']: record for record in source}
    dest_by_id = {record['id']: record for record in dest}
    source_ids = set(source_by_id)
    dest_ids = set(dest_by_id)
    to_create = source_ids - dest_ids
    to_delete = dest_ids - source_ids
    # Records present on both sides are compared via their canonical JSON
    # form, which ignores kinto-only properties (ID, last_modified, enabled).
    to_update = set()
    for record_id in source_ids & dest_ids:
        if canonical_json(source_by_id[record_id]) != canonical_json(dest_by_id[record_id]):
            to_update.add(record_id)
    return ([source_by_id[k] for k in to_create], [source_by_id[k] for k in to_update], [dest_by_id[k] for k in to_delete])
|
def _from_dict(cls, _dict):
    """Initialize a QueryRelationsRelationship object from a json dictionary."""
    args = {}
    # Scalar fields copy straight across when present.
    for simple_key in ('type', 'frequency'):
        if simple_key in _dict:
            args[simple_key] = _dict.get(simple_key)
    # Nested lists are deserialized element by element.
    if 'arguments' in _dict:
        args['arguments'] = [QueryRelationsArgument._from_dict(x) for x in _dict.get('arguments')]
    if 'evidence' in _dict:
        args['evidence'] = [QueryEvidence._from_dict(x) for x in _dict.get('evidence')]
    return cls(**args)
|
def punsubscribe(self, *args):
    """Unsubscribe from the supplied patterns. If empty, unsubscribe from
    all patterns."""
    if not args:
        # No patterns supplied: unsubscribe everything currently tracked.
        patterns = self.patterns
    else:
        args = list_or_args(args[0], args[1:])
        # dict.fromkeys de-duplicates while preserving order.
        patterns = self._normalize_keys(dict.fromkeys(args))
    self.pending_unsubscribe_patterns.update(patterns)
    return self.execute_command('PUNSUBSCRIBE', *args)
|
def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
    """Move partitions from one table to another via the Thrift service.

    Thin synchronous wrapper: sends the exchange_partitions RPC and then
    blocks on its response.

    Parameters:
     - partitionSpecs
     - source_db
     - source_table_name
     - dest_db
     - dest_table_name
    """
    self.send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
    return self.recv_exchange_partitions()
|
def get_nodes(self, coord, coords):
    """Get the variables containing the definition of the nodes.

    Parameters
    ----------
    coord : xarray.Coordinate
        The mesh variable
    coords : dict
        The coordinates to use to get node coordinates
    """
    # Node coordinate names come from the mesh variable's attributes;
    # only the first two entries (x, y) are used.
    names = coord.attrs.get('node_coordinates', '').split()[:2]
    # Prefer the passed-in coords, falling back to the dataset's own.
    return [coords.get(name, self.ds.coords.get(name)) for name in names]
|
def convert_to_py_error(error_message):
    """Raise specific exceptions for ease of error handling."""
    lowered = error_message.lower()
    # Return the first matching specific error type; the original used a
    # for/else with identical behavior since the loop never breaks.
    for err_msg, err_type in ERR_MSGS:
        if err_msg in lowered:
            return err_type(error_message)
    return IndicoError(error_message)
|
def fit_transform(self, X, y):
    """Encode categorical columns into average target values.

    NOTE(review): this modifies ``X`` in place via ``X.loc[:, col] = ...``
    and also returns it -- callers should not expect the input to survive
    unchanged.

    Args:
        X (pandas.DataFrame): categorical columns to encode
        y (pandas.Series): the target column

    Returns:
        X (pandas.DataFrame): encoded columns
    """
    # One encoder (category -> mean target mapping) per column.
    self.target_encoders = [None] * X.shape[1]
    # Global target mean, used as the fallback for unmapped categories.
    self.target_mean = y.mean()
    for i, col in enumerate(X.columns):
        self.target_encoders[i] = self._get_target_encoder(X[col], y)
        # NaNs are mapped to the NAN_INT sentinel first; categories missing
        # from the encoder fall back to the global target mean.
        X.loc[:, col] = X[col].fillna(NAN_INT).map(self.target_encoders[i]).fillna(self.target_mean)
    return X
|
def get_context_data(self, **kwargs):
    """Add context data to view."""
    context = super().get_context_data(**kwargs)
    tabs = self.get_active_tabs()
    extra = {
        'page_detail_tabs': tabs,
        # Default the active tab to the first one, if any exist.
        'active_tab': tabs[0].code if tabs else '',
        'app_label': self.get_app_label(),
        'model_name': self.get_model_name(),
        'model_alias': self.get_model_alias(),
        'model_verbose_name': self.object._meta.verbose_name.title(),
        'back_url': self.get_back_url(),
        'edit_url': self.get_edit_url(),
        'delete_url': self.get_delete_url(),
        'title': self.title,
    }
    context.update(extra)
    return context
|
def socket_reader(connection: socket, buffer_size: int = 1024):
    """Read data from an adb socket, yielding each received chunk.

    Yields ``bytes`` chunks as they arrive. When the peer closes the
    connection (or the socket becomes unusable), the socket is closed,
    a final ``None`` sentinel is yielded, and the generator terminates.

    :param connection: connected socket to read from
    :param buffer_size: maximum number of bytes per recv call
    """
    while connection is not None:
        try:
            buffer = connection.recv(buffer_size)
            # An empty read means the peer performed an orderly shutdown.
            if not len(buffer):
                raise ConnectionAbortedError
        except ConnectionAbortedError:  # socket closed
            print('connection aborted')
            connection.close()
            yield None
            # BUG FIX: previously the loop re-entered with the closed socket,
            # hitting OSError and yielding None forever; stop instead.
            return
        except OSError:  # still operate connection after it was closed
            print('socket closed')
            connection.close()
            yield None
            return
        else:
            yield buffer
|
def cannon_normalize(spec_raw):
    """Normalize according to The Cannon."""
    # Work on a 2-D (1, n_pixels) copy as the helpers expect batched specs.
    spec = np.array([spec_raw])
    # Pixel index grid and Gaussian smoothing weights with L = 50.
    wl = np.arange(0, spec.shape[1])
    weights = continuum_normalization.gaussian_weight_matrix(wl, L=50)
    # Uniform inverse variance; only relative weighting matters here.
    ivar = np.ones(spec.shape) * 0.5
    cont = continuum_normalization._find_cont_gaussian_smooth(wl, spec, ivar, weights)
    norm_flux, norm_ivar = continuum_normalization._cont_norm(spec, ivar, cont)
    # Unwrap the single spectrum from the batch dimension.
    return norm_flux[0]
|
def _filter_in(self, term_list, field_name, field_type, is_not):
    """Returns a query that matches exactly ANY term in term_list.

    Notice that:
        A in {B, C} <=> (A = B or A = C)
        ~(A in {B, C}) <=> ~(A = B or A = C)
    Because OP_AND_NOT(C, D) <=> (C and ~D), then D = (A in {B, C})
    requires `is_not=False`.
    Assumes term is a list.
    """
    # Build the positive OR-of-exact-matches query first.
    exact_queries = [self._filter_exact(term, field_name, field_type, is_not=False) for term in term_list]
    any_query = xapian.Query(xapian.Query.OP_OR, exact_queries)
    if is_not:
        # Negate by subtracting the OR query from the match-all query.
        return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), any_query)
    return any_query
|
def get_epoch_namespace_receive_fees_period(block_height, namespace_id):
    """How long can a namespace receive register/renewal fees?

    :param block_height: block height used to select the epoch config
    :param namespace_id: namespace to look up; falls back to the '*' default
    :return: the NAMESPACE_RECEIVE_FEES_PERIOD setting for the namespace
    """
    epoch_config = get_epoch_config(block_height)
    # BUG FIX: dict.has_key() was removed in Python 3 (this file uses
    # f-strings and async def, so it targets Python 3); use `in` instead.
    if namespace_id in epoch_config['namespaces']:
        return epoch_config['namespaces'][namespace_id]['NAMESPACE_RECEIVE_FEES_PERIOD']
    else:
        return epoch_config['namespaces']['*']['NAMESPACE_RECEIVE_FEES_PERIOD']
|
def ssh_sa_ssh_server_cipher(self, **kwargs):
    """Build the brocade ssh-sa server cipher config element.

    Pops 'cipher' (required) and optional 'callback' from kwargs, then
    hands the assembled XML config to the callback.
    """
    config = ET.Element("config")
    ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
    server = ET.SubElement(ET.SubElement(ssh_sa, "ssh"), "server")
    ET.SubElement(server, "cipher").text = kwargs.pop('cipher')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def validate_version(self):
    """Ensure this package works with the installed version of dbt."""
    installed = get_installed_version()
    spec_strings = [x.to_version_string() for x in self.dbt_version]
    # The version spec must be internally satisfiable at all.
    if not versions_compatible(*self.dbt_version):
        raise DbtProjectError(IMPOSSIBLE_VERSION_ERROR.format(package=self.project_name, version_spec=spec_strings))
    # ...and the installed dbt must satisfy it.
    if not versions_compatible(installed, *self.dbt_version):
        raise DbtProjectError(INVALID_VERSION_ERROR.format(package=self.project_name, installed=installed.to_version_string(), version_spec=spec_strings))
|
def plot_qq_exp(fignum, I, title, subplot=False):
    """plots data against an exponential distribution in 0=>90.

    Parameters
    _________
    fignum : matplotlib figure number
    I : data
    title : plot title
    subplot : boolean, if True plot as subplot with 1 row, two columns
        with fignum the plot number

    Returns
    _________
    Me : test statistic (Eq. 5.15 from Fisher et al. (1987))
    1.094 : the critical value Me is compared against (95% level,
        per the plot annotation below)
    """
    if subplot == True:
        plt.subplot(1, 2, fignum)
    else:
        plt.figure(num=fignum)
    X, Y, dpos, dneg = [], [], 0., 0.
    rad = old_div(np.pi, 180.)
    xsum = 0
    # Transform each inclination: theta measured from vertical (in radians),
    # X = 1 - cos(theta); accumulate the sum for the kappa estimate below.
    for i in I:
        theta = (90. - i) * rad
        X.append(1. - np.cos(theta))
        xsum += X[-1]
    X.sort()
    n = float(len(X))
    # NOTE(review): presumably the maximum-likelihood concentration
    # estimate for the exponential model -- confirm against the reference.
    kappa = old_div((n - 1.), xsum)
    # Build exponential quantiles Y and track the largest positive (dpos)
    # and negative (dneg) deviations between the empirical and model CDF.
    for i in range(len(X)):
        p = old_div((float(i) - 0.5), n)
        Y.append(-np.log(1. - p))
        f = 1. - np.exp(-kappa * X[i])
        ds = old_div(float(i), n) - f
        if dpos < ds:
            dpos = ds
        ds = f - old_div((float(i) - 1.), n)
        if dneg < ds:
            dneg = ds
    # Use the larger of the two deviations for the test statistic.
    if dneg > dpos:
        ds = dneg
    else:
        ds = dpos
    Me = (ds - (old_div(0.2, n))) * (np.sqrt(n) + 0.26 + (old_div(0.5, (np.sqrt(n)))))
    # Eq. 5.15 from Fisher et al. (1987)
    plt.plot(Y, X, 'ro')
    bounds = plt.axis()
    plt.axis([0, bounds[1], 0., bounds[3]])
    # Annotate sample size, statistic, and the test verdict on the plot.
    notestr = 'N: ' + '%i' % (n)
    plt.text(.1 * bounds[1], .9 * bounds[3], notestr)
    notestr = 'Me: ' + '%7.3f' % (Me)
    plt.text(.1 * bounds[1], .8 * bounds[3], notestr)
    if Me > 1.094:
        notestr = "Not Exponential"
    else:
        notestr = "Exponential (95%)"
    plt.text(.1 * bounds[1], .7 * bounds[3], notestr)
    plt.title(title)
    plt.xlabel('Exponential Quantile')
    plt.ylabel('Data Quantile')
    return Me, 1.094
|
def untag(self, querystring, tags, afterwards=None):
    """removes tags from messages that match `querystring`.

    This appends an untag operation to the write queue and raises
    :exc:`~errors.DatabaseROError` if in read only mode.

    :param querystring: notmuch search string
    :type querystring: str
    :param tags: a list of tags to be removed
    :type tags: list of str
    :param afterwards: callback that gets called after successful
        application of this tagging operation
    :type afterwards: callable
    :exception: :exc:`~errors.DatabaseROError`

    .. note::
        This only adds the requested operation to the write queue.
        You need to call :meth:`DBManager.flush` to actually write out.
    """
    if self.ro:
        raise DatabaseROError()
    # Queue the operation; flush() performs the actual database write.
    self.writequeue.append(('untag', afterwards, querystring, tags))
|
def merge_truthy(*dicts):
    """Merge dictionaries, preferring truthy values on key collisions.

    Accepts any number of dictionaries (or any object whose ``.items()``
    yields 2-tuples). Later mappings win on collisions, except that a
    falsy value never replaces an existing value.

    >>> merge_truthy({'a': 1, 'c': 4}, {'a': None, 'b': 2}, {'b': 3})
    {'a': 1, 'b': 3, 'c': 4}
    """
    result = {}
    for mapping in dicts:
        for key, value in mapping.items():
            if value or key not in result:
                result[key] = value
            # otherwise keep the previously stored value
    return result
|
def select_spread(list_of_elements=None, number_of_elements=None):
    """Return `number_of_elements` items of `list_of_elements`, spread
    approximately evenly across the list.

    If the list has no more elements than requested, it is returned as-is.
    """
    total = len(list_of_elements)
    if total <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    if number_of_elements == 1:
        middle = int(round((total - 1) / 2))
        return [list_of_elements[middle]]
    # pick one element near the start of the current stride, then recurse
    # on the remainder of the list with one fewer element to select
    first_index = int(round((total - 1) / (2 * number_of_elements)))
    rest_start = int(round((total - 1) / number_of_elements))
    rest = select_spread(list_of_elements[rest_start:], number_of_elements - 1)
    return [list_of_elements[first_index]] + rest
|
def flatten_dict(dct, separator='-->', allowed_types=(int, float, bool)):
    """Return a list of string identifiers for each scalar element in `dct`.

    Recursively scans through `dct` and adds a string identifier for every
    value whose type is in `allowed_types`; nested dict keys are joined
    with `separator`. Keys starting with '__' are skipped.

    eg:
        dct = {
            'a': 1,
            'b': {'c': 1.0, 'd': True},
        }
        flatten_dict(dct) returns ['a', 'b-->c', 'b-->d']

    :param dct: dictionary to scan
    :param separator: string used to join nested keys
    :param allowed_types: value types that produce an identifier
        (tuple default avoids the shared mutable-default pitfall;
        `in` works the same on a tuple as on the original list)
    :return: list of identifier strings, sorted by key at each level
    """
    flat_list = []
    for key in sorted(dct):
        # keys prefixed with '__' are treated as private and skipped
        if key[:2] == '__':
            continue
        key_type = type(dct[key])
        if key_type in allowed_types:
            flat_list.append(str(key))
        elif key_type is dict:
            # bug fix: propagate separator and allowed_types into the
            # recursive call (previously the defaults were always used
            # below the first level)
            sub_list = flatten_dict(dct[key], separator=separator,
                                    allowed_types=allowed_types)
            flat_list += [str(key) + separator + sub for sub in sub_list]
    return flat_list
|
def _get_field_by_name(table: LdapObjectClass, name: str) -> tldap.fields.Field:
    """Look up and return the field of `table` named `name`.

    Raises KeyError if no field with that name exists.
    """
    return table.get_fields()[name]
|
def _get_scale_and_shape ( self , transformed_lvs ) :
"""Obtains model scale , shape and skewness latent variables
Parameters
transformed _ lvs : np . array
Transformed latent variable vector
Returns
- Tuple of model scale , model shape , model skewness"""
|
if self . scale is True :
if self . shape is True :
model_shape = transformed_lvs [ - 1 ]
model_scale = transformed_lvs [ - 2 ]
else :
model_shape = 0
model_scale = transformed_lvs [ - 1 ]
else :
model_scale = 0
model_shape = 0
if self . skewness is True :
model_skewness = transformed_lvs [ - 3 ]
else :
model_skewness = 0
return model_scale , model_shape , model_skewness
|
def convert_notebooks():
    """Convert IPython notebooks in the current directory to .rst files.

    Runs ``ipython nbconvert --to rst *.ipynb``, moves each notebook's
    static assets into ``_static/``, rewrites asset paths inside the
    generated .rst, and injects css class tags on images, literal output
    blocks and raw-html result divs.

    :raises SystemError: if nbconvert exits with a non-zero status.
    """
    convert_status = call(['ipython', 'nbconvert', '--to', 'rst', '*.ipynb'])
    if convert_status != 0:
        raise SystemError('Conversion failed! Status was %s' % convert_status)
    notebooks = [x for x in os.listdir('.') if '.ipynb' in x and os.path.isfile(x)]
    names = [os.path.splitext(x)[0] for x in notebooks]
    for i in range(len(notebooks)):
        name = names[i]
        notebook = notebooks[i]
        print('processing %s (%s)' % (name, notebook))
        # move static files into the shared _static directory, then drop
        # the per-notebook files directory
        sdir = '%s_files' % name
        statics = os.listdir(sdir)
        statics = [os.path.join(sdir, x) for x in statics]
        [shutil.copy(x, '_static/') for x in statics]
        shutil.rmtree(sdir)
        # rename static dir in rst file
        rst_file = '%s.rst' % name
        print('REsT file is %s' % rst_file)
        data = None
        with open(rst_file, 'r') as f:
            data = f.read()
        if data is not None:
            with open(rst_file, 'w') as f:
                # point references at _static instead of the removed dir
                data = re.sub('%s' % sdir, '_static', data)
                f.write(data)
        # add special tags
        lines = None
        with open(rst_file, 'r') as f:
            lines = f.readlines()
        if lines is not None:
            n = len(lines)
            i = 0
            rawWatch = False
            while i < n:
                line = lines[i]
                # add class tags to images for css formatting
                if 'image::' in line:
                    lines.insert(i + 1, ' :class: pynb\n')
                    n += 1
                elif 'parsed-literal::' in line:
                    lines.insert(i + 1, ' :class: pynb-result\n')
                    n += 1
                elif 'raw:: html' in line:
                    # remember we are inside a raw-html block; the next
                    # line containing '<div' gets the pynb-result class
                    rawWatch = True
                if rawWatch:
                    if '<div' in line:
                        line = line.replace('<div', '<div class="pynb-result"')
                        lines[i] = line
                        rawWatch = False
                i += 1
            with open(rst_file, 'w') as f:
                f.writelines(lines)
|
def get_attachable_volumes(self, start=0, count=-1, filter='', query='', sort='', scope_uris='', connections=''):
    """Get the volumes that are connected on the specified networks based
    on the storage system port's expected network connectivity.

    A volume is attachable if it satisfies either of the following:
        * The volume is shareable.
        * The volume is not shareable and not attached.

    Args:
        start: The first item to return, using 0-based indexing.
            If not specified, the default is 0 - start with the first
            available item.
        count: The number of resources to return. A count of -1 requests
            all items. The actual number of items in the response might
            differ from the requested count if the sum of start and count
            exceeds the total number of items.
        filter (list or str): A general filter/query string to narrow the
            list of items returned. The default is no filter.
        query: A general query string to narrow the list of resources
            returned. The default is no query.
        sort: The sort order of the returned data set. By default, the
            sort order is based on create time with the oldest entry first.
        connections: A list of dicts specifying the connections used by
            the attachable volumes. Needs network uri, initiator name and
            optional proxy name.
        scope_uris: A list specifying the scope uris used by the
            attachable volumes.

    Returns:
        list: A list of attachable volumes that the appliance manages.
    """
    endpoint = self.URI + '/attachable-volumes'
    if connections:
        endpoint = '{}?connections={}'.format(endpoint, connections)
    return self._client.get_all(start, count, filter=filter, query=query,
                                sort=sort, uri=endpoint, scope_uris=scope_uris)
|
def jwt_verify_token(headers):
    """Verify the JWT token carried in the request headers.

    :param dict headers: The request headers.
    :returns: The decoded token data.
    :rtype: dict
    :raises JWTInvalidHeaderError: if the header is missing or its
        authentication-type prefix does not match the configured one.
    :raises JWTInvalidIssuer: if the token subject is not the current user.
    :raises JWTDecodeError: if the token cannot be decoded.
    :raises JWTExpiredToken: if the token has expired.
    """
    # Get the token from headers
    token = headers.get(current_app.config['OAUTH2SERVER_JWT_AUTH_HEADER'])
    if token is None:
        raise JWTInvalidHeaderError
    # When an authentication type is configured, the header must look like
    # "<type> <token>" and the prefix must match exactly.
    expected_prefix = current_app.config['OAUTH2SERVER_JWT_AUTH_HEADER_TYPE']
    if expected_prefix is not None:
        prefix, token = token.split()
        if prefix != expected_prefix:
            raise JWTInvalidHeaderError
    try:
        decoded = jwt_decode_token(token)
        # Check the integrity of the user
        if current_user.get_id() != decoded.get('sub'):
            raise JWTInvalidIssuer
        return decoded
    except _JWTDecodeError as exc:
        raise_from(JWTDecodeError(), exc)
    except _JWTExpiredToken as exc:
        raise_from(JWTExpiredToken(), exc)
|
def dorogokupets2015_pth(v, temp, v0, gamma0, gamma_inf, beta, theta01, m1, theta02, m2, n, z, t_ref=300., three_r=3. * constants.R):
    """Calculate thermal pressure for the Dorogokupets 2015 EOS.

    :param v: unit-cell volume in A^3
    :param temp: temperature in K
    :param v0: unit-cell volume in A^3 at 1 bar
    :param gamma0: Gruneisen parameter at 1 bar
    :param gamma_inf: Gruneisen parameter at infinite pressure
    :param beta: volume dependence of the Gruneisen parameter
    :param theta01: first Debye temperature at 1 bar in K
    :param m1: weighting factor for the first term, see Dorogokupets 2015
    :param theta02: second Debye temperature at 1 bar in K
    :param m2: weighting factor for the second term, see Dorogokupets 2015
    :param n: number of elements in a chemical formula
    :param z: number of formula units in a unit cell
    :param t_ref: reference temperature, 300 K
    :param three_r: 3 times the gas constant.
        Jamieson modified this value to compensate for mismatches.
    :return: thermal pressure in GPa
    """
    v_mol = vol_uc2mol(v, z)
    gamma = altshuler_grun(v, v0, gamma0, gamma_inf, beta)
    theta1 = altshuler_debyetemp(v, v0, gamma0, gamma_inf, beta, theta01)
    theta2 = altshuler_debyetemp(v, v0, gamma0, gamma_inf, beta, theta02)
    # uncertainty-carrying inputs need unp.exp; plain floats use np.exp
    if isuncertainties([v, temp, v0, gamma0, gamma_inf, beta, theta01, m1, theta02, m2]):
        exp = unp.exp
    else:
        exp = np.exp
    prefactor = three_r * n * gamma / v_mol

    def weighted_terms(t):
        # two-term weighted thermal pressure at temperature t, in GPa;
        # deduplicates the four near-identical expressions of the
        # original implementation
        term1 = m1 / (m1 + m2) * prefactor * (theta1 / (exp(theta1 / t) - 1.))
        term2 = m2 / (m1 + m2) * prefactor * (theta2 / (exp(theta2 / t) - 1.))
        return term1 * 1.e-9 + term2 * 1.e-9

    return weighted_terms(temp) - weighted_terms(t_ref)
|
def importfile(filename):
    """Import a module directly from a file path.

    :param filename: <str> path to the module file
    :return: <module> || None
    """
    # make sure the package root is importable before importing
    root = packageRootPath(filename)
    if root not in sys.path:
        sys.path.insert(0, root)
    module_name = packageFromPath(filename, includeModule=True)
    __import__(module_name)
    return sys.modules[module_name]
|
def add_argparser(self, root, parents):
    """Register the 'auth' sub-command parser for this command.

    :param root: subparsers object the new parser is attached to
    :param parents: list of parent parsers; the oauth2client argparser
        is appended to it
    :return: the created sub-parser
    """
    parents.append(tools.argparser)
    auth_parser = root.add_parser('auth', parents=parents)
    auth_parser.set_defaults(func=self)
    auth_parser.add_argument(
        '--secrets', dest='secrets', action='store',
        help='Path to the authorization secrets file (client_secrets.json).')
    return auth_parser
|
def _parse_ftp_error(error):
    # type: (ftplib.Error) -> Tuple[Text, Text]
    """Extract the status code and message from an ftp error."""
    text = text_type(error)
    code, _sep, message = text.partition(" ")
    return code, message
|
def canonicalize(method, resource, query_parameters, headers):
    """Canonicalize method, resource, query parameters and headers.

    :type method: str
    :param method: The HTTP verb that will be used when requesting the URL.
        Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the
        signature will additionally contain the `x-goog-resumable` header,
        and the method changed to POST. See the signed URL docs:
        https://cloud.google.com/storage/docs/access-control/signed-urls

    :type resource: str
    :param resource: A pointer to a specific resource
        (typically, ``/bucket-name/path/to/blob.txt``).

    :type query_parameters: dict
    :param query_parameters: (Optional) Additional query parameters to be
        included as part of the signed URLs. See:
        https://cloud.google.com/storage/docs/xml-api/reference-headers#query

    :type headers: Union[dict|List(Tuple(str, str))]
    :param headers: (Optional) Additional HTTP headers to be included as
        part of the signed URLs. Requests using the signed URL *must*
        pass the specified header (name and value) with each request. See:
        https://cloud.google.com/storage/docs/xml-api/reference-headers

    :rtype: :class:_Canonical
    :returns: Canonical method, resource, query_parameters, and headers.
    """
    headers, _ = get_canonical_headers(headers)
    if method == "RESUMABLE":
        # the resumable flow always POSTs and carries this marker header
        method = "POST"
        headers.append("x-goog-resumable:start")
    if query_parameters is None:
        return _Canonical(method, resource, [], headers)
    # lowercase keys, strip values, then sort for a canonical ordering
    normalized_qp = []
    for key, value in query_parameters.items():
        cleaned = value.strip() if value else ""
        normalized_qp.append((key.lower(), cleaned))
    normalized_qp.sort()
    encoded_qp = six.moves.urllib.parse.urlencode(normalized_qp)
    canonical_resource = "{}?{}".format(resource, encoded_qp)
    return _Canonical(method, canonical_resource, normalized_qp, headers)
|
def upload_submit(self, upload_request):
    """Submit a dataset upload request.

    :param upload_request: the upload payload to submit
    :return: a DatasetUploadResponse for the submitted upload
    """
    return self._api_post(definition.DatasetUploadResponse,
                          '/api/1.0/upload/save', upload_request)
|
def patch_statusreporter():
    """Monkey-patch robotframework's StatusReporter for postmortem debugging.

    Wraps StatusReporter.__exit__ so that any Exception raised inside the
    reported scope drops into a pdb post-mortem session before the
    original exit handling runs.
    """
    from robot.running.statusreporter import StatusReporter
    original_exit = StatusReporter.__exit__

    def patched_exit(self, exc_type, exc_val, exc_tb):
        # only trigger for real Exception instances
        if exc_val and isinstance(exc_val, Exception):
            set_pdb_trace(pm=True)
        return original_exit(self, exc_type, exc_val, exc_tb)

    StatusReporter.__exit__ = patched_exit
|
def _write_mef(self, key, extlist, outfile):
    """Write out regular multi-extension FITS data.

    :param key: base data-source key identifying this image set
    :param extlist: extension indices that were modified and need writing
    :param outfile: existing FITS file, updated in place
    """
    channel = self.fv.get_channel(self.chname)
    with fits.open(outfile, mode='update') as pf:
        # Process each modified data extension
        for idx in extlist:
            k = '{0}[{1}]'.format(key, self._format_extname(idx))
            image = channel.datasrc[k]
            # Insert data and header into output HDU
            pf[idx].data = image.get_data()
            self._write_header(image, pf[idx])
        # Write history to PRIMARY
        self._write_history(key, pf['PRIMARY'])
|
def recvline(sock):
    """Receive a single line from the socket.

    Reads byte-by-byte up to (but excluding) the next newline.
    Returns None if the socket closes before a newline arrives.
    """
    buf = io.BytesIO()
    while True:
        byte = sock.recv(1)
        if not byte:
            # socket is closed
            return None
        if byte == b'\n':
            break
        buf.write(byte)
    line = buf.getvalue()
    log.debug('-> %r', line)
    return line
|
def colorize(arr, colors, values):
    """Colorize a monochromatic array *arr*, based on *colors* given for
    *values*. Interpolation is used. *values* must be in ascending order.

    :param arr: array of scalar values to colorize (may be a masked array)
    :param colors: sequence of colors, each at least (r, g, b), optionally
        with extra trailing channels (e.g. alpha) interpolated directly
    :param values: ascending data values corresponding to *colors*
    :return: list of channel arrays (rgb plus any extra channels); masked
        like *arr* when *arr* is a masked array
    """
    # hoist the repeated list->array conversions out of the loops
    values_arr = np.asarray(values)
    colors_arr = np.asarray(colors)
    # convert control colors to HCL so hue interpolates perceptually
    hcolors = np.array([rgb2hcl(*color[:3]) for color in colors_arr])
    # unwrap the hue channel to avoid interpolating across the 360 deg seam
    hcolors[:, 0] = np.rad2deg(np.unwrap(np.deg2rad(hcolors[:, 0])))
    channels = [np.interp(arr, values_arr, hcolors[:, i]) for i in range(3)]
    channels = list(hcl2rgb(*channels))
    # interpolate any extra channels (e.g. alpha) without the HCL round-trip
    rest = [np.interp(arr, values_arr, colors_arr[:, i + 3])
            for i in range(colors_arr.shape[1] - 3)]
    channels.extend(rest)
    try:
        return [np.ma.array(channel, mask=arr.mask) for channel in channels]
    except AttributeError:
        # plain ndarray input: no mask to propagate
        return channels
|
def destroy(self):
    """Destroy the Partner of this handle.

    Before destruction the Partner is stopped, all clients disconnected
    and all shared memory blocks released. Returns None when no library
    is loaded.
    """
    if not self.library:
        return None
    return self.library.Par_Destroy(ctypes.byref(self.pointer))
|
def delete_service(self, name):
    """Delete a service by name.

    @param name: Service name
    @return: The deleted ApiService object
    """
    resource_root = self._get_resource_root()
    return services.delete_service(resource_root, name, self.name)
|
def fix_size(self, content):
    """Adjust the width and height of the file switcher
    based on the relative size of the parent and content.

    :param content: entries to be displayed; an empty/falsy value leaves
        the geometry untouched
    """
    # Update size of dialog based on relative size of the parent
    if content:
        width, height = self.get_item_size(content)
        # Width: 65% of the parent width, capped at MAX_WIDTH
        parent = self.parent()
        relative_width = parent.geometry().width() * 0.65
        if relative_width > self.MAX_WIDTH:
            relative_width = self.MAX_WIDTH
        self.list.setMinimumWidth(relative_width)
        # Height: show at most 15 entries; 1.7 is presumably an empirical
        # per-row padding factor -- confirm against the widget's row height
        if len(content) < 15:
            max_entries = len(content)
        else:
            max_entries = 15
        max_height = height * max_entries * 1.7
        self.list.setMinimumHeight(max_height)
        # Resize
        self.list.resize(relative_width, self.list.height())
|
def pos(self):
    """Return a character representing the part of speech of the lemma.

    If this lemma has no POS of its own but refers to another lemma
    (renvoi), the referenced lemma's POS is returned instead.

    :return: character representing the part of speech of the lemma
    :rtype: str
    """
    if self._pos or not self._renvoi:
        return self._pos
    referenced = self._lemmatiseur.lemme(self._renvoi)
    if referenced:
        return referenced.pos()
    return self._pos
|
def hasAttribute(self, attrName):
    '''hasAttribute - Check for the existence of an attribute. Attribute
      names are all lowercase, so the lookup is case-insensitive.

    @param attrName <str> - The attribute name

    @return <bool> - True or False if attribute exists by that name
    '''
    return attrName.lower() in self._attributes
|
def update_share_image(liststore, tree_iters, col, large_col, pcs_files, dir_name, icon_size, large_icon_size):
    '''Download file thumbnails and display them in the liststore.

    Two columns per row are updated with the same picture at different
    scales.

    :param liststore: Gtk liststore holding the rows to update
    :param tree_iters: iterators of the rows matching `pcs_files`
    :param col: column index for the small (icon_size-wide) pixbuf
    :param large_col: column index for the unscaled pixbuf
    :param pcs_files: file records; each needs 'fs_id' and may carry a
        'thumbs' dict with url1/url2/url3 entries
    :param dir_name: cache directory where downloaded images are saved
    :param icon_size: width in pixels of the small thumbnail
    :param large_icon_size: large thumbnail size; not used in this body --
        the original pixbuf is stored as-is (TODO confirm intent)
    '''
    def update_image(filepath, tree_iter):
        # Scheduled via GLib.idle_add: load the cached image and store
        # both the scaled and the original pixbuf into the row.
        try:
            tree_path = liststore.get_path(tree_iter)
            if tree_path is None:
                return
            pix = GdkPixbuf.Pixbuf.new_from_file(filepath)
            width = pix.get_width()
            height = pix.get_height()
            # preserve aspect ratio when scaling down to icon_size
            small_pix = pix.scale_simple(icon_size, height * icon_size // width, GdkPixbuf.InterpType.NEAREST)
            liststore[tree_path][col] = small_pix
            liststore[tree_path][large_col] = pix
        except GLib.GError:
            logger.error(traceback.format_exc())

    def dump_image(url, filepath):
        # Download `url` into `filepath`; returns True on success.
        req = net.urlopen(url)
        if not req or not req.data:
            logger.warn('update_share_image:, failed to request %s' % url)
            return False
        with open(filepath, 'wb') as fh:
            fh.write(req.data)
        return True

    for tree_iter, pcs_file in zip(tree_iters, pcs_files):
        # pick a thumbnail url, preferring url2, then url1, then url3
        if 'thumbs' not in pcs_file:
            continue
        elif 'url2' in pcs_file['thumbs']:
            key = 'url2'
        elif 'url1' in pcs_file['thumbs']:
            key = 'url1'
        elif 'url3' in pcs_file['thumbs']:
            key = 'url3'
        else:
            continue
        fs_id = pcs_file['fs_id']
        url = pcs_file['thumbs'][key]
        filepath = os.path.join(dir_name, 'share-{0}.jpg'.format(fs_id))
        if os.path.exists(filepath) and os.path.getsize(filepath):
            # non-empty cached copy exists: update the UI directly
            GLib.idle_add(update_image, filepath, tree_iter)
        elif not url or len(url) < 10:
            logger.warn('update_share_image: failed to get url %s' % url)
        else:
            status = dump_image(url, filepath)
            if status:
                GLib.idle_add(update_image, filepath, tree_iter)
|
def authenticationRequest():
    """AUTHENTICATION REQUEST Section 9.2.2

    Build the Authentication Request message: protocol discriminator,
    message type 0x12, cipher key sequence number (with spare half
    octets) and the RAND authentication parameter.
    """
    pd = TpPd(pd=0x5)
    mes_type = MessageType(mesType=0x12)  # 00010010
    ciph_key_seq = CiphKeySeqNrAndSpareHalfOctets()
    rand = AuthenticationParameterRAND()
    return pd / mes_type / ciph_key_seq / rand
|
def tryAccessModifiers(self, block):
    """Check for private, protected, public, signals etc. and assume we
    are in a class definition.

    If `block` is such an access-modifier label, locate the enclosing
    '{' and return its indentation increased CFG_ACCESS_MODIFIERS times;
    return None otherwise.
    """
    if CFG_ACCESS_MODIFIERS < 0:
        return None
    label_pattern = r'^\s*((public|protected|private)\s*(slots|Q_SLOTS)?|(signals|Q_SIGNALS)\s*):\s*$'
    if not re.match(label_pattern, block.text()):
        return None
    try:
        opening_block, _column = self.findBracketBackward(block, 0, '{')
    except ValueError:
        return None
    indentation = self._blockIndent(opening_block)
    for _ in range(CFG_ACCESS_MODIFIERS):
        indentation = self._increaseIndent(indentation)
    dbg("tryAccessModifiers: success in line %d" % opening_block.blockNumber())
    return indentation
|
def lazy(func):
    """Decorator, which can be used for lazy imports.

    @lazy
    def yaml():
        import yaml
        return yaml
    """
    try:
        caller_frame = sys._getframe(1)
    except Exception:
        caller_locals = None
    else:
        caller_locals = caller_frame.f_locals
    name = func.func_name if six.PY2 else func.__name__
    return LazyStub(name, func, caller_locals)
|
def get_current_shutit_pexpect_session_environment(self, note=None):
    """Return the current environment from the currently-set default
    pexpect child, or None when there is no current session.
    """
    self.handle_note(note)
    session = self.get_current_shutit_pexpect_session()
    environment = session.current_environment if session is not None else None
    self.handle_note_after(note)
    return environment
|
def isiterable(element, exclude=None):
    """Check whether or not the input element is an iterable.

    :param element: element to check among iterable types.
    :param type/tuple exclude: not allowed types in the test.

    :Example:

    >>> isiterable({})
    True
    >>> isiterable({}, exclude=dict)
    False
    >>> isiterable({}, exclude=(dict,))
    False
    """
    # excluded types fail immediately, regardless of iterability
    if exclude is not None and isinstance(element, exclude):
        return False
    return isinstance(element, Iterable)
|
def get_current_branch(self) -> str:
    """Return the name of the repository's currently active branch.

    :return: current branch
    :rtype: str
    """
    branch_name: str = self.repo.active_branch.name
    LOGGER.debug('current branch: %s', branch_name)
    return branch_name
|
def _update_header(orig_vcf, base_file, new_lines, chrom_process_fn=None):
    """Fix header with additional lines and remapping of generic sample names.

    Copies the '##' meta lines from `orig_vcf`, appends `new_lines`, then
    writes the '#CHROM' line (optionally transformed by `chrom_process_fn`).

    :return: path of the written header file
    """
    new_header = "%s-sample_header.txt" % utils.splitext_plus(base_file)[0]
    with open(new_header, "w") as out_handle:
        chrom_line = None
        with utils.open_gzipsafe(orig_vcf) as in_handle:
            for line in in_handle:
                if not line.startswith("##"):
                    # first non-meta line is the #CHROM header
                    chrom_line = line
                    break
                out_handle.write(line)
        assert chrom_line is not None
        for extra in new_lines:
            out_handle.write(extra + "\n")
        if chrom_process_fn:
            chrom_line = chrom_process_fn(chrom_line)
        out_handle.write(chrom_line)
    return new_header
|
def configure(self, settings_module=None, **kwargs):
    """Allow the user to reconfigure the settings object, passing a new
    settings module or separated kwargs.

    :param settings_module: defines the settings file
    :param kwargs: override default settings
    """
    default_settings.reload()
    envvar_name = self._kwargs.get("ENVVAR_FOR_DYNACONF",
                                   default_settings.ENVVAR_FOR_DYNACONF)
    # fall back to the environment variable when no module was given
    settings_module = settings_module or os.environ.get(envvar_name)
    compat_kwargs(kwargs)
    kwargs.update(self._kwargs)
    self._wrapped = Settings(settings_module=settings_module, **kwargs)
    self.logger.debug("Lazy Settings configured ...")
|
def iterate(self, image, feature_extractor, feature_vector):
    """iterate(image, feature_extractor, feature_vector) -> bounding_box

    Scale the given image and extract features from all sampled bounding
    boxes, filling the given pre-allocated feature vector and yielding
    each box.

    **Parameters:**

    ``image`` : array_like (2D)
      The given image to extract features for

    ``feature_extractor`` : :py:class:`FeatureExtractor`
      The feature extractor used on the sampled patches

    ``feature_vector`` : :py:class:`numpy.ndarray` (1D, uint16)
      Pre-allocated vector filled inside this function; needs to be of
      size :py:attr:`FeatureExtractor.number_of_features`

    **Yields:**

    ``bounding_box`` : :py:class:`BoundingBox`
      The bounding box for which the current features are extracted for
    """
    for scale, scaled_shape in self.scales(image):
        # prepare the feature extractor for this scale of the image
        feature_extractor.prepare(image, scale)
        for box in self.sample_scaled(scaled_shape):
            feature_extractor.extract_indexed(box, feature_vector)
            # yield the box mapped back to original-image coordinates
            yield box.scale(1. / scale)
|
def get_or_set_score(self, member, default=0):
    """If *member* is in the collection, return its score. If not, store
    it with a score of *default* and return *default*. *default*
    defaults to 0.
    """
    default = float(default)

    def get_or_set_score_trans(pipe):
        pickled_member = self._pickle(member)
        score = pipe.zscore(self.key, pickled_member)
        if score is None:
            # reuse the already-pickled member instead of pickling twice
            pipe.zadd(self.key, {pickled_member: default})
            return default
        return score

    return self._transaction(get_or_set_score_trans)
|
def Get(self, path, follow_symlink=True):
    """Stat the given file, returning a cached result when available.

    Args:
      path: A path to the file to perform `stat` on.
      follow_symlink: True if `stat` of a symlink should be returned
        instead of a file that it points to. For non-symlinks this
        setting has no effect.

    Returns:
      `Stat` object corresponding to the given path.
    """
    key = self._Key(path=path, follow_symlink=follow_symlink)
    if key in self._cache:
        return self._cache[key]
    value = Stat.FromPath(path, follow_symlink=follow_symlink)
    self._cache[key] = value
    # A non-symlink stats the same whether or not symlinks are followed,
    # so the result can also be cached under the follow_symlink=True key.
    if not follow_symlink and not value.IsSymlink():
        self._cache[self._Key(path=path, follow_symlink=True)] = value
    return value
|
def dre_dtau(self, pars):
    r""":math: Add formula

    Sets internal parameters from `pars`, then evaluates the three-term
    derivative expression using the updated attributes.
    """
    self._set_parameters(pars)
    cos_ang = np.cos(self.ang)
    # term 1
    term1 = (self.c * self.w * self.otc1 * cos_ang) / self.denom
    # term 2
    term2 = (1 + self.otc * cos_ang) / self.denom ** 2
    # term 3
    term3 = 2 * self.c * self.w * self.otc1 * cos_ang + self.otc2
    return self.sigmai * self.m * (term1 + term2 * term3)
|
def _indirect_jump_resolved(self, jump, jump_addr, resolved_by, targets):
    """Called when an indirect jump is successfully resolved.

    :param IndirectJump jump: The resolved indirect jump.
    :param jump_addr: Address of the jump; unused here, `jump.addr` is
        used instead.
    :param IndirectJumpResolver resolved_by: The resolver used to resolve
        this indirect jump.
    :param list targets: List of indirect jump targets.

    :return: None
    """
    from .indirect_jump_resolvers.jumptable import JumpTableResolver
    source_addr = jump.addr
    if isinstance(resolved_by, JumpTableResolver):
        # Fill in the jump_tables dict
        self.jump_tables[jump.addr] = jump
    jump.resolved_targets = targets
    all_targets = set(targets)
    for addr in all_targets:
        # a target is "outside" if it is a known function start or lies
        # in a different section than the jump itself
        to_outside = addr in self.functions or not self._addrs_belong_to_same_section(jump.addr, addr)
        # TODO: get a better estimate of the function address
        target_func_addr = jump.func_addr if not to_outside else addr
        func_edge = FunctionTransitionEdge(self._nodes[source_addr], addr, jump.func_addr, to_outside=to_outside, dst_func_addr=target_func_addr)
        # queue a CFG job for each resolved target
        job = CFGJob(addr, target_func_addr, jump.jumpkind, last_addr=source_addr, src_node=self._nodes[source_addr], src_ins_addr=None, src_stmt_idx=None, func_edges=[func_edge], )
        self._insert_job(job)
        self._register_analysis_job(target_func_addr, job)
    self._deregister_analysis_job(jump.func_addr, jump)
    CFGBase._indirect_jump_resolved(self, jump, jump.addr, resolved_by, targets)
|
def binary_tlv_to_python(binary_string, result=None):
    """Recursively decode a binary string and store output in result object.

    :param binary_string: a bytearray object of tlv data
    :param result: result store for recursion
    :return: dict mapping item ids (as strings) to decoded values;
        MULTI items decode into nested dicts
    """
    result = {} if result is None else result
    if not binary_string:
        return result
    # the first byte encodes the item type and the id/length field sizes
    byte = binary_string[0]
    kind = byte & type_mask
    id_length = get_id_length(byte)
    payload_length = get_value_length(byte)
    # start after the type indicator
    offset = 1
    item_id = str(combine_bytes(binary_string[offset:offset + id_length]))
    offset += id_length
    # get length of payload from specifier
    value_length = payload_length
    if byte & length_type_mask != LengthTypes.SET_BYTE:
        # length is carried in the following `payload_length` bytes
        # rather than in the type byte itself
        value_length = combine_bytes(binary_string[offset:offset + payload_length])
        offset += payload_length
    if kind == Types.MULTI:
        # container item: decode its payload into a nested dict
        binary_tlv_to_python(binary_string[offset:offset + value_length], result.setdefault(item_id, {}))
    else:
        value_binary = binary_string[offset:offset + value_length]
        # NOTE(review): payloads containing a zero byte are combined into
        # an int, others decoded as utf8 -- confirm this heuristic
        result[item_id] = (combine_bytes(value_binary) if not all(value_binary) else value_binary.decode('utf8'))
    offset += value_length
    # continue with the sibling items following this one
    binary_tlv_to_python(binary_string[offset:], result)
    return result
|
def vinet_k(p, v0, k0, k0p, numerical=False):
    """Calculate bulk modulus; wrapper for cal_k_vinet.

    Cannot handle uncertainties directly, hence the uct.wrap call.

    :param p: pressure in GPa
    :param v0: unit-cell volume in A^3 at 1 bar
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at reference conditions
    :param numerical: unused here; kept for interface compatibility
    :return: bulk modulus at high pressure in GPa
    """
    wrapped = uct.wrap(cal_k_vinet)
    return wrapped(p, [v0, k0, k0p])
|
def get_xyz(self, list_of_names=None):
    """Get xyz coordinates for these electrodes.

    Parameters
    ----------
    list_of_names : list of str
        list of electrode names to use; all electrodes when None

    Returns
    -------
    list of tuples of 3 floats (x, y, z)
        list of xyz coordinates for all the electrodes

    TODO
    ----
    coordinate system of electrodes
    """
    def to_xyz(electrode):
        return (float(electrode['x']), float(electrode['y']), float(electrode['z']))

    if list_of_names is None:
        name_filter = None
    else:
        def name_filter(electrode):
            return electrode['name'] in list_of_names

    return self.electrodes.get(filter_lambda=name_filter, map_lambda=to_xyz)
|
def wheel(self, load):
    '''Send a master control function back to the wheel system.

    Authenticates the request via eauth, authorizes the user against the
    wheel ACL, then runs the requested wheel function, firing "new" and
    "ret" events on the event bus.

    :param load: request payload; must contain 'fun' (and 'kwarg' for
        non-user auth types) plus the auth data
    :return: {'tag': ..., 'data': ...} with the call result, or
        {'error': ...} when authentication/authorization fails
    '''
    # All wheel ops pass through eauth
    auth_type, err_name, key = self._prep_auth_info(load)

    # Authenticate
    auth_check = self.loadauth.check_authentication(load, auth_type, key=key, show_username=True)
    error = auth_check.get('error')
    if error:
        # Authentication error occurred: do not continue.
        return {'error': error}

    # Authorize
    username = auth_check.get('username')
    if auth_type != 'user':
        wheel_check = self.ckminions.wheel_check(auth_check.get('auth_list', []), load['fun'], load['kwarg'])
        if not wheel_check:
            return {'error': {'name': err_name, 'message': 'Authentication failure of type "{0}" occurred for ' 'user {1}.'.format(auth_type, username)}}
        elif isinstance(wheel_check, dict) and 'error' in wheel_check:
            # A dictionary with an error name/message was handled by ckminions.wheel_check
            return wheel_check

    # Authenticated. Do the job.
    jid = salt.utils.jid.gen_jid(self.opts)
    fun = load.pop('fun')
    tag = salt.utils.event.tagify(jid, prefix='wheel')
    data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': username}
    try:
        # fire the "new job" event, run the function, then fire the
        # return event carrying the result
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'new'], 'wheel'))
        ret = self.wheel_.call_func(fun, **load)
        data['return'] = ret
        data['success'] = True
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag, 'data': data}
    except Exception as exc:
        # report the failure on the event bus as well
        log.exception('Exception occurred while introspecting %s', fun)
        data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(fun, exc.__class__.__name__, exc, )
        data['success'] = False
        self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag, 'data': data}
|
def get(self, section, key):
    """Yield the value of every line in *section* that matches *key*.

    The key is first normalised via ``_make_line`` and then matched
    against the section's lines with ``get_line``.

    Yields:
        values for matching lines.
    """
    pattern = self._make_line(key)
    for matched in self.get_line(section, pattern):
        yield matched.value
|
def log_task(task, logger=logging, level='info', propagate_fail=True, uuid=None):
    """Parameterized decorator: run the wrapped callable inside a LogTask.

    Example:
        >>> @log_task('mytask')
        ... def do_something():
        ...     pass
    """
    def _decorate(target):
        @wraps(target)
        def _invoke(*args, **kwargs):
            # Enter the logging context, then forward the call unchanged.
            with LogTask(task, logger=logger, level=level,
                         propagate_fail=propagate_fail, uuid=uuid):
                return target(*args, **kwargs)
        return _invoke
    return _decorate
|
def pcolormesh(self, *args, **kwargs):
    """Create a pseudocolor plot of a 2-D array.

    If a 3D or higher Data object is passed, a lower dimensional
    channel can be plotted, provided the ``squeeze`` of the channel
    has ``ndim == 2`` and the first two axes do not span dimensions
    other than those spanned by that channel.

    Uses pcolor_helper to ensure that color boundaries are drawn
    bisecting point positions, when possible.  Quicker than pcolor.

    Parameters
    ----------
    data : 2D WrightTools.data.Data object
        Data to plot.
    channel : int or string (optional)
        Channel index or name. Default is 0.
    dynamic_range : boolean (optional)
        Force plotting of all contours, overloading for major extent.
        Only applies to signed data. Default is False.
    autolabel : {'none', 'both', 'x', 'y'} (optional)
        Parameterize application of labels directly from data object.
        Default is none.
    xlabel : string (optional)
        xlabel. Default is None.
    ylabel : string (optional)
        ylabel. Default is None.
    **kwargs
        matplotlib.axes.Axes.pcolormesh optional keyword arguments.
        https://matplotlib.org/api/_as_gen/matplotlib.pyplot.pcolormesh.html

    Returns
    -------
    matplotlib.collections.QuadMesh
    """
    # Translate the Data-object signature into plain arrays/kwargs
    # understood by the underlying matplotlib Axes method.
    args, kwargs = self._parse_plot_args(*args, **kwargs, plot_type="pcolormesh")
    # Delegate the actual drawing to the parent Axes implementation.
    return super().pcolormesh(*args, **kwargs)
|
def _get_filter(sdk_filter, attr_map):
    """Common functionality for filter structures.

    Rewrites an SDK-style filter into the flattened form used downstream,
    handling 'custom_attributes' entries separately with a
    'custom_attributes__' key prefix.

    :param sdk_filter: ``{field: constraint, field: {operator: constraint}, ...}``
    :param attr_map: field-renaming map handed to ``_normalise_key_values``
    :return: ``{field__operator: constraint, ...}``
    :raises CloudValueError: if *sdk_filter* is not a dictionary
    """
    if not isinstance(sdk_filter, dict):
        raise CloudValueError('filter value must be a dictionary, was %r' % (sdk_filter,))
    # Work on a shallow copy: the previous implementation popped
    # 'custom_attributes' straight off the caller's dict, silently
    # mutating the argument.
    sdk_filter = dict(sdk_filter)
    custom = sdk_filter.pop('custom_attributes', {})
    new_filter = _normalise_key_values(filter_obj=sdk_filter, attr_map=attr_map)
    new_filter.update({
        'custom_attributes__%s' % k: v
        for k, v in _normalise_key_values(filter_obj=custom).items()
    })
    return new_filter
|
def api_notifications():
    """Receive MTurk REST notifications.

    Reads the event type and assignment id from the request's form
    values ('Event.1.EventType' / 'Event.1.AssignmentId'), enqueues
    them for asynchronous handling by ``worker_function`` on the rq
    queue ``q``, and acknowledges immediately with a success response.
    """
    event_type = request.values['Event.1.EventType']
    assignment_id = request.values['Event.1.AssignmentId']
    # Add the notification to the queue.  worker_function receives
    # (event_type, assignment_id, None); the meaning of the third
    # argument is defined at the worker -- TODO confirm there.
    db.logger.debug('rq: Queueing %s with id: %s for worker_function', event_type, assignment_id)
    q.enqueue(worker_function, event_type, assignment_id, None)
    db.logger.debug('rq: Submitted Queue Length: %d (%s)', len(q), ', '.join(q.job_ids))
    return success_response(request_type="notification")
|
def make_and_return_path_from_path_and_folder_names(path, folder_names):
    """For a given path, create a directory structure composed of a set of
    folders and return the path to the inner-most folder.

    For example, if path = '/path/to/folders' and
    folder_names = ['folder1', 'folder2'], the directory created is
    '/path/to/folders/folder1/folder2/' and that path is returned.
    If the folders already exist, the routine continues as normal.

    Fix over the previous version: a base path without a trailing '/'
    used to fuse the first folder onto the last path component
    ('/a/b' + 'f1/' -> '/a/bf1/').  The separator is now inserted.

    Parameters
    ----------
    path : str
        The path where the directories are created.
    folder_names : [str]
        The names of the folders which are created in the path directory.

    Returns
    -------
    path : str
        The path to the inner-most folder created, with a trailing '/'.

    Examples
    --------
    path = '/path/to/folders'
    path = make_and_return_path_from_path_and_folder_names(
        path=path, folder_names=['folder1', 'folder2'])
    """
    for folder_name in folder_names:
        # Ensure a separator before appending, so the first folder name
        # is not concatenated onto the last component of the base path.
        if not path.endswith('/'):
            path += '/'
        path += folder_name + '/'
        # exist_ok replaces the old try/except FileExistsError dance.
        os.makedirs(path, exist_ok=True)
    return path
|
def parse_text(self, text, **kwargs):
    """Parse *text* with the VISLCG3-based syntactic analyser.

    As a result of parsing, the input Text object obtains a new layer
    named LAYER_VISLCG3: a list of dicts, one per word token, each with
    (at minimum) 'start', 'end', 'sent_id' and 'parser_out', where
    'parser_out' holds [syntactic_label, index_of_the_head] analyses
    (head index is -1 for the root).

    Parameters
    ----------
    text : estnltk.text.Text
        The input text to analyse for dependency relations.
    apply_tag_analysis : bool
        If True, run ``text.tag_analysis()`` (morphological analysis and
        disambiguation) before the syntactic analysis.  The parser does
        its own internal disambiguation regardless, but those results do
        not reach back to the Text object.  Default: False.
    return_type : str
        One of "text" (default; return the input Text), "vislcg3"
        (return VISLCG3's raw output lines), "trees" (EstNLTK Tree
        objects), or "dep_graphs" (NLTK DependencyGraph objects).
        Whatever the return type, LAYER_VISLCG3 is attached to *text*.
    augment_words : bool
        If True, also augment the words in *text* with SYNTAX_LABEL,
        SYNTAX_HEAD and DEPREL (legacy compatibility; likely to be
        deprecated).  Default: False.
    keep_old : bool
        If True, each dict keeps its previous analysis lines under
        'init_parser_out' when 'parser_out' is overwritten.
        Default: False.

    Other keyword arguments are forwarded to
    ``vislcg3_syntax.process_lines()``, ``align_cg3_with_Text()`` and
    ``normalise_alignments()``.
    """
    # a) get the configuration:
    apply_tag_analysis = False
    augment_words = False
    all_return_types = ["text", "vislcg3", "trees", "dep_graphs"]
    return_type = all_return_types[0]
    # Option names are matched case-insensitively.
    for argName, argVal in kwargs.items():
        if argName.lower() == 'return_type':
            if argVal.lower() in all_return_types:
                return_type = argVal.lower()
            else:
                raise Exception(' Unexpected return type: ', argVal)
        elif argName.lower() == 'augment_words':
            augment_words = bool(argVal)
        elif argName.lower() == 'apply_tag_analysis':
            apply_tag_analysis = bool(argVal)
    # Force the settings the pipeline below depends on; the remove_*/
    # keep_old options keep any caller-supplied value as a default.
    kwargs['split_result'] = True
    kwargs['clean_up'] = True
    kwargs['remove_clo'] = kwargs.get('remove_clo', True)
    kwargs['remove_cap'] = kwargs.get('remove_cap', True)
    kwargs['keep_old'] = kwargs.get('keep_old', False)
    kwargs['double_quotes'] = 'unesc'
    # b) process:
    if apply_tag_analysis:
        text = text.tag_analysis()
    # Pipeline: preprocess -> run VISLCG3 -> align its output back onto
    # the Text -> normalise the alignments into the layer format.
    result_lines1 = self.preprocessor.process_Text(text, **kwargs)
    result_lines2 = self.vislcg3_processor.process_lines(result_lines1, **kwargs)
    alignments = align_cg3_with_Text(result_lines2, text, **kwargs)
    alignments = normalise_alignments(alignments, data_type=VISLCG3_DATA, **kwargs)
    # c) attach & return results
    text[LAYER_VISLCG3] = alignments
    if augment_words:
        self._augment_text_w_syntactic_info(text, text[LAYER_VISLCG3])
    if return_type == "vislcg3":
        return result_lines2
    elif return_type == "trees":
        return build_trees_from_text(text, layer=LAYER_VISLCG3, **kwargs)
    elif return_type == "dep_graphs":
        trees = build_trees_from_text(text, layer=LAYER_VISLCG3, **kwargs)
        graphs = [tree.as_dependencygraph() for tree in trees]
        return graphs
    else:
        return text
|
def find_tool(name, additional_paths=[], path_last=False):
    """Attempts to find tool (binary) named 'name' in PATH and in
    'additional_paths'.

    If found in PATH, returns 'name'.  If found in additional paths,
    returns full name.  If the tool is found in several directories,
    returns the first path found.  Otherwise, returns the empty string.
    If 'path_last' is specified, PATH is checked after
    'additional_paths'.

    NOTE(review): ``additional_paths=[]`` is a mutable default argument;
    it is never mutated here so it is harmless, but a ``None`` sentinel
    would be safer.
    """
    assert isinstance(name, basestring)  # Python-2 codebase (basestring)
    assert is_iterable_typed(additional_paths, basestring)
    assert isinstance(path_last, (int, bool))
    programs = path.programs_path()
    # Search for both the bare name and a Windows-style '.exe' variant.
    match = path.glob(programs, [name, name + '.exe'])
    additional_match = path.glob(additional_paths, [name, name + '.exe'])
    result = []
    if path_last:
        # Prefer hits from additional_paths, fall back to PATH.
        result = additional_match
        if not result and match:
            result = match
    else:
        # Prefer hits from PATH, fall back to additional_paths.
        if match:
            result = match
        elif additional_match:
            result = additional_match
    if result:
        return path.native(result[0])
    else:
        return ''
|
def highlightCharacters(self, widgetObj, setPos, colorCode, fontWeight, charFormat=None):
    """Change the character format of one or more characters.

    If ``charFormat`` is **None** then only the color and font weight of
    the characters are changed to ``colorCode`` and ``fontWeight``,
    respectively; otherwise the per-character formats from
    ``charFormat`` are applied (typically to undo a previous call).

    | Args |
        * ``widgetObj`` (**QWidget**): the ``QTextEdit`` holding the
          characters.
        * ``setPos`` (**tuple** of **int**): character positions inside
          the widget; negative positions are skipped.
        * ``colorCode`` (**QColor**): e.g. ``QtCore.Qt.blue``.
        * ``fontWeight`` (**int**): font weight.
        * ``charFormat`` (**QTextCharFormat**): the character format to
          apply (see Qt documentation for details).

    | Returns |
        * **list**: the original character format of the replaced
          characters (same length as ``setPos``; **None** entries for
          skipped positions).

    | Raises |
        * **None**
    """
    # Get the text cursor and remember its position so it can be
    # restored after the highlighting pass.
    textCursor = widgetObj.textCursor()
    oldPos = textCursor.position()
    retVal = []
    # Change the character formats of all the characters placed at
    # the positions ``setPos``.
    for ii, pos in enumerate(setPos):
        # Extract the position of the character to modify.
        # NOTE(review): redundant -- ``pos`` is already bound by
        # enumerate above.
        pos = setPos[ii]
        # Ignore invalid positions.  This can happen if the second
        # character does not exist and the find-functions in the
        # ``cursorPositionChangedEvent`` method returned '-1'.  Also,
        # store **None** as the format for this non-existent character.
        if pos < 0:
            retVal.append(None)
            continue
        # Move the text cursor to the specified character position
        # and store its original character format (necessary to
        # "undo" the highlighting once the cursor was moved away
        # again).
        textCursor.setPosition(pos)
        retVal.append(textCursor.charFormat())
        # Change the character format.  Either use the supplied
        # one, or use a generic one.
        if charFormat:
            # Use a specific character format (usually used to
            # undo the changes a previous call to
            # 'highlightCharacters' has made).
            fmt = charFormat[ii]
        else:
            # Modify the color and weight of the current character format.
            fmt = textCursor.charFormat()
            # Get the brush and specify its foreground color and
            # style.  In order to see the characters it is
            # necessary to explicitly specify a solidPattern style
            # but I have no idea why.
            myBrush = fmt.foreground()
            myBrush.setColor(colorCode)
            myBrush.setStyle(QtCore.Qt.SolidPattern)
            fmt.setForeground(myBrush)
            fmt.setFontWeight(fontWeight)
        # Select the character and apply the selected format.
        textCursor.movePosition(QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor)
        textCursor.setCharFormat(fmt)
    # Apply the textcursor to the current element.
    textCursor.setPosition(oldPos)
    widgetObj.setTextCursor(textCursor)
    return retVal
|
def clear_text(self, label):
    """Remove the text entry stored under ``label``.

    Raises NotFound when no text with that label exists on the form.
    """
    texts = self.my_osid_object_form._my_map['texts']
    if label not in texts:
        raise NotFound()
    del texts[label]
|
def _is_2D_matrix ( matrix ) :
"""Checks to see if a ndarray is 2D or a list of lists is 2D"""
|
return ( ( isinstance ( matrix [ 0 ] , list ) and _rectangular ( matrix ) and not isinstance ( matrix [ 0 ] [ 0 ] , list ) ) or ( not isinstance ( matrix , list ) and matrix . shape == 2 ) )
|
def dataframe(self):
    """Return a pandas DataFrame containing all other class properties
    and values.  The index for the DataFrame is the string abbreviation
    of the team, such as 'PURDUE'.
    """
    # Every column name matches the property of the same name, so the
    # single row can be assembled generically with getattr instead of
    # one dict literal per field.  Order is preserved for the columns.
    field_names = [
        'abbreviation', 'assist_percentage', 'assists', 'away_losses',
        'away_wins', 'block_percentage', 'blocks', 'conference',
        'conference_losses', 'conference_wins', 'defensive_rebounds',
        'effective_field_goal_percentage', 'field_goal_attempts',
        'field_goal_percentage', 'field_goals', 'free_throw_attempt_rate',
        'free_throw_attempts', 'free_throw_percentage', 'free_throws',
        'free_throws_per_field_goal_attempt', 'games_played', 'home_losses',
        'home_wins', 'losses', 'minutes_played', 'name', 'net_rating',
        'offensive_rating', 'offensive_rebound_percentage',
        'offensive_rebounds', 'opp_assist_percentage', 'opp_assists',
        'opp_block_percentage', 'opp_blocks', 'opp_defensive_rebounds',
        'opp_effective_field_goal_percentage', 'opp_field_goal_attempts',
        'opp_field_goal_percentage', 'opp_field_goals',
        'opp_free_throw_attempt_rate', 'opp_free_throw_attempts',
        'opp_free_throw_percentage', 'opp_free_throws',
        'opp_free_throws_per_field_goal_attempt', 'opp_offensive_rating',
        'opp_offensive_rebound_percentage', 'opp_offensive_rebounds',
        'opp_personal_fouls', 'opp_points', 'opp_steal_percentage',
        'opp_steals', 'opp_three_point_attempt_rate',
        'opp_three_point_field_goal_attempts',
        'opp_three_point_field_goal_percentage',
        'opp_three_point_field_goals', 'opp_two_point_field_goal_attempts',
        'opp_two_point_field_goal_percentage', 'opp_two_point_field_goals',
        'opp_total_rebound_percentage', 'opp_total_rebounds',
        'opp_true_shooting_percentage', 'opp_turnover_percentage',
        'opp_turnovers', 'pace', 'personal_fouls', 'points',
        'simple_rating_system', 'steal_percentage', 'steals',
        'strength_of_schedule', 'three_point_attempt_rate',
        'three_point_field_goal_attempts',
        'three_point_field_goal_percentage', 'three_point_field_goals',
        'two_point_field_goal_attempts', 'two_point_field_goal_percentage',
        'two_point_field_goals', 'total_rebound_percentage',
        'total_rebounds', 'true_shooting_percentage', 'turnover_percentage',
        'turnovers', 'win_percentage', 'wins',
    ]
    row = {field: getattr(self, field) for field in field_names}
    return pd.DataFrame([row], index=[self._abbreviation])
|
def get_extents(self):
    """Measure the extents of the sprite's graphics.

    Replays the sprite's (and its parents') recorded drawing paths into
    a throwaway cairo context purely to measure them; returns a gdk
    rectangle, or None when there is nothing to measure or the clipped
    extent is empty.  Side effects: clears ``_sprite_dirty`` and stores
    the measuring context in ``_stroke_context``.
    """
    if self._sprite_dirty:
        # redrawing merely because we need fresh extents of the sprite
        # (the A1 0x0 surface is never shown; it only records paths).
        context = cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))
        context.transform(self.get_matrix())
        self.emit("on-render")
        self.__dict__["_sprite_dirty"] = False
        self.graphics._draw(context, 1)
    # If no paths were recorded, force one draw pass to populate them.
    if not self.graphics.paths:
        self.graphics._draw(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)), 1)
    if not self.graphics.paths:
        return None
    context = cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))
    # bit of a hack around the problem - looking for clip instructions in parent
    # so extents would not get out of it
    clip_extents = None
    for parent in self.get_parents():
        context.transform(parent.get_local_matrix())
        if parent.graphics.paths:
            clip_regions = []
            for instruction, type, path in parent.graphics.paths:
                if instruction == "clip":
                    # Measure the clip path in device space (identity
                    # matrix) and remember it as an active clip region.
                    context.append_path(path)
                    context.save()
                    context.identity_matrix()
                    clip_regions.append(context.fill_extents())
                    context.restore()
                    context.new_path()
                elif instruction == "restore" and clip_regions:
                    # 'restore' pops the most recent clip.
                    clip_regions.pop()
            # Intersect all still-active clip regions into one rectangle.
            for ext in clip_regions:
                ext = get_gdk_rectangle(int(ext[0]), int(ext[1]), int(ext[2] - ext[0]), int(ext[3] - ext[1]))
                intersect, clip_extents = gdk.rectangle_intersect((clip_extents or ext), ext)
    context.transform(self.get_local_matrix())
    # Replay this sprite's own recorded instructions to build the path.
    for instruction, type, path in self.graphics.paths:
        if type == "path":
            context.append_path(path)
        else:
            getattr(context, instruction)(*path)
    context.identity_matrix()
    ext = context.path_extents()
    ext = get_gdk_rectangle(int(ext[0]), int(ext[1]), int(ext[2] - ext[0]), int(ext[3] - ext[1]))
    if clip_extents:
        intersect, ext = gdk.rectangle_intersect(clip_extents, ext)
    if not ext.width and not ext.height:
        ext = None
    # Keep the context around for later hit-testing / stroking queries.
    self.__dict__['_stroke_context'] = context
    return ext
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.