| signature | implementation |
|---|---|
def stopService(self):
    """Stop calling persistent timed events and cancel any pending timer."""
    super(_SiteScheduler, self).stopService()
    pending = self.timer
    if pending is not None:
        pending.cancel()
        self.timer = None
def n1qlQueryEx(self, cls, *args, **kwargs):
    """Execute a N1QL statement providing a custom handler for rows.

    This method allows you to define your own subclass (of
    :class:`~AsyncN1QLRequest`) which can handle rows as they are
    received from the network.

    :param cls: The subclass (not instance) to use
    :param args: Positional arguments for the class constructor
    :param kwargs: Keyword arguments for the class constructor

    .. seealso:: :meth:`queryEx`, around which this method wraps
    """
    kwargs['itercls'] = cls
    request = super(AsyncBucket, self).n1ql_query(*args, **kwargs)
    if self.connected:
        request.start()
    else:
        # Defer starting the request until the connection comes up.
        self.connect().addCallback(lambda _: request.start())
    return request
def pull(self, key, default=None):
    """Pull an item from the collection: return its value and remove the key.

    :param key: The key
    :type key: mixed
    :param default: The default value returned when the key is absent
    :type default: mixed
    :rtype: mixed
    """
    value = self.get(key, default)
    self.forget(key)
    return value
async def wait(self, need_pts=False) -> dict:
    """Send long poll request.

    :param need_pts: need return the pts field
    :return: decoded long-poll response dict once the request succeeds
    """
    if not self.base_url:
        # No long-poll server known yet (or it was reset after a failure):
        # ask the API for a fresh endpoint before polling.
        await self._get_long_poll_server(need_pts)
    params = {'ts': self.ts, 'key': self.key, }
    params.update(self.base_params)
    # invalid mimetype from server
    code, response = await self.api._session.driver.get_text(
        self.base_url, params, timeout=2 * self.base_params['wait'])
    if code == 403:
        raise VkLongPollError(403, 'smth weth wrong', self.base_url + '/', params)
    response = json.loads(response)
    failed = response.get('failed')
    if not failed:
        # Success: remember the new timestamp and hand the events back.
        self.ts = response['ts']
        return response
    if failed == 1:
        # Event history outdated; server supplies a fresh ts to resume from.
        self.ts = response['ts']
    elif failed == 4:
        raise VkLongPollError(4, 'An invalid version number was passed in the version parameter', self.base_url + '/', params)
    else:
        # failed == 2 or 3: key/server expired -- force re-acquisition above.
        self.base_url = None
    # Retry. NOTE(review): need_pts is not propagated to the retry -- confirm intended.
    return await self.wait()
def split_ls(func):
    """Decorator that splits file lists into chunks small enough to stay
    under the Windows command-line length limit (``CHAR_LIMIT``).

    :param func: Function called once per chunk; must accept
        ``(self, files, silent, exclude_deleted)`` and return a list.
    :type func: function
    :return: Wrapped function that concatenates the per-chunk results.
    """
    @wraps(func)
    def wrapper(self, files, silent=True, exclude_deleted=False):
        if not isinstance(files, (tuple, list)):
            files = [files]
        counter = 0   # running character count of the current chunk
        index = 0     # number of files accumulated in the current chunk
        results = []
        while files:
            if index >= len(files):
                # Everything remaining fits into one final chunk.
                results += func(self, files, silent, exclude_deleted)
                break
            length = len(str(files[index]))
            if length + counter > CHAR_LIMIT:
                # Flush the accumulated chunk.  Always take at least one
                # file: the original emitted an empty chunk (and looped
                # forever) when a single path alone exceeded CHAR_LIMIT.
                split_at = max(index, 1)
                runfiles = files[:split_at]
                files = files[split_at:]
                counter = 0
                index = 0
                results += func(self, runfiles, silent, exclude_deleted)
            else:
                index += 1
                counter += length
        return results
    return wrapper
def yaml_to_file(data: Mapping, output_dir: str, name: str) -> str:
    """Save the given object as round-trip YAML under the given directory.

    :param data: dict/list to be dumped
    :param output_dir: target output directory
    :param name: target filename
    :return: full path of the written file
    """
    target = path.join(output_dir, name)
    with open(target, 'w') as handle:
        yaml.dump(data, handle, Dumper=ruamel.yaml.RoundTripDumper)
    return target
def update(self, query, payload):
    """Update the single record matched by *query* with *payload*.

    :param query: Dictionary, string or :class:`QueryBuilder` object
    :param payload: Dictionary of fields to write
    :return:
        - Dictionary of the updated record
    :raises InvalidUsage: if payload is not a dict
    """
    if not isinstance(payload, dict):
        raise InvalidUsage("Update payload must be of type dict")
    matched = self.get(query).one()
    # Point the request URL at the matched record before issuing the PUT.
    self._url = self._url_builder.get_appended_custom("/{0}".format(matched['sys_id']))
    return self._get_response('PUT', data=json.dumps(payload))
def evaluate(self, reference_scene_list, estimated_scene_list=None, estimated_scene_probabilities=None):
    """Evaluate file pair (reference and estimated).

    Parameters
    ----------
    reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Reference scene list.
    estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Estimated scene list. Default value None
    estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
        Estimated scene probabilities. Currently not used. Default value None

    Returns
    -------
    self
    """
    if estimated_scene_list is None and estimated_scene_probabilities is None:
        raise ValueError("Nothing to evaluate, give at least estimated_scene_list or estimated_scene_probabilities")

    # Make sure reference_scene_list is dcase_util.containers.MetaDataContainer.
    # BUGFIX: the original tested isinstance(estimated_scene_list, ...) here,
    # so the reference list was only converted by accident.
    if not isinstance(reference_scene_list, dcase_util.containers.MetaDataContainer):
        reference_scene_list = dcase_util.containers.MetaDataContainer(reference_scene_list)

    # Make sure estimated_scene_list is dcase_util.containers.MetaDataContainer
    if not isinstance(estimated_scene_list, dcase_util.containers.MetaDataContainer):
        estimated_scene_list = dcase_util.containers.MetaDataContainer(estimated_scene_list)

    # Make sure estimated_scene_probabilities is dcase_util.containers.ProbabilityContainer
    if estimated_scene_probabilities is not None:
        if not isinstance(estimated_scene_probabilities, dcase_util.containers.ProbabilityContainer):
            estimated_scene_probabilities = dcase_util.containers.ProbabilityContainer(estimated_scene_probabilities)

    # Translate "file" field to "filename"
    for item in reference_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']
    for item in estimated_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']

    # Index reference items by filename (first occurrence wins, matching the
    # original first-match linear scan, but in O(n + m) instead of O(n * m)).
    reference_by_filename = {}
    for reference_item in reference_scene_list:
        reference_by_filename.setdefault(reference_item['filename'], reference_item)

    y_true = []
    y_pred = []
    for estimated_item in estimated_scene_list:
        reference_item_matched = reference_by_filename.get(estimated_item['filename'], {})
        if not reference_item_matched:
            # BUGFIX: use 'filename' (guaranteed present after the translation
            # above); the original indexed 'file', which may be absent and
            # raised KeyError while building this very error message.
            raise ValueError("Cannot find reference_item for estimated item [{item}]".format(item=estimated_item['filename']))
        y_true.append(reference_item_matched['scene_label'])
        y_pred.append(estimated_item['scene_label'])

    y_true = numpy.array(y_true)
    y_pred = numpy.array(y_pred)

    # Accumulate per-scene and overall hit counts.
    Ncorr_overall = 0
    for scene_label in self.scene_label_list:
        true_id = numpy.where(y_true == scene_label)[0]
        pred_id = numpy.where(y_pred == scene_label)[0]
        # Correct = positions labelled scene_label in both reference and estimate.
        Ncorr = len(numpy.intersect1d(true_id, pred_id))
        Ncorr_overall += Ncorr
        self.scene_wise[scene_label]['Ncorr'] += Ncorr
        self.scene_wise[scene_label]['Nref'] += true_id.shape[0]
        self.scene_wise[scene_label]['Nsys'] += pred_id.shape[0]

    self.overall['Ncorr'] += Ncorr_overall
    self.overall['Nref'] += y_true.shape[0]
    self.overall['Nsys'] += y_pred.shape[0]

    return self
def plot_gaussian_2D(mu, lmbda, color='b', centermarker=True, label='', alpha=1., ax=None, artists=None):
    '''Plot the mean and covariance ellipsoid into the current axes.

    Must be 2D; ``lmbda`` is a covariance matrix.
    '''
    assert len(mu) == 2
    ax = ax if ax else plt.gca()

    # TODO use artists!

    # Unit circle (closed by appending t=0) mapped through the Cholesky
    # factor of the covariance to obtain the ellipse outline.
    t = np.hstack([np.arange(0, 2 * np.pi, 0.01), 0])
    circle = np.vstack([np.sin(t), np.cos(t)])
    ellipse = np.dot(np.linalg.cholesky(lmbda), circle)

    if artists is not None:
        # Reuse previously created artists instead of drawing new ones.
        line, point = artists
        if centermarker:
            point.set_offsets(np.atleast_2d(mu))
        line.set_xdata(ellipse[0, :] + mu[0])
        line.set_ydata(ellipse[1, :] + mu[1])
        line.set_alpha(alpha)
        line.set_color(color)
    else:
        point = ax.scatter([mu[0]], [mu[1]], marker='D', color=color, s=4, alpha=alpha) if centermarker else None
        line, = ax.plot(ellipse[0, :] + mu[0], ellipse[1, :] + mu[1],
                        linestyle='-', linewidth=2, color=color, label=label, alpha=alpha)
    return line, point
def set_deployment_id(self):
    """Set the deployment ID from deployment properties.

    Reads the 'cons3rt.deployment.id' property, converts it to an int and
    stores it on ``self.deployment_id``.  The attribute is left untouched
    when the property is missing or not an integer.

    :return: None
    """
    log = logging.getLogger(self.cls_logger + '.set_deployment_id')
    raw_value = self.get_value('cons3rt.deployment.id')
    if not raw_value:
        log.debug('Deployment ID not found in deployment properties')
        return
    try:
        parsed_id = int(raw_value)
    except ValueError:
        log.debug('Deployment ID found was unable to convert to an int: {d}'.format(d=raw_value))
        return
    self.deployment_id = parsed_id
    log.info('Found deployment ID: {i}'.format(i=str(self.deployment_id)))
def _getUE4BuildInterrogator(self):
    """Uses UE4BuildInterrogator to interrogate UnrealBuildTool about third-party library details."""
    def run_ubt(target, platform, config, args):
        # Delegate to our UnrealBuildTool runner in capture mode (True).
        return self._runUnrealBuildTool(target, platform, config, args, True)
    return UE4BuildInterrogator(
        self.getEngineRoot(),
        self._getEngineVersionDetails(),
        self._getEngineVersionHash(),
        run_ubt,
    )
def _converters_from_module(mod):
    """Collect AttributeConverter instances from a loaded attribute-map module.

    A map is any public module-level dict containing both "to" and "fro" keys.
    """
    acs = []
    for key, item in mod.__dict__.items():
        if key.startswith("__"):
            continue
        if isinstance(item, dict) and "to" in item and "fro" in item:
            atco = AttributeConverter(item["identifier"])
            atco.from_dict(item)
            acs.append(atco)
    return acs


def ac_factory(path=""):
    """Attribute Converter factory.

    :param path: The path to a directory where the attribute maps are expected
        to reside.  When empty, the bundled saml2.attributemaps are used.
    :return: A list of AttributeConverter instances
    """
    acs = []
    if path:
        if path not in sys.path:
            sys.path.insert(0, path)
        for fil in os.listdir(path):
            if fil.endswith(".py"):
                mod = import_module(fil[:-3])
                acs.extend(_converters_from_module(mod))
    else:
        from saml2 import attributemaps
        for typ in attributemaps.__all__:
            mod = import_module(".%s" % typ, "saml2.attributemaps")
            acs.extend(_converters_from_module(mod))
    return acs
def call_inputhook(self, input_is_ready_func):
    """Call the inputhook. (Called by a prompt-toolkit eventloop.)

    A helper thread blocks until input is ready, then writes one byte to
    the wake-up pipe so the inputhook (which may be running a foreign
    event loop) knows to return control.
    """
    self._input_is_ready = input_is_ready_func

    # Start thread that activates this pipe when there is input to process.
    def thread():
        input_is_ready_func(wait=True)
        os.write(self._w, b'x')
    threading.Thread(target=thread).start()

    # Call inputhook.
    self.inputhook(self)

    # Flush the read end of the pipe.
    try:
        # Before calling 'os.read', call select.select. This is required
        # when the gevent monkey patch has been applied. 'os.read' is never
        # monkey patched and won't be cooperative, so that would block all
        # other select() calls otherwise.
        # See: http://www.gevent.org/gevent.os.html
        # Note: On Windows, this is apparently not an issue.
        #       However, if we would ever want to add a select call, it
        #       should use `windll.kernel32.WaitForMultipleObjects`,
        #       because `select.select` can't wait for a pipe on Windows.
        if not is_windows():
            select_fds([self._r], timeout=None)
        os.read(self._r, 1024)
    except OSError:
        # This happens when the window resizes and a SIGWINCH was received.
        # We get 'Error: [Errno 4] Interrupted system call'
        # Just ignore.
        pass
    self._input_is_ready = None
def get_document(self, collection_id, ref=None, mimetype="application/tei+xml, application/xml"):
    """Make a document request on the DTS API.

    :param collection_id: Id of the collection
    :param ref: If ref is a tuple, it is treated as a range; string or int
        are treated as a single ref
    :param mimetype: Media type to request
    :return: Response
    :rtype: requests.Response
    """
    query = {"id": collection_id}
    # Expand ref (single value or range tuple) into request parameters.
    _parse_ref_parameters(query, ref)
    return self.call("documents", query, mimetype=mimetype)
def imsave(filename, image, normalize=False, format=None, quality=-1):
    """Convenience function that uses QImage.save to save an image to the
    given file.  Intentionally similar to scipy.misc.imsave, but supports
    different optional arguments:

    :param normalize: see :func:`array2qimage` (which is used internally)
    :param format: image filetype (e.g. 'PNG'), (default: check filename's suffix)
    :param quality: see QImage.save (0 = small .. 100 = uncompressed, -1 = default compression)
    :returns: boolean success, see QImage.save

    This function has been added in version 1.4.
    """
    converted = array2qimage(image, normalize=normalize)
    return converted.save(filename, format, quality)
def on_key_event(self, event):
    '''Handle key events: HOME resets zoom and drag offset and redraws.'''
    if event.GetKeyCode() == wx.WXK_HOME:
        self.zoom = 1.0
        self.dragpos = wx.Point(0, 0)
        self.need_redraw = True
def limit_author_choices():
    """Limit choices in blog author field based on config settings."""
    group_setting = getattr(settings, 'BLOG_LIMIT_AUTHOR_CHOICES_GROUP', None)
    if not group_setting:
        # No group restriction configured: fall back to staff-only.
        return {'is_staff': True}
    if isinstance(group_setting, str):
        limit = Q(groups__name=group_setting)
    else:
        # Iterable of group names: OR them together.
        limit = Q()
        for name in group_setting:
            limit = limit | Q(groups__name=name)
    if getattr(settings, 'BLOG_LIMIT_AUTHOR_CHOICES_ADMIN', False):
        limit = limit | Q(is_staff=True)
    return limit
def _escapeText(text):
    """Adds backslash-escapes to property value characters that need them.

    Single pass of ``reCharsToEscape.sub`` replacing the manual search
    loop; ``\\g<0>`` re-inserts the matched character after the escaping
    backslash.  (Assumes reCharsToEscape matches single characters, as
    the original per-character loop did -- confirm if the pattern changes.)
    """
    return reCharsToEscape.sub(r'\\\g<0>', text)
def load_pa11y_ignore_rules(file=None, url=None):  # pylint: disable=redefined-builtin
    """Load the pa11y ignore rules from the given file or URL.

    Returns None when neither source is given; raises ValueError for a
    missing file and RuntimeError (with ``.response`` attached) for a
    failed URL fetch.
    """
    if not file and not url:
        return None
    if file:
        rules_path = Path(file)
        if not rules_path.isfile():
            msg = (u"pa11y_ignore_rules_file specified, but file does not exist! {file}").format(file=rules_path)
            raise ValueError(msg)
        return yaml.safe_load(rules_path.text())
    # must be URL
    resp = requests.get(url)
    if resp.ok:
        return yaml.safe_load(resp.text)
    msg = (u"pa11y_ignore_rules_url specified, but failed to fetch URL. status={status}").format(status=resp.status_code)
    err = RuntimeError(msg)
    err.response = resp
    raise err
def cli(self, *args, **kwargs):
    """Define a CLI function that should be routed by this API."""
    # Inject this router's API before delegating to the module-level cli().
    kwargs.update(api=self.api)
    return cli(*args, **kwargs)
def _set_interface_reliable_messaging(self, v, load=False):
    """Setter method for interface_reliable_messaging, mapped from YANG variable
    /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_reliable_messaging (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_interface_reliable_messaging is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_interface_reliable_messaging() directly.
    """
    # Unwrap values that carry their original user type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG container class.
        t = YANGDynClass(v, base=interface_reliable_messaging.interface_reliable_messaging, is_container='container', presence=True, yang_name="interface-reliable-messaging", rest_name="reliable-messaging", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RSVP Reliable messaging on this interface', u'alt-name': u'reliable-messaging'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as a ValueError describing the expected generated type.
        raise ValueError({'error-string': """interface_reliable_messaging must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=interface_reliable_messaging.interface_reliable_messaging, is_container='container', presence=True, yang_name="interface-reliable-messaging", rest_name="reliable-messaging", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RSVP Reliable messaging on this interface', u'alt-name': u'reliable-messaging'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", })
    self.__interface_reliable_messaging = t
    # Notify the parent object of the change, if supported.
    if hasattr(self, '_set'):
        self._set()
def trunc_neg_eigs(self, particle):
    """Given a state represented as a model parameter vector, return a model
    parameter vector representing the same state with any negative
    eigenvalues set to zero.

    :param np.ndarray particle: Vector of length ``(dim**2,)``
        representing a state.
    :return: The same state with any negative eigenvalues set to zero.
    """
    # Reconstruct the density matrix from its basis expansion.
    arr = np.tensordot(particle, self._basis.data.conj(), 1)
    eigvals, eigvecs = np.linalg.eig(arr)
    if np.all(eigvals >= 0):
        # Already positive semidefinite -- nothing to truncate.
        return particle
    eigvals[eigvals < 0] = 0
    rebuilt = np.dot(eigvecs * eigvals, eigvecs.conj().T)
    new_particle = np.real(np.dot(self._basis.flat(), rebuilt.flatten()))
    assert new_particle[0] > 0
    return new_particle
def loadalldatas():
    """Loads all demo fixtures, in dependency order."""
    for app in ('common', 'profiles', 'blog', 'democomments'):
        project.recursive_load(os.path.join(paths.project_paths.manage_root, app))
def get_version_details(path):
    """Parse a dunder-style version file (lines like ``__version__ = '1.2.3'``).

    :param path: path to version file
    :return: dict mapping names (dunders stripped) to their string values

    Lines without a ``" = "`` separator (e.g. blank trailing lines) are
    skipped; the original crashed with IndexError on them.
    """
    data = {}
    with open(path, "r") as reader:
        for line in reader:
            parts = line.split(" = ")
            if len(parts) < 2:
                continue
            data[parts[0].replace("__", "")] = parts[1].strip().replace("'", "")
    return data
def minimize(self, *args, **kwargs):
    '''Optimize our loss exhaustively.

    A thin wrapper over the :func:`iterate` method: it simply exhausts the
    iterative optimization process and returns the final monitor values.

    Returns
    -------
    train_monitors : dict
        A dictionary mapping monitor names to values, evaluated on the
        training dataset.
    valid_monitors : dict
        A dictionary containing monitor values evaluated on the validation
        dataset.
    '''
    result = None
    for result in self.iterate(*args, **kwargs):
        continue
    return result
def targets(self, tgt, tgt_type):
    '''Return a dict of {'id': {'ipv4': <ipaddr>}} data sets to be used as
    targets given the passed tgt and tgt_type'''
    matched = {}
    for back in self._gen_back():
        roster_key = '{0}.targets'.format(back)
        if roster_key not in self.rosters:
            continue
        try:
            matched.update(self.rosters[roster_key](tgt, tgt_type))
        except salt.exceptions.SaltRenderError as exc:
            log.error('Unable to render roster file: %s', exc)
        except IOError as exc:
            log.error("Can't access roster for backend %s: %s", back, exc)
    log.debug('Matched minions: %s', matched)
    return matched
def insert_tag(tag, before, root):
    """Insert `tag` before `before` tag if present. If not, insert it into `root`.

    Args:
        tag (obj): HTMLElement instance.
        before (obj): HTMLElement instance, or None/empty.
        root (obj): HTMLElement instance.
    """
    if not before:
        # No anchor element: append to the root instead.
        root.childs.append(tag)
        tag.parent = root
        return
    if type(before) in (tuple, list):
        before = first(before)
    # `before` must be double-linked (know its parent) to position the tag.
    if not hasattr(before, "parent"):
        raise ValueError("Input must be double-linked!")
    parent = before.parent
    parent.childs.insert(parent.childs.index(before), tag)
    tag.parent = parent
def try_log_part(self, context=None, with_start_message=True):
    """Write a progress log entry if the configured part-log interval
    (part_log_time_minutes) has elapsed, or a start message on the very
    first call.

    :return: boolean -- True if a log entry was written
    """
    context = {} if context is None else context
    self.__counter += 1
    if time.time() - self.__begin_time > self.__part_log_time_seconds:
        # Interval elapsed: emit a progress record and restart the clock.
        self.__begin_time = time.time()
        context['count'] = self.__counter
        if self.__total:
            self.__percent_done = int(self.__counter * 100 / self.__total)
            context['percentDone'] = self.__percent_done
            context['total'] = self.__total
        self.__log.info(msg=self.__log_message, context=context)
        return True
    if self.__counter == 1:
        # First iteration: optionally announce that the loop has started.
        if with_start_message:
            self.__log.info(u"Начали цикл: " + self.__log_message)
        return True
    return False
def I_minus_R(self, singular_value):
    """Get I - R at a singular value.

    Parameters
    ----------
    singular_value : int
        singular value to calc R at

    Returns
    -------
    I - R : pyemu.Matrix
        identity matrix minus resolution matrix at singular_value
    """
    # Serve the cached result when the same singular value is requested again.
    if self.__I_R is not None and singular_value == self.__I_R_sv:
        return self.__I_R
    if singular_value > self.jco.ncol:
        return self.parcov.zero
    # v2 = self.qhalfx.v[:, singular_value:]
    v2 = self.xtqx.v[:, singular_value:]
    self.__I_R = v2 * v2.T
    self.__I_R_sv = singular_value
    return self.__I_R
def pre_release(version):
    """Generates new docs, release announcements and creates a local commit."""
    announce(version)
    regen()
    changelog(version, write_out=True)
    fix_formatting()
    commit_msg = "Preparing release version {}".format(version)
    check_call(["git", "commit", "-a", "-m", commit_msg])
    print()
    print(f"{Fore.CYAN}[generate.pre_release] {Fore.GREEN}All done!")
    print()
    print(f"Please push your branch and open a PR.")
def forward(self, images, targets=None):
    """Arguments:
        images (list[Tensor] or ImageList): images to be processed
        targets (list[BoxList]): ground-truth boxes present in the image (optional)

    Returns:
        result (list[BoxList] or dict[Tensor]): the output from the model.
            During training, it returns a dict[Tensor] which contains the losses.
            During testing, it returns list[BoxList] contains additional fields
            like `scores`, `labels` and `mask` (for Mask R-CNN models).
    """
    if self.training and targets is None:
        raise ValueError("In training mode, targets should be passed")
    images = to_image_list(images)
    features = self.backbone(images.tensors)
    proposals, proposal_losses = self.rpn(images, features, targets)
    if self.roi_heads:
        x, result, detector_losses = self.roi_heads(features, proposals, targets)
    else:
        # RPN-only models don't have roi_heads
        x = features
        result = proposals
        detector_losses = {}
    if self.training:
        # Merge the detector and proposal losses into one dict.
        losses = dict(detector_losses)
        losses.update(proposal_losses)
        return losses
    return result
def start(self):
    """Starts the upload.

    :raises SbgError: If upload is not in PREPARING state.
    """
    if self._status != TransferState.PREPARING:
        raise SbgError('Unable to start. Upload not in PREPARING state.')
    super(Upload, self).start()
def get_scss_files(self, skip_partials=True, with_source_path=False):
    """Gets all SCSS files in the source directory.

    :param bool skip_partials: If True, partials (files beginning with '_')
        will be ignored.  Otherwise, all SCSS files are returned.
    :param bool with_source_path: If True, the `source_path` will be kept on
        all of the paths.  Otherwise, it will be stripped.
    :returns: A list of the SCSS files in the source directory
    """
    found = []
    for dirpath, _dirnames, filenames in os.walk(self._source_path):
        for name in fnmatch.filter(filenames, "*.scss"):
            if skip_partials and name.startswith("_"):
                continue
            candidate = os.path.join(dirpath, name)
            if not with_source_path:
                # Strip the source-path prefix and any leading separator.
                candidate = candidate.split(self._source_path)[1]
                if candidate.startswith("/"):
                    candidate = candidate[1:]
            found.append(candidate)
    return found
def get_lrc(self, playingsong):
    """Fetch lyrics for the given song from the douban.fm lyric API.

    If polled too frequently the API responds with:
    {'msg': 'You API access rate limit has been exceeded.
     Contact api-master@douban.com if you want higher limit.',
     'code': 1998,
     'request': 'GET /j/v2/lyric'}

    :param playingsong: dict with at least 'sid' and 'ssid' keys
    :return: dict of parsed lyrics, or {} on known API errors / request failure
    """
    try:
        url = "https://douban.fm/j/v2/lyric"
        postdata = {'sid': playingsong['sid'], 'ssid': playingsong['ssid'], }
        s = requests.session()
        response = s.get(url, params=postdata, headers=HEADERS)
        # Parse the lyric response JSON into a dict.
        lyric = json.loads(response.text, object_hook=decode_dict)
        logger.info(lyric)
        if lyric.get('code', None) == 1998:
            logger.info('lrc API access rate limit has been exceeded')
            return {}
        elif lyric.get('code', None) == 107:
            logger.info('lrc API invalid_request_uri')
            return {}
        lrc_dic = lrc2dict(lyric['lyric'])
        # Original lyrics are unicode; kept as-is for compatibility.
        for key, value in iteritems(lrc_dic):
            # lrc_dic[key] = value.decode('utf-8')
            lrc_dic[key] = value
        if lrc_dic:
            logger.debug('Get lyric success!')
            return lrc_dic
        # NOTE(review): when lrc_dic is empty this falls through and
        # implicitly returns None -- confirm callers handle that.
    except requests.exceptions.RequestException:
        logger.error('Get lyric failed!')
        return {}
def _project_TH2(self, hist: Hist) -> Any:
    """Perform the actual TH2 -> TH1 projection.

    This projection can only be to 1D.

    Args:
        hist (ROOT.TH2): Histogram from which the projections should be performed.
    Returns:
        ROOT.TH1: The projected histogram.
    """
    if len(self.projection_axes) != 1:
        raise ValueError(len(self.projection_axes), "Invalid number of axes")
    # NOTE: We cannot use TH3.ProjectionZ(...) because it has different semantics
    # than ProjectionX and ProjectionY.  In particular, it doesn't respect the
    # axis limits of the axis onto which it is projected, so the projection is
    # selected by histogram type rather than axis length.
    available_projections = {
        TH1AxisType.x_axis.value: hist.ProjectionX,
        TH1AxisType.y_axis.value: hist.ProjectionY,
    }
    # Determine the axis_type value; try/except rather than a type check
    # protects against changes in the enum type.
    try:
        # Try to extract the value from an enum.
        selected_axis_type = self.projection_axes[0].axis_type.value
    except ValueError:
        # Seems that we received an int, so just use that value.
        selected_axis_type = self.axis_type  # type: ignore
    projection_func = available_projections[selected_axis_type]
    # Do the actual projection.
    logger.info(f"Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}")
    return projection_func()
def treebeard_js():
    """Template tag to print out the proper <script/> tags to include a custom .js"""
    static_url = get_static_url()
    js_file = urljoin(static_url, 'treebeard/treebeard-admin.js')
    jquery_ui = urljoin(static_url, 'treebeard/jquery-ui-1.8.5.custom.min.js')
    # jQuery UI is needed to call disableSelection() on drag and drop so
    # text selections arent marked while dragging a table row
    # http://www.lokkju.com/blog/archives/143
    TEMPLATE = (
        '<script type="text/javascript" src="{}"></script>'
        '<script type="text/javascript" src="{}"></script>'
        '<script>'
        '(function($){{jQuery = $.noConflict(true);}})(django.jQuery);'
        '</script>'
        '<script type="text/javascript" src="{}"></script>'
    )
    return format_html(TEMPLATE, "jsi18n", mark_safe(js_file), mark_safe(jquery_ui))
def parse_stream(response):
    """Take a stream from the docker-py lib and display it to the user.

    Also collects the parsed stream entries and returns them as a list.
    """
    collected = []
    out = stdout
    for raw in response:
        if not raw:
            continue
        try:
            raw = raw.decode('utf-8')
        except AttributeError as e:
            # Already a str (or undecodable object): write it through as-is.
            logger.exception("Unable to parse stream, Attribute Error Raised: {0}".format(e))
            out.write(raw)
            continue
        try:
            normalized = normalize_keys(json.loads(raw))
        except (ValueError, TypeError):
            # Not JSON: pass the raw text straight to the user.
            out.write(raw)
            continue
        if 'progress' in normalized:
            collected.append(normalized)
            _display_progress(normalized, out)
        elif 'error' in normalized:
            _display_error(normalized, out)
        elif 'status' in normalized:
            collected.append(normalized)
            _display_status(normalized, out)
        elif 'stream' in normalized:
            collected.append(normalized)
            _display_stream(normalized, out)
        else:
            out.write(raw)
        out.flush()
    return collected
def get_fit_failed_candidate_model(model_type, formula):
    """Return a Candidate model that indicates the fitting routine failed.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    formula : :any:`float`
        The candidate model formula.

    Returns
    -------
    candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
        Candidate model instance with status ``'ERROR'``, and warning with
        traceback.
    """
    failure_warning = EEMeterWarning(
        qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type),
        description=("Error encountered in statsmodels.formula.api.ols method. (Empty data?)"),
        data={"traceback": traceback.format_exc()},
    )
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status="ERROR",
        warnings=[failure_warning],
    )
def linkify(self):
    """The realms linkify is done during the default realms/satellites
    initialization in the Config class.

    This function only finishes the process by setting the realm level
    property according to the realm position in the hierarchy.  All
    `level` 0 realms are main realms that have their own hierarchy.

    :return: None
    """
    logger.info("Known realms:")
    for realm in self:
        # A realm is a sub-realm when any other realm lists it as a member.
        is_sub_realm = any(
            realm.get_name() in other.realm_members
            for other in self
            if not other == realm
        )
        if not is_sub_realm:
            # Top-level realm: it roots its own hierarchy.
            realm.level = 0
            realm.set_level(0, self)
def p_if_statement_woelse(self, p):
    # PLY grammar rule: the docstring below IS the production -- do not edit
    # its wording without updating the grammar.
    'if_statement : IF LPAREN cond RPAREN true_statement'
    # Build an IfStatement with no else-branch, tagged with the IF token's line.
    p[0] = IfStatement(p[3], p[5], None, lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def authenticate(self, end_user_ip, personal_number=None, requirement=None, **kwargs):
    """Request an authentication order. The :py:meth:`collect` method
    is used to query the status of the order.

    Note that the personal number is not needed when authentication is to
    be done on the same device, provided that the returned
    ``autoStartToken`` is used to open the BankID Client.

    Example data returned:

    .. code-block:: json

        {
            "orderRef": "131daac9-16c6-4618-beb0-365768f37288",
            "autoStartToken": "7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6"
        }

    :param end_user_ip: IP address of the user requesting the authentication.
    :type end_user_ip: str
    :param personal_number: The Swedish personal number in format YYYYMMDDXXXX.
    :type personal_number: str
    :param requirement: An optional dictionary stating how the signature
        must be created and verified. See BankID Relying Party Guidelines,
        section 13.5 for more details.
    :type requirement: dict
    :return: The order response.
    :rtype: dict
    :raises BankIDError: raises a subclass of this error when an error
        has been returned from the server.
    """
    payload = {"endUserIp": end_user_ip}
    if personal_number:
        payload["personalNumber"] = personal_number
    if requirement and isinstance(requirement, dict):
        payload["requirement"] = requirement
    # Merge any extra keyword arguments to tolerate potentially changed
    # optional in-parameters of the remote API.
    payload.update(kwargs)
    response = self.client.post(self._auth_endpoint, json=payload)
    if response.status_code != 200:
        raise get_json_error_class(response)
    return response.json()
def from_json(cls, data):
    """Create STAT from a json dictionary.

    Args:
        data: {
            'location': {},  // ladybug location schema (required)
            'ashrae_climate_zone': str,
            'koppen_climate_zone': str,
            'extreme_cold_week': {},  // ladybug analysis period schema
            'extreme_hot_week': {},  // ladybug analysis period schema
            'typical_weeks': {},  // dict of ladybug analysis period schemas
            'heating_dict': {},  // dict containing heating design conditions
            'cooling_dict': {},  // dict containing cooling design conditions
            "monthly_db_50": [],  // list of 12 float values for each month
            "monthly_wb_50": [],  // (likewise for the remaining monthly_db_*
            ...                   //  and monthly_wb_* percentile keys)
            "monthly_wind": [],  // list of 12 float values for each month
            "monthly_wind_dirs": [],  // matrix with 12 cols for months of the
                                      // year and 8 rows for cardinal directions
            "standard_pressure_at_elev": float,  // pressure in Pa
            "monthly_tau_beam": [],  // list of 12 float values for each month
            "monthly_tau_diffuse": []  // list of 12 float values for each month
        }

    Only 'location' is required; every other key falls back to None, an
    empty list, or an empty dict as appropriate.
    """
    # Initialize the class with all data missing
    stat_ob = cls(None)
    # Check required and optional keys; backfill missing optional keys with
    # an appropriate empty value so the assignments below never KeyError.
    option_keys_none = ('ashrae_climate_zone', 'koppen_climate_zone',
                        'extreme_cold_week', 'extreme_hot_week',
                        'standard_pressure_at_elev')
    option_keys_list = ('monthly_db_50', 'monthly_wb_50', 'monthly_db_range_50',
                        'monthly_wb_range_50', 'monthly_db_100', 'monthly_wb_100',
                        'monthly_db_20', 'monthly_wb_20', 'monthly_db_04',
                        'monthly_wb_04', 'monthly_wind', 'monthly_wind_dirs',
                        'monthly_tau_beam', 'monthly_tau_diffuse')
    option_keys_dict = ('typical_weeks', 'heating_dict', 'cooling_dict')
    assert 'location' in data, 'Required key "location" is missing!'
    for key in option_keys_none:
        if key not in data:
            data[key] = None
    for key in option_keys_list:
        if key not in data:
            data[key] = []
    for key in option_keys_dict:
        if key not in data:
            data[key] = {}
    # assign the properties of the dictionary to the stat object.
    stat_ob._location = Location.from_json(data['location'])
    stat_ob._ashrae_climate_zone = data['ashrae_climate_zone']
    stat_ob._koppen_climate_zone = data['koppen_climate_zone']
    stat_ob._extreme_cold_week = AnalysisPeriod.from_json(data['extreme_cold_week']) if data['extreme_cold_week'] else None
    stat_ob._extreme_hot_week = AnalysisPeriod.from_json(data['extreme_hot_week']) if data['extreme_hot_week'] else None
    stat_ob._typical_weeks = {}
    # A typical-weeks entry may hold either one analysis period or a list.
    for key, val in data['typical_weeks'].items():
        if isinstance(val, list):
            stat_ob._typical_weeks[key] = [AnalysisPeriod.from_json(v) for v in val]
        else:
            stat_ob._typical_weeks[key] = AnalysisPeriod.from_json(val)
    stat_ob._winter_des_day_dict = data['heating_dict']
    stat_ob._summer_des_day_dict = data['cooling_dict']
    stat_ob._monthly_db_50 = data['monthly_db_50']
    stat_ob._monthly_wb_50 = data['monthly_wb_50']
    stat_ob._monthly_db_range_50 = data['monthly_db_range_50']
    stat_ob._monthly_wb_range_50 = data['monthly_wb_range_50']
    stat_ob._monthly_db_100 = data['monthly_db_100']
    stat_ob._monthly_wb_100 = data['monthly_wb_100']
    stat_ob._monthly_db_20 = data['monthly_db_20']
    stat_ob._monthly_wb_20 = data['monthly_wb_20']
    stat_ob._monthly_db_04 = data['monthly_db_04']
    stat_ob._monthly_wb_04 = data['monthly_wb_04']
    stat_ob._monthly_wind = data['monthly_wind']
    stat_ob._monthly_wind_dirs = data['monthly_wind_dirs']
    stat_ob._stand_press_at_elev = data['standard_pressure_at_elev']
    stat_ob._monthly_tau_beam = data['monthly_tau_beam']
    stat_ob._monthly_tau_diffuse = data['monthly_tau_diffuse']
    return stat_ob
def showExports(peInstance):
    """Print the export directory information of a PE instance to stdout.

    :param peInstance: parsed PE file object whose export directory (if
        any) is dumped one ``field -> value`` line at a time.
    """
    exports = peInstance.ntHeaders.optionalHeader.dataDirectory[consts.EXPORT_DIRECTORY].info
    if exports:
        # Export directory header fields.
        # Parenthesized print calls work identically here on Python 2 and
        # Python 3; the original statement form was a syntax error under
        # the Python 3 used elsewhere in this codebase.
        exp_fields = exports.getFields()
        for field in exp_fields:
            print("%s -> %x" % (field, exp_fields[field].value))
        # Individual exported entries.
        for entry in exports.exportTable:
            entry_fields = entry.getFields()
            for field in entry_fields:
                print("%s -> %r" % (field, entry_fields[field].value))
    else:
        print("The file does not have exported functions.")
def getLeapSecondLastUpdated():  # @NoSelf
    """Print the latest date a leap second was added to the leap second table."""
    # Last row of the leap-second table; first three fields are Y, M, D.
    last_entry = CDFepoch.LTS[-1]
    date_label = str(last_entry[0]) + '-' + str(last_entry[1]) + '-' + str(last_entry[2])
    print('Leap second last updated:', date_label)
def append(self, node):
    """Append (set) the document root.

    @param node: A root L{Element}, or a tag name used to build the
        document root element. Any other type is silently ignored.
    @type node: (L{Element}|str|None)
    """
    if isinstance(node, Element):
        self.__root = node
    elif isinstance(node, basestring):
        # A bare name: wrap it in a fresh root element.
        self.__root = Element(node)
def update_ontology(ont_url, rdf_path):
    """Load an Eidos-style ontology YAML from github and save it as RDF.

    :param ont_url: URL of the YAML ontology.
    :param rdf_path: Path where the RDF hierarchy is written.
    """
    ontology_yaml = load_yaml_from_url(ont_url)
    hierarchy_graph = rdf_graph_from_yaml(ontology_yaml)
    save_hierarchy(hierarchy_graph, rdf_path)
def get_pid(rundir, process_type=PROCESS_TYPE, name=None):
    """Get the pid from the pid file in the run directory, using the given
    process type and process name for the filename.

    @param rundir: run directory containing the pid file.
    @param process_type: process type used to build the pid filename.
    @param name: optional process name used to build the pid filename.
    @returns: pid of the process, or None if not running or file not found.
    """
    pidPath = get_pidpath(rundir, process_type, name)
    log.log('run', 'pidfile for %s %s is %s' % (process_type, name, pidPath))
    if not os.path.exists(pidPath):
        return
    # Context manager guarantees the file is closed even if readline raises
    # (the previous open/readline/close leaked the handle on error).
    with open(pidPath, 'r') as pidFile:
        pid = pidFile.readline()
    if not pid or int(pid) == 0:
        return
    return int(pid)
def connection(self, name=None):
    """Return a named connection.

    This function will return a named connection by either finding one in
    its pool by the name or creating a new one. If no name is given, it
    will use the name of the current executing thread as the name of the
    connection.

    parameters:
        name - a name as a string
    """
    key = name or self._get_default_connection_name()
    try:
        return self.pool[key]
    except KeyError:
        # First use of this name: open a connection and pool it.
        conn = psycopg2.connect(self.dsn)
        self.pool[key] = conn
        return conn
def update(self, request, *args, **kwargs):
    """Update the ``Relation`` object.

    Rejects the update with HTTP 401 unless the requesting user is a
    superuser or holds the ``edit_collection`` permission on the
    collection referenced in the ``Relation``.
    """
    relation = self.get_object()
    allowed = (request.user.is_superuser
               or request.user.has_perm('edit_collection', relation.collection))
    if not allowed:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    return super().update(request, *args, **kwargs)
def clean_email(self):
    """Ensure the email address is not already registered.

    :returns: the cleaned email when it is unique.
    :raises forms.ValidationError: if another user already uses the email.
    """
    email = self.cleaned_data.get("email")
    qs = User.objects.exclude(id=self.instance.id).filter(email=email)
    # exists() issues a cheap EXISTS query instead of fetching every
    # matching row as len(qs) did.
    if qs.exists():
        raise forms.ValidationError(ugettext("This email is already registered"))
    return email
def align(s1, s2, gap=' ', eq=operator.eq):
    '''aligns two strings

    >>> print(*align('pharmacy', 'farmácia', gap='_'), sep='\\n')
    pharmac_y
    _farmácia
    >>> print(*align('advantage', 'vantagem', gap='_'), sep='\\n')
    advantage_
    __vantagem
    '''
    # Phase 1: fill the edit-distance dynamic-programming table row by row,
    # keeping only one working row plus the diagonal cell while filling.
    rows, cols = len(s1), len(s2)
    current = list(range(cols + 1))  # first row: 0, 1, 2, ..., cols
    table = [list(current)]
    for i in range(rows):
        diagonal = i
        current[0] = i + 1
        for j in range(cols):
            cost = 0 if eq(s1[i], s2[j]) else 1
            diagonal, current[j + 1] = current[j + 1], min(
                diagonal + cost, current[j] + 1, current[j + 1] + 1)
        table.append(list(current))
    # Phase 2: trace the cheapest path from table[rows][cols] back toward
    # the origin, materializing the aligned strings as we go. Ties are
    # broken by tuple comparison, exactly as in the original formulation.
    aligned1 = aligned2 = ''
    i, j = rows, cols
    while i != 0 and j != 0:
        _, i, j, aligned1, aligned2 = min(
            (table[i - 1][j - 1], i - 1, j - 1, s1[i - 1] + aligned1, s2[j - 1] + aligned2),
            (table[i - 1][j], i - 1, j, s1[i - 1] + aligned1, gap + aligned2),
            (table[i][j - 1], i, j - 1, gap + aligned1, s2[j - 1] + aligned2))
    # Flush whatever unmatched prefix remains on either string.
    if i != 0:
        aligned1 = s1[:i] + aligned1
        aligned2 = gap * i + aligned2
    if j != 0:
        aligned1 = gap * j + aligned1
        aligned2 = s2[:j] + aligned2
    return aligned1, aligned2
def buttons(self):
    """Returns a matrix (list of lists) containing all buttons of the message
    as `MessageButton <telethon.tl.custom.messagebutton.MessageButton>`
    instances.

    Returns ``None`` (implicitly) when the buttons cannot be resolved:
    there is no reply markup, no known input chat, or the bot owning the
    markup cannot be determined.
    """
    # Lazily build the button matrix once; skip entirely when no markup.
    if self._buttons is None and self.reply_markup:
        if not self.input_chat:
            return
        try:
            bot = self._needed_markup_bot()
        except ValueError:
            # The bot required to use this markup could not be determined.
            return
        else:
            self._set_buttons(self._input_chat, bot)
    return self._buttons
def _get_controllers(self):
    """Iterate through the installed controller entry points, importing
    each module and mapping the entry-point name to the module handle.

    :return: dict
    """
    loaded = {}
    for entry_point in pkg_resources.iter_entry_points(group=self.CONTROLLERS):
        LOGGER.debug('Loading %s controller', entry_point.name)
        loaded[entry_point.name] = importlib.import_module(entry_point.module_name)
    return loaded
def _MultiStream(cls, fds):
    """Effectively streams data from multiple opened AFF4ImageBase objects.

    Args:
      fds: A list of opened AFF4Stream (or AFF4Stream descendants) objects.

    Yields:
      Tuples (fd, chunk, exception) where chunk is a binary blob of data and
      fd is an object from the fds argument.

      If one or more chunks are missing, exception will be a
      MissingChunksError while chunk will be None. _MultiStream does its
      best to skip the file entirely if one of its chunks is missing, but
      in case of very large files it's still possible to yield a truncated
      file.
    """
    missing_chunks_by_fd = {}
    # Process chunk paths in batches to bound how much is opened at once.
    for chunk_fd_pairs in collection.Batch(
            cls._GenerateChunkPaths(fds), cls.MULTI_STREAM_CHUNKS_READ_AHEAD):
        chunks_map = dict(chunk_fd_pairs)
        contents_map = {}
        # Open all chunk blobs of this batch together and read them.
        for chunk_fd in FACTORY.MultiOpen(chunks_map, mode="r", token=fds[0].token):
            if isinstance(chunk_fd, AFF4Stream):
                fd = chunks_map[chunk_fd.urn]
                contents_map[chunk_fd.urn] = chunk_fd.read()
        # Record chunks that failed to open or came back empty.
        for chunk_urn, fd in chunk_fd_pairs:
            if chunk_urn not in contents_map or not contents_map[chunk_urn]:
                missing_chunks_by_fd.setdefault(fd, []).append(chunk_urn)
        # Yield data only for files with no missing chunks so far; files
        # already flagged are skipped from here on.
        for chunk_urn, fd in chunk_fd_pairs:
            if fd in missing_chunks_by_fd:
                continue
            yield fd, contents_map[chunk_urn], None
    # Finally surface one error per file that had missing chunks.
    for fd, missing_chunks in iteritems(missing_chunks_by_fd):
        e = MissingChunksError(
            "%d missing chunks (multi-stream)." % len(missing_chunks),
            missing_chunks=missing_chunks)
        yield fd, None, e
def parse_value(self, sn: "DataNode") -> ScalarValue:
    """Let the schema node's type parse the receiver's value."""
    parsed = sn.type.parse_value(self.value)
    if parsed is None:
        # The type rejected the raw value; surface it as an invalid key.
        raise InvalidKeyValue(self.value)
    return parsed
def load_member(fqn):
    """Load and return a module member (e.g. a class) for a given fully
    qualified name.

    :param fqn: dotted path ``package.module.member``.
    :returns: the attribute named by the last path component.
    :raises ImportError: if the module cannot be imported.
    :raises AttributeError: if the member does not exist in the module.
    """
    import importlib
    modulename, member_name = split_fqn(fqn)
    # importlib.import_module returns the leaf module directly; the raw
    # __import__ call it replaces was also passing a bare string where a
    # fromlist sequence was expected.
    module = importlib.import_module(modulename)
    return getattr(module, member_name)
def sentence_matches(self, sentence_text):
    """Returns true iff the sentence contains this mention's upstream
    and downstream participants, and one of the stemmed verbs in the
    sentence is the same as the stemmed action type."""
    lowered = sentence_text.lower()
    has_upstream = self.string_matches_sans_whitespace(
        lowered, self.mention.upstream.lower())
    has_downstream = self.string_matches_sans_whitespace(
        lowered, self.mention.downstream.lower())
    # The first word of the action type is taken to be the verb
    # (e.g. "depends" for "depends on").
    actiontype_words = word_tokenize(self.mention.actiontype)
    verb_stem = stem(actiontype_words[0])
    has_verb = any(verb_stem == stem(word)
                   for word in word_tokenize(sentence_text))
    return has_upstream and has_downstream and has_verb
def sscan(self, name, cursor=0, match=None, count=None):
    """Incrementally return lists of elements in a set, together with a
    cursor indicating the scan position.

    ``match`` allows for filtering the keys by pattern

    ``count`` allows for hint the minimum number of returns
    """
    args = [name, cursor]
    if match is not None:
        args += [Token.get_token('MATCH'), match]
    if count is not None:
        args += [Token.get_token('COUNT'), count]
    return self.execute_command('SSCAN', *args)
def overlays_at(self, key):
    """Key may be a slice or a point."""
    if isinstance(key, slice):
        start, end, _ = key.indices(len(self.text))
    else:
        # A point: treat it as the degenerate range [key, key].
        start = end = key
    window = Rng(start, end)
    return [overlay for overlay in self.overlays if overlay.start in window]
def get_object(self, resource_url):
    """Get remote resource information.

    Creates a local directory for the resource if this is the first access
    to the resource. Downloads the resource Json representation and writes
    it into a .json file in the cache directory.

    Raises ValueError if the resource is not cached and does not exist. If
    the resource no longer exists on the server but is in the local cache,
    a reference to the local copy is returned and the value of the
    is_active flag is False.

    Parameters
    ----------
    resource_url : string
        Url of the resource

    Returns
    -------
    (string, Json, Boolean, string)
        Returns a 4-tuple containing the local resource directory, the Json
        object representing the resource, an active flag indicating if the
        resource still exists on the remote server or only in the local
        cache, and the resource's unique cache identifier.
    """
    # Check if resource is in local cache. If not, mint a new cache
    # identifier (a fresh uuid implies no local directory exists yet).
    if resource_url in self.cache:
        cache_id = self.cache[resource_url]
    else:
        cache_id = str(uuid.uuid4())
    # The local cache directory for the resource is named by the cache id.
    obj_dir = os.path.join(self.directory, cache_id)
    # File for local copy of object's Json representation.
    f_json = os.path.join(obj_dir, '.json')
    # Object active flag.
    is_active = True
    # Read the remote resource representation.
    try:
        obj_json = sco.JsonResource(resource_url).json
        # Save local copy of Json object. Create local resource directory
        # if it doesn't exist.
        if not os.path.isdir(obj_dir):
            os.mkdir(obj_dir)
        with open(f_json, 'w') as f:
            json.dump(obj_json, f)
    except ValueError as ex:
        # If the resource does not exist but we have a local copy then
        # read the object from local disk and mark it inactive. Re-raise
        # the ValueError if no local copy exists.
        if os.path.isfile(f_json):
            with open(f_json, 'r') as f:
                obj_json = json.load(f)
            is_active = False
        else:
            raise ex
    # Return object directory, Json, active flag, and cache identifier.
    return obj_dir, obj_json, is_active, cache_id
def eeg_microstates_plot(method, path="", extension=".png", show_sensors_position=False, show_sensors_name=False, plot=True, save=True, dpi=150, contours=0, colorbar=False, separate=False):
    """Plot the microstates.

    Creates one topographic map per (non-"Bad") microstate, saves each to
    disk, and — when ``save`` is True — pastes the individual images into
    one combined image. Returns the list of per-microstate figures.
    """
    # Generate and store figures
    figures = []
    names = []
    # Prefer good-fit microstates when available; fall back to the raw set.
    try:
        microstates = method["microstates_good_fit"]
    except KeyError:
        microstates = method["microstates"]
    # Create individual plot for each microstate
    for microstate in set(microstates):
        if microstate != "Bad":
            # Average all samples assigned to this microstate into one map.
            values = np.mean(method["data"][np.where(microstates == microstate)], axis=0)
            values = np.array(values, ndmin=2).T
            evoked = mne.EvokedArray(values, method["raw.info_example"], 0)
            fig = evoked.plot_topomap(times=0, title=microstate, size=6, contours=contours, time_format="", show=plot, colorbar=colorbar, show_names=show_sensors_name, sensors=show_sensors_position)
            figures.append(fig)
            # Save separate figures, encoding the processing parameters
            # into the filename.
            name = path + "microstate_%s_%s%s%s_%s%i_%s%s" % (microstate, method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
            fig.savefig(name, dpi=dpi)
            names.append(name)
    # Save combined plot
    if save is True:
        # Combine all plots on a grid sized from the first saved image.
        image_template = PIL.Image.open(names[0])
        X, Y = image_template.size
        image_template.close()
        combined = PIL.Image.new('RGB', (int(X * len(set(microstates)) / 2), int(Y * len(set(microstates)) / 2)))
        fig = 0
        for x in np.arange(0, len(set(microstates)) / 2 * int(X), int(X)):
            for y in np.arange(0, len(set(microstates)) / 2 * int(Y), int(Y)):
                try:
                    newfig = PIL.Image.open(names[fig])
                    combined.paste(newfig, (int(x), int(y)))
                    newfig.close()
                except:
                    # NOTE(review): bare except silently skips grid cells
                    # with no corresponding image (e.g. odd microstate
                    # counts or a failed savefig) — confirm intentional.
                    pass
                fig += 1
        # combined.show()
        combined_name = path + "microstates_%s%s%s_%s%i_%s%s" % (method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
        combined.save(combined_name)
    # Delete the separate plots when they are not wanted on disk.
    if separate is False or save is False:
        for name in names:
            os.remove(name)
    return (figures)
def delete_polygon(self, polygon):
    """Deletes on the Agro API the Polygon identified by the ID of the
    provided polygon object.

    :param polygon: the `pyowm.agro10.polygon.Polygon` object to be deleted
    :type polygon: `pyowm.agro10.polygon.Polygon` instance
    :returns: `None` if deletion is successful, an exception otherwise
    """
    assert polygon.id is not None
    status_code, _ = self.http_client.delete(
        NAMED_POLYGON_URI % str(polygon.id),
        params={'appid': self.API_key},
        headers={'Content-Type': 'application/json'})
def _prepare_deprecation_data(self):
    """Cycle through the list of AppSettingDeprecation instances set on
    ``self.deprecations`` and prepopulate two new dictionary attributes:

    ``self._deprecated_settings``:
        Uses the deprecated setting names themselves as the keys. Used to
        check whether a request is for a deprecated setting.

    ``self._replacement_settings``:
        Uses the 'replacement setting' names as keys (where supplied).
        Used to allow the helper to temporarily support override settings
        defined using the old name, when the values for the new setting
        are requested.

    Raises a specific error subclass when the deprecation definitions are
    mistyped, reference unknown defaults, or repeat a setting name.
    """
    if not isinstance(self.deprecations, (list, tuple)):
        raise IncorrectDeprecationsValueType(
            "'deprecations' must be a list or tuple, not a {}.".format(
                type(self.deprecations).__name__)
        )
    self._deprecated_settings = {}
    self._replacement_settings = defaultdict(list)
    for item in self.deprecations:
        item.prefix = self.get_prefix()
        # A deprecated setting must keep a default value until the end of
        # its deprecation period.
        if not self.in_defaults(item.setting_name):
            raise InvalidDeprecationDefinition(
                "There is an issue with one of your setting deprecation "
                "definitions. '{setting_name}' could not be found in "
                "{defaults_module_path}. Please ensure a default value "
                "remains there until the end of the setting's deprecation "
                "period.".format(
                    setting_name=item.setting_name,
                    defaults_module_path=self._defaults_module_path,
                )
            )
        # Each setting may only be deprecated once.
        if item.setting_name in self._deprecated_settings:
            raise DuplicateDeprecationError(
                "The setting name for each deprecation definition must be "
                "unique, but '{setting_name}' has been used more than once "
                "for {helper_class}.".format(
                    setting_name=item.setting_name,
                    helper_class=self.__class__.__name__,
                )
            )
        self._deprecated_settings[item.setting_name] = item
        if item.replacement_name:
            # Replacement settings must also have a default value.
            if not self.in_defaults(item.replacement_name):
                raise InvalidDeprecationDefinition(
                    "There is an issue with one of your settings "
                    "deprecation definitions. '{replacement_name}' is not "
                    "a valid replacement for '{setting_name}', as no such "
                    "value can be found in {defaults_module_path}.".format(
                        replacement_name=item.replacement_name,
                        setting_name=item.setting_name,
                        defaults_module_path=self._defaults_module_path,
                    )
                )
            self._replacement_settings[item.replacement_name].append(item)
def create_multicast_socket(address, port):
    """Creates a multicast socket according to the given address and port.

    Handles both IPv4 and IPv6 addresses.

    :param address: Multicast address/group
    :param port: Socket port
    :return: A tuple (socket, listening address)
    :raise ValueError: Invalid address or port
    """
    # Get the information about a datagram (UDP) socket, of any family
    try:
        addrs_info = socket.getaddrinfo(address, port, socket.AF_UNSPEC, socket.SOCK_DGRAM)
    except socket.gaierror:
        raise ValueError("Error retrieving address informations ({0}, {1})".format(address, port))
    if len(addrs_info) > 1:
        _logger.debug("More than one address information found. Using the first one.")
    # Get the first entry: (family, socktype, proto, canonname, sockaddr)
    addr_info = addrs_info[0]
    # Only accept IPv4/v6 addresses
    if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
        # Unhandled address family
        raise ValueError("Unhandled socket family : %d" % (addr_info[0]))
    # Prepare the socket
    sock = socket.socket(addr_info[0], socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    # Reuse address so several listeners may bind the same group/port
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    if hasattr(socket, "SO_REUSEPORT"):
        # Special for MacOS
        # pylint: disable=E1101
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    # Bind the socket on the wildcard address of the matching family
    if sock.family == socket.AF_INET:
        # IPv4 binding
        sock.bind(("0.0.0.0", port))
    else:
        # IPv6 Binding
        sock.bind(("::", port))
    # Prepare the mreq structure to join the group
    # addrinfo[4] = (addr, port)
    mreq = make_mreq(sock.family, addr_info[4][0])
    # Join the group
    if sock.family == socket.AF_INET:
        # IPv4
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        # Allow multicast packets to get back on this host
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
    elif sock.family == socket.AF_INET6:
        # IPv6
        sock.setsockopt(ipproto_ipv6(), socket.IPV6_JOIN_GROUP, mreq)
        # Allow multicast packets to get back on this host
        sock.setsockopt(ipproto_ipv6(), socket.IPV6_MULTICAST_LOOP, 1)
    return sock, addr_info[4][0]
def stop_and_reset_thread(self, ignore_results=False):
    """Stop the current search thread and clean up.

    :param ignore_results: when True, disconnect the finished-signal
        handler first so any pending results are discarded.
    """
    if self.search_thread is not None:
        if self.search_thread.isRunning():
            if ignore_results:
                self.search_thread.sig_finished.disconnect(self.search_complete)
            self.search_thread.stop()
            # Block until the thread has actually terminated.
            self.search_thread.wait()
        # Detach from the Qt parent so the thread object can be collected.
        self.search_thread.setParent(None)
        self.search_thread = None
def create(self, project_id=None):
    """Creates the bucket.

    Args:
        project_id: the project in which to create the bucket. Defaults to
            the API object's project.
    Returns:
        The bucket.
    Raises:
        Exception if there was an error creating the bucket.
    """
    if not self.exists():
        if project_id is None:
            project_id = self._api.project_id
        # The previous `except Exception as e: raise e` was a no-op that
        # only obscured the traceback; let errors propagate naturally.
        self._info = self._api.buckets_insert(self._name, project_id=project_id)
    return self
def find_write_predecessors(self, address):
    """Returns all predecessor transaction ids for a write of the provided
    address.

    Arguments:
        address (str): the radix address

    Returns: a set of transaction ids
    """
    # A write operation must be preceded by:
    #   - The "enclosing writer", which is the writer at the address or
    #     the nearest writer higher (closer to the root) in the tree.
    #   - The "enclosing readers", which are the readers at the address
    #     or higher in the tree.
    #   - The "children writers", which include all writers which are
    #     lower in the tree than the address.
    #   - The "children readers", which include all readers which are
    #     lower in the tree than the address.
    #
    # The enclosing writer must be added as it may have modified a node
    # which must not happen after the current write.
    #
    # Writers which are higher in the tree than the enclosing writer may
    # have modified a node at or under the given address. However, we do
    # not need to include them here as they will have been considered a
    # predecessor to the enclosing writer.
    #
    # Enclosing readers must be included. Technically, we only need to add
    # enclosing readers which occurred after the enclosing writer, since
    # the readers preceding the writer will have been considered a
    # predecessor of the enclosing writer. However, with the current
    # data structure we can not determine the difference between readers
    # so we specify them all; this is mostly harmless as it will not change
    # the eventual sort order generated by the scheduler.
    #
    # Children readers must be added, since their reads must happen prior
    # to the write.
    predecessors = set()
    enclosing_writer = None
    node_stream = self._tree.walk(address)
    address_len = len(address)
    # First, walk down from the root to the address, collecting all readers
    # and updating the enclosing_writer if needed.
    try:
        for node_address, node in node_stream:
            if node is not None:
                predecessors.update(node.readers)
                if node.writer is not None:
                    enclosing_writer = node.writer
                if len(node_address) >= address_len:
                    break
    # If the address isn't on the tree, then there aren't any
    # predecessors below the node to worry about (because there
    # isn't anything at all), so return the predecessors that have
    # already been collected.
    except AddressNotInTree as err:
        if err.match is not None:
            return self.find_write_predecessors(err.match)
        return predecessors
    finally:
        # Runs before every exit above (including the early returns),
        # ensuring the enclosing writer is recorded in this call's set.
        if enclosing_writer is not None:
            predecessors.add(enclosing_writer)
    # Next, descend down the tree starting at the address node and
    # find all descendant readers and writers.
    for _, node in node_stream:
        if node is not None:
            if node.writer is not None:
                predecessors.add(node.writer)
            predecessors.update(node.readers)
    return predecessors
def comunicar_certificado_icpbrasil(self, certificado):
    """``ComunicarCertificadoICPBRASIL`` function as per the SAT ER,
    item 6.1.2. Sends the certificate created by ICP-Brasil.

    :param str certificado: Content of the digital certificate created by
        the ICP-Brasil certification authority.

    :return: Returns the SAT function's response *verbatim*.
    :rtype: string
    """
    numero_sessao = self.gerar_numero_sessao()
    return self.invocar__ComunicarCertificadoICPBRASIL(
        numero_sessao, self._codigo_ativacao, certificado)
def get_alignments(attention_matrix: np.ndarray, threshold: float = .9) -> Iterator[Tuple[int, int]]:
    """Yields hard alignments from an attention_matrix (target_length,
    source_length) given a threshold.

    :param attention_matrix: The attention matrix.
    :param threshold: The threshold for including an alignment link in the result.
    :return: Generator yielding (src, trg) index pairs of the form
        (0, 0), (0, 1), (2, 1), (2, 2), (3, 4) ...
    """
    trg_len, src_len = attention_matrix.shape
    # Scan column by column (source positions outermost) so links are
    # emitted grouped by source index.
    for src_pos in range(src_len):
        for trg_pos in range(trg_len):
            if attention_matrix[trg_pos, src_pos] > threshold:
                yield (src_pos, trg_pos)
def __recv_cb(self, msg):
    """Calls user-provided callback and marks message for Ack regardless of
    success."""
    try:
        self.__msg_callback(msg)
    except:
        # Deliberately broad: a failing user callback must never prevent
        # the message from being accounted for below.
        logger.exception("AmqpLink.__recv_cb exception calling msg_callback")
    finally:
        # Only works if all messages are handled in series: remember the
        # latest delivery tag and count it as not-yet-acked.
        self.__last_id = msg.delivery_tag
        self.__unacked += 1
def map(self, arg, na_action=None):
    """Map values of Series according to input correspondence.

    Used for substituting each value in a Series with another value,
    that may be derived from a function, a ``dict`` or a :class:`Series`.

    Parameters
    ----------
    arg : function, dict, or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NaN values, without passing them to the
        mapping correspondence.

    Returns
    -------
    Series
        Same index as caller.

    See Also
    --------
    Series.apply : For applying more complex functions on a Series.
    DataFrame.apply : Apply a function row-/column-wise.
    DataFrame.applymap : Apply a function elementwise on a whole DataFrame.

    Notes
    -----
    When ``arg`` is a dictionary, values in Series that are not in the
    dictionary (as keys) are converted to ``NaN``. However, if the
    dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
    provides a method for default values), then this default is used
    rather than ``NaN``.

    Examples
    --------
    >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])

    ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
    in the ``dict`` are converted to ``NaN``, unless the dict has a
    default value (e.g. ``defaultdict``):

    >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
    0    kitten
    1     puppy
    2       NaN
    3       NaN
    dtype: object

    It also accepts a function:

    >>> s.map('I am a {}'.format)
    0       I am a cat
    1       I am a dog
    2       I am a nan
    3    I am a rabbit
    dtype: object

    To avoid applying the function to missing values (and keep them as
    ``NaN``) ``na_action='ignore'`` can be used:

    >>> s.map('I am a {}'.format, na_action='ignore')
    0       I am a cat
    1       I am a dog
    2              NaN
    3    I am a rabbit
    dtype: object
    """
    # Delegate element-wise mapping to the shared base implementation,
    # then rewrap with the caller's index and propagate metadata.
    new_values = super()._map_values(arg, na_action=na_action)
    return self._constructor(new_values, index=self.index).__finalize__(self)
def write_bibtex_dict(stream, entries):
    """Write BibTeX entries to *stream* one record at a time.

    bibtexparser.write converts the entire database to one big string and
    writes it out in one go. I'm sure it will always all fit in RAM but some
    things just will not stand.

    :param stream: binary file-like object to write to
    :param entries: iterable of bibtexparser entry dicts
    """
    from bibtexparser.bwriter import BibTexWriter

    writer = BibTexWriter()
    writer.indent = ' '
    writer.entry_separator = ''

    # NOTE(review): _entry_to_bibtex is a private bibtexparser API -- assumed
    # stable for the pinned version; confirm on upgrades.
    for index, record in enumerate(entries):
        if index:
            # Separate consecutive records with a single blank line.
            stream.write(b'\n')
        stream.write(writer._entry_to_bibtex(record).encode('utf8'))
def disassemble(self, *, transforms=None) -> Iterator[Instruction]:
    """Disassembles this method, yielding an iterable of
    :class:`~jawa.util.bytecode.Instruction` objects.

    :param transforms: optional iterable of transform callables applied to
        each instruction in order; when ``None``, falls back to the
        classloader's ``bytecode_transforms`` (or no transforms if this
        method's class file has no classloader).
    """
    if transforms is None:
        # Default to the loader-wide transforms when a classloader is attached.
        if self.cf.classloader:
            transforms = self.cf.classloader.bytecode_transforms
        else:
            transforms = []
    # _bind_transform presumably closes each transform over this method's
    # context -- confirm against its definition.
    transforms = [self._bind_transform(t) for t in transforms]
    with io.BytesIO(self._code) as code:
        # read_instruction returns None at end-of-stream, which terminates
        # the two-argument iter() sentinel loop.
        ins_iter = iter(lambda: read_instruction(code, code.tell()), None)
        for ins in ins_iter:
            # Apply every transform to the instruction before yielding it.
            for transform in transforms:
                ins = transform(ins)
            yield ins
def main(conf_file, overwrite, logger):
    """Create configuration and log file. Restart the daemon when configuration
    is done.

    Args:
        conf_file (str): Path to the configuration file.
        overwrite (bool): Overwrite the configuration file with `clean` config?
        logger: Logger used to report progress.
    """
    # Resolve the daemon user's uid so created files get the right owner.
    uid = pwd.getpwnam(get_username()).pw_uid
    # stop the daemon before touching its config/log files
    logger.info("Stopping the daemon.")
    sh.service(get_service_name(), "stop")
    # create files
    logger.info("Creating config file.")
    create_config(cnf_file=conf_file, uid=uid, overwrite=overwrite)
    logger.info("Creating log file.")
    create_log(log_file=REQUIRED_SETTINGS["LogFile"], uid=uid)
    # start the daemon
    logger.info("Starting the daemon..")
    sh.service(get_service_name(), "start")
def handle_import(self, options):
    """Gets posts from Blogger.

    Pages through the blog's post feed 500 entries at a time, importing each
    post (via ``self.add_post``) along with its comments
    (via ``self.add_comment``) until a page yields no new posts.

    :param options: command options; must contain ``blog_id``.
    :raises CommandError: if ``blog_id`` is missing, the gdata library is
        not installed, or the Blogger service returns an error.
    """
    blog_id = options.get("blog_id")
    if blog_id is None:
        raise CommandError("Usage is import_blogger %s" % self.args)
    try:
        from gdata import service
    except ImportError:
        raise CommandError("Could not import the gdata library.")
    blogger = service.GDataService()
    blogger.service = "blogger"
    blogger.server = "www.blogger.com"
    start_index = 1
    processed_posts = []
    # Non-zero so the first loop iteration runs; reset per page and counts
    # the posts actually imported from that page.
    new_posts = 1
    while new_posts:
        new_posts = 0
        query = service.Query()
        query.feed = "/feeds/%s/posts/full" % blog_id
        query.max_results = 500
        query.start_index = start_index
        try:
            feed = blogger.Get(query.ToUri())
        except service.RequestError as err:
            message = "There was a service error. The response was: " "%(status)s %(reason)s - %(body)s" % err.message
            raise CommandError(message, blogger.server + query.feed, err.message["status"])
        # NOTE(review): the enumerate index ``i`` is unused.
        for (i, entry) in enumerate(feed.entry):
            # this basically gets the unique post ID from the URL to itself
            # and pulls the ID off the end.
            post_id = entry.GetSelfLink().href.split("/")[-1]
            # Skip duplicate posts. Important for the last query.
            if post_id in processed_posts:
                continue
            title = entry.title.text
            content = entry.content.text
            # this strips off the time zone info off the end as we want UTC
            clean_date = entry.published.text[:re.search(r"\.\d{3}", entry.published.text).end()]
            published_date = self.parse_datetime(clean_date)
            # TODO - issues with content not generating correct <P> tags
            tags = [tag.term for tag in entry.category]
            post = self.add_post(title=title, content=content, pub_date=published_date, tags=tags)
            # get the comments from the post feed and then add them to
            # the post details
            comment_url = "/feeds/%s/%s/comments/full?max-results=1000"
            comments = blogger.Get(comment_url % (blog_id, post_id))
            for comment in comments.entry:
                email = comment.author[0].email.text
                author_name = comment.author[0].name.text
                # Strip off the time zone info off the end as we want UTC
                clean_date = comment.published.text[:re.search(r"\.\d{3}", comment.published.text).end()]
                comment_date = self.parse_datetime(clean_date)
                website = ""
                if comment.author[0].uri:
                    website = comment.author[0].uri.text
                body = comment.content.text
                # add the comment as a dict to the end of the comments list
                self.add_comment(post=post, name=author_name, email=email, body=body, website=website, pub_date=comment_date)
            processed_posts.append(post_id)
            new_posts += 1
        start_index += 500
def _chmod_rwx(path):
    # type: (str) -> None
    """Grant rwx to user/group/other on *path*; silently skip missing paths."""
    if not os.path.exists(path):
        return
    os.chmod(path, os.stat(path).st_mode | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)


def _clear_chflags(path):
    # type: (str) -> None
    """Clear BSD file flags (e.g. immutable) on *path*; no-op elsewhere."""
    try:
        os.chflags(path, 0)
    except AttributeError:
        # os.chflags only exists on BSD/macOS platforms.
        pass


def set_write_bit(fn):
    # type: (str) -> None
    """Set read-write permissions for the current user on the target path. Fail silently
    if the path doesn't exist.

    :param str fn: The target filename or path
    :return: None
    """
    fn = fs_encode(fn)
    if not os.path.exists(fn):
        return
    _chmod_rwx(fn)
    if not os.path.isdir(fn):
        # For a plain file, also drop file flags on it and its parent directory.
        for path in (fn, os.path.dirname(fn)):
            _clear_chflags(path)
        return None
    # BUG FIX: the original called set_write_bit() recursively for every child
    # while already iterating os.walk(), re-walking every subtree (quadratic).
    # A single walk visiting each entry once has the same effect: directories
    # get chmod'd, files get chmod'd plus chflags on themselves and their parent.
    for root, dirs, files in os.walk(fn, topdown=False):
        for dir_ in dirs:
            _chmod_rwx(os.path.join(root, dir_))
        for file_ in files:
            target = os.path.join(root, file_)
            _chmod_rwx(target)
            if os.path.exists(target):
                _clear_chflags(target)
                _clear_chflags(root)
def update_balances(self, recursive=True):
    """Recompute this subtree's balance factor.

    The balance factor is ``left.height - right.height``; an empty tree
    (``self.node`` falsy) has balance 0.

    :param recursive: when True, refresh the child subtrees' balances first.
    """
    if not self.node:
        self.balance = 0
        return
    if recursive:
        # Bottom-up: children first, then this node.
        for subtree in (self.node.left, self.node.right):
            if subtree:
                subtree.update_balances()
    self.balance = self.node.left.height - self.node.right.height
def schemaNewValidCtxt(self):
    """Create an XML Schemas validation context based on the given
    schema.

    :raises treeError: when the underlying libxml2 call fails.
    :return: a SchemaValidCtxt holding a reference back to this schema.
    """
    handle = libxml2mod.xmlSchemaNewValidCtxt(self._o)
    if handle is None:
        raise treeError('xmlSchemaNewValidCtxt() failed')
    ctxt = SchemaValidCtxt(_obj=handle)
    # Keep the schema alive for as long as the validation context exists.
    ctxt.schema = self
    return ctxt
def sigma_operator_indices(A):
    r"""If A is an outer-product type operator |a><b| return a, b.

    A qualifies when it contains exactly one nonzero entry and that entry
    equals 1; otherwise ``(None, None)`` is returned.

    >>> sig = ket(2, 3) * bra(1, 3)
    >>> sigma_operator_indices(sig)
    (1, 0)
    >>> sigma_operator_indices(sig + sig.adjoint())
    (None, None)
    """
    Ne = A.shape[0]
    band = True
    # NOTE(review): `sum(A)` assumes summing A yields a scalar comparable to
    # 1; for a plain 2-D numpy array it would produce a row-sum array and this
    # comparison would be ambiguous -- confirm A's actual matrix type.
    if sum(A) != 1:
        band = False
    a = None;
    b = None
    for i in range(Ne):
        for j in range(Ne):
            if A[i, j] == 1:
                # Remember the (row, column) of the unit entry; if several
                # entries equal 1 the last one wins, but the sum check above
                # already disqualifies that case.
                a = i;
                b = j
            elif A[i, j] != 0:
                # Any other nonzero entry disqualifies A as a sigma operator.
                band = False
    if band:
        return a, b
    else:
        return None, None
def cms_check(migrate_cmd=False):
    """Runs the django CMS ``cms check`` command.

    :param migrate_cmd: forwarded to ``_create_db`` to control migrations.
    """
    from django.core.management import call_command
    # BUG FIX: only the `import cms` probe belongs in the try block. The
    # original also wrapped _create_db()/call_command(), so any ImportError
    # raised inside those was silently misreported as "django CMS missing".
    try:
        import cms  # NOQA # nopyflakes
    except ImportError:
        print('cms_check available only if django CMS is installed')
        return
    _create_db(migrate_cmd)
    call_command('cms', 'check')
def ensure_provisioning(table_name, key_name, num_consec_read_checks, num_consec_write_checks):
    """Ensure that provisioning is correct.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """
    # Honor the circuit breaker (configured globally or per table) before
    # doing any work; an open breaker resets both counters.
    if get_global_option('circuit_breaker_url') or get_table_option(key_name, 'circuit_breaker_url'):
        if circuit_breaker.is_open(table_name, key_name):
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)
    # Handle throughput alarm checks
    __ensure_provisioning_alarm(table_name, key_name)
    try:
        read_update_needed, updated_read_units, num_consec_read_checks = __ensure_provisioning_reads(table_name, key_name, num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = __ensure_provisioning_writes(table_name, key_name, num_consec_write_checks)
        # A needed update resets the consecutive-check counter for that side.
        if read_update_needed:
            num_consec_read_checks = 0
        if write_update_needed:
            num_consec_write_checks = 0
        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info('{0} - Changing provisioning to {1:d} ' 'read units and {2:d} write units'.format(table_name, int(updated_read_units), int(updated_write_units)))
            __update_throughput(table_name, key_name, updated_read_units, updated_write_units)
        else:
            logger.info('{0} - No need to change provisioning'.format(table_name))
    except JSONResponseError:
        # NOTE(review): these handlers only re-raise -- apparently kept to
        # make propagation of boto errors explicit; they have no effect.
        raise
    except BotoServerError:
        raise
    return num_consec_read_checks, num_consec_write_checks
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """Connects to Hetzner account, removes an existing record from the zone and returns a
    boolean, if deletion was successful or not. Uses identifier or rtype, name & content to
    lookup over all records of the zone for one or more records to delete.

    :param identifier: opaque record identifier; when given it is parsed
        into (rtype, name, content) and takes precedence.
    :param rtype: record type (e.g. 'A', 'TXT') used for lookup
    :param name: record name used for lookup
    :param content: record content used for lookup
    :return: True when nothing matched (nothing to delete), otherwise the
        result of posting the modified zone back to Hetzner.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if identifier:
            rtype, name, content = self._parse_identifier(identifier, ddata['zone']['data'])
            if rtype is None or name is None or content is None:
                # Unknown identifier: treat as already deleted.
                LOGGER.info('Hetzner => Record with identifier \'%s\' does not exist', identifier)
                return True
        # Prefer the session's CNAME target over the caller-supplied name.
        name = ddata['cname'] if ddata['cname'] else (self._fqdn_name(name) if name else None)
        records = self._list_records_in_zone(ddata['zone']['data'], rtype, name, content)
        if records:
            # Remove records from zone
            for record in records:
                rrset = ddata['zone']['data'].get_rdataset(record['name'] + '.', rdtype=record['type'])
                # Keep every rdata except the one matching the record content.
                rdatas = []
                for rdata in rrset:
                    if self._convert_content(record['type'], record['content']) != rdata.to_text():
                        rdatas.append(rdata.to_text())
                if rdatas:
                    # Other rdatas remain: replace the rrset with the survivors.
                    rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype, record['ttl'], rdatas)
                    ddata['zone']['data'].replace_rdataset(record['name'] + '.', rdataset)
                else:
                    # Last rdata removed: drop the whole rrset.
                    ddata['zone']['data'].delete_rdataset(record['name'] + '.', record['type'])
            # Post zone to Hetzner
            synced_change = self._post_zone(ddata['zone'])
            return synced_change
        LOGGER.info('Hetzner => Record lookup has no matches')
        return True
def validate_password_strength(value):
    """Validate that a password is at least 7 characters long and contains
    at least 1 digit and 1 letter.

    :param value: the candidate password string
    :raises ValidationError: on the first rule that fails (length, then
        digit, then letter).
    """
    min_length = 7
    if len(value) < min_length:
        raise ValidationError(
            _('Password must be at least {0} characters long.').format(min_length))
    # check for digit
    has_digit = any(ch.isdigit() for ch in value)
    if not has_digit:
        raise ValidationError(_('Password must contain at least 1 digit.'))
    # check for letter
    has_letter = any(ch.isalpha() for ch in value)
    if not has_letter:
        raise ValidationError(_('Password must contain at least 1 letter.'))
def check_info(info):
    """Validate info dict.

    Raise ValueError if validation fails.

    :param info: the decoded ``info`` dictionary of a metafile
    :return: the validated ``info`` dict, unchanged
    """
    # NOTE: uses Python 2 names (``basestring``, ``long``) -- py2 module.
    if not isinstance(info, dict):
        raise ValueError("bad metainfo - not a dictionary")
    # "pieces" must be a flat string of whole 20-byte SHA1 digests.
    pieces = info.get("pieces")
    if not isinstance(pieces, basestring) or len(pieces) % 20 != 0:
        raise ValueError("bad metainfo - bad pieces key")
    piece_size = info.get("piece length")
    if not isinstance(piece_size, (int, long)) or piece_size <= 0:
        raise ValueError("bad metainfo - illegal piece length")
    name = info.get("name")
    if not isinstance(name, basestring):
        raise ValueError("bad metainfo - bad name (type is %r)" % type(name).__name__)
    if not ALLOWED_ROOT_NAME.match(name):
        raise ValueError("name %s disallowed for security reasons" % name)
    # Exactly one of "files" (multi-file) / "length" (single-file) may be set.
    if ("files" in info) == ("length" in info):
        raise ValueError("single/multiple file mix")
    if "length" in info:
        length = info.get("length")
        if not isinstance(length, (int, long)) or length < 0:
            raise ValueError("bad metainfo - bad length")
    else:
        files = info.get("files")
        if not isinstance(files, (list, tuple)):
            raise ValueError("bad metainfo - bad file list")
        for item in files:
            if not isinstance(item, dict):
                raise ValueError("bad metainfo - bad file value")
            length = item.get("length")
            if not isinstance(length, (int, long)) or length < 0:
                raise ValueError("bad metainfo - bad length")
            path = item.get("path")
            if not isinstance(path, (list, tuple)) or not path:
                raise ValueError("bad metainfo - bad path")
            for part in path:
                if not isinstance(part, basestring):
                    raise ValueError("bad metainfo - bad path dir")
                part = fmt.to_unicode(part)
                # Reject traversal components and names outside the whitelist.
                if part == '..':
                    raise ValueError("relative path in %s disallowed for security reasons" % '/'.join(path))
                if part and not ALLOWED_PATH_NAME.match(part):
                    raise ValueError("path %s disallowed for security reasons" % part)
        # Two entries resolving to the same joined path are forbidden.
        file_paths = [os.sep.join(item["path"]) for item in files]
        if len(set(file_paths)) != len(file_paths):
            raise ValueError("bad metainfo - duplicate path")
    return info
def get_pubkey(self):
    '''Return the key string for the SSH public key'''
    # Prefer the user's own ~/.ssh key when the master opts ask for it and
    # the key actually exists; otherwise use the configured salt-ssh key.
    home_key = os.path.expanduser('~/.ssh/id_rsa')
    use_home_key = (
        '__master_opts__' in self.opts
        and self.opts['__master_opts__'].get('ssh_use_home_key')
        and os.path.isfile(home_key)
    )
    if use_home_key:
        priv = home_key
    else:
        priv = self.opts.get('ssh_priv', os.path.join(self.opts['pki_dir'], 'ssh', 'salt-ssh.rsa'))
    pub = '{0}.pub'.format(priv)
    with salt.utils.files.fopen(pub, 'r') as fp_:
        # Second whitespace-separated field of the .pub file is the key body.
        return '{0} rsa root@master'.format(fp_.read().split()[1])
def _conf ( cls , opts ) :
"""Setup logging via ini - file from logging _ conf _ file option .""" | if not opts . logging_conf_file :
return False
if not os . path . exists ( opts . logging_conf_file ) : # FileNotFoundError added only in Python 3.3
# https : / / docs . python . org / 3 / whatsnew / 3.3 . html # pep - 3151 - reworking - the - os - and - io - exception - hierarchy
raise OSError ( "Error: Unable to locate specified logging configuration file!" )
logging . config . fileConfig ( opts . logging_conf_file , disable_existing_loggers = False )
return True |
def error(self, i: int = None) -> str:
    """Return a formatted error tag, optionally prefixed with a number.

    :param i: optional index shown before the tag
    """
    tag = "[" + colors.red("error") + "]"
    if i is None:
        return tag
    return str(i) + " " + tag
def load(self, rule_type, quiet=False):
    """Open a JSON file defining a ruleset and load it into a Ruleset object.

    :param rule_type: rule type forwarded to ``handle_rule_versions``
    :param quiet: suppress the "file does not exist" error message
    :return: None; populates ``self.rules`` and ``self.about`` in place
    """
    if self.filename and os.path.exists(self.filename):
        try:
            with open(self.filename, 'rt') as f:
                ruleset = json.load(f)
            self.about = ruleset['about'] if 'about' in ruleset else ''
            self.rules = {}
            for filename in ruleset['rules']:
                self.rules[filename] = []
                for rule in ruleset['rules'][filename]:
                    self.handle_rule_versions(filename, rule_type, rule)
        except Exception as e:
            printException(e)
            printError('Error: ruleset file %s contains malformed JSON.' % self.filename)
            # NOTE(review): on failure ``rules`` becomes a list, while the
            # success path builds a dict -- callers must cope with both types;
            # confirm whether this asymmetry is intentional.
            self.rules = []
            self.about = ''
    else:
        self.rules = []
        if not quiet:
            printError('Error: the file %s does not exist.' % self.filename)
def get_created_date_metadata(self):
    """Gets the metadata for the asset creation date.

    return: (osid.Metadata) - metadata for the created date
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
    # Copy the template so the stored metadata is never mutated.
    metadata = dict(self._mdata['created_date'])
    metadata['existing_date_time_values'] = self._my_map['createdDate']
    return Metadata(**metadata)
def get_sequences(input_seqs):
    """Returns a list of Sequences sorted by decreasing frequency.

    Parameters
    ----------
    input_seqs : iterable of (str, str)
        The list of input sequences in (label, sequence) format

    Returns
    -------
    list of Sequence, or None when no sequences were found.

    Raises
    ------
    ValueError
        If all the sequences do not have the same length either aligned or
        unaligned.
    """
    try:
        # ``seq_id`` instead of ``id``: the original shadowed the builtin.
        seqs = [Sequence(seq_id, seq) for seq_id, seq in input_seqs]
    except Exception:
        # NOTE(review): any parse error is treated the same as an empty
        # input -- kept as-is to preserve behavior.
        seqs = []
    if not seqs:
        logger = logging.getLogger(__name__)
        # logger.warn() is a deprecated alias; use warning().
        logger.warning('No sequences found in fasta file!')
        return None
    # Check that all the sequence lengths (aligned and unaligned) are the same
    aligned_lengths = set(s.length for s in seqs)
    unaligned_lengths = set(s.unaligned_length for s in seqs)
    if len(aligned_lengths) != 1 or len(unaligned_lengths) != 1:
        raise ValueError("Not all sequence have the same length. Aligned lengths: %s, " "sequence lengths: %s" % (", ".join(map(str, aligned_lengths)), ", ".join(map(str, unaligned_lengths))))
    # Most frequent sequences first.
    return sorted(seqs, key=attrgetter('frequency'), reverse=True)
def filter_noexpand_columns(columns):
    """Return columns not containing and containing the noexpand prefix.

    Parameters
    ----------
    columns : sequence of str
        A sequence of strings to be split

    Returns
    -------
    Two lists, the first containing strings without the noexpand prefix, the
    second containing those that do with the prefix filtered out.
    """
    prefix_len = len(NOEXPAND_PREFIX)
    plain, stripped = [], []
    # Single pass: route each column by whether it carries the prefix.
    for column in columns:
        if column.startswith(NOEXPAND_PREFIX):
            stripped.append(column[prefix_len:])
        else:
            plain.append(column)
    return plain, stripped
def _get_action_urls(self):
    """Get the url patterns that route each action to a view.

    Builds one URL for object-level ("change") actions and one for
    changelist-level actions; both share the same URL name and dispatch on
    the ``tool`` kwarg. Also records the admin view name on
    ``self.tools_view_name`` as a side effect.
    """
    actions = {}
    model_name = self.model._meta.model_name
    # e.g.: polls_poll
    base_url_name = '%s_%s' % (self.model._meta.app_label, model_name)
    # e.g.: polls_poll_actions
    model_actions_url_name = '%s_actions' % base_url_name
    self.tools_view_name = 'admin:' + model_actions_url_name
    # WISHLIST use get_change_actions and get_changelist_actions
    # TODO separate change and changelist actions
    # Map every declared action name to its bound method on this admin.
    for action in chain(self.change_actions, self.changelist_actions):
        actions[action] = getattr(self, action)
    return [
        # change, supports the same pks the admin does
        # https://github.com/django/django/blob/stable/1.10.x/django/contrib/admin/options.py#L555
        url(r'^(?P<pk>.+)/actions/(?P<tool>\w+)/$',
            self.admin_site.admin_view(  # checks permissions
                ChangeActionView.as_view(model=self.model, actions=actions, back='admin:%s_change' % base_url_name, current_app=self.admin_site.name, )),
            name=model_actions_url_name),
        # changelist
        url(r'^actions/(?P<tool>\w+)/$',
            self.admin_site.admin_view(  # checks permissions
                ChangeListActionView.as_view(model=self.model, actions=actions, back='admin:%s_changelist' % base_url_name, current_app=self.admin_site.name, )),
            # Dupe name is fine. https://code.djangoproject.com/ticket/14259
            name=model_actions_url_name),
    ]
def stream(self, name):
    """Stream can be used to record different time series:

    run.history.stream("batch").add({"gradients": 1})

    :param name: name of the sub-stream; created lazily on first access.
    :raises ValueError: when called on a non-default (nested) stream.
    """
    if self.stream_name != "default":
        raise ValueError("Nested streams aren't supported")
    # BUG FIX: the original used `== None`, which invokes __eq__ on the
    # stored object; identity comparison is the correct idiom here.
    if self._streams.get(name) is None:
        self._streams[name] = History(self.fname, out_dir=self.out_dir, add_callback=self._add_callback, stream_name=name)
    return self._streams[name]
def validate(self, value):
    """Check if ``value`` is valid.

    :returns: [errors] If ``value`` is invalid, otherwise [].
    """
    # A type failure short-circuits: constraints are never evaluated.
    if not self._is_valid(value):
        return [self.fail(value)]
    # Collect a message from every constraint that rejects the value.
    errors = []
    for constraint in self._constraints_inst:
        failure = constraint.is_valid(value)
        if failure:
            errors.append(failure)
    return errors
def get_text_position(fig, ax, ha='left', va='top', pad_scale=1.0):
    """Return text position inside of the given axis.

    :param fig: figure used to measure the axes size in inches
    :param ax: axes the text will be placed in
    :param ha: horizontal anchor: 'left' or 'right' (case-insensitive)
    :param va: vertical anchor: 'top'/'up'/'upper' or 'bottom'/'down'/'lower'
    :param pad_scale: multiplier for the padding between text and axis edge
    :return: (pos_x, pos_y) in data coordinates
    """
    ## Check and preprocess input arguments
    try:
        pad_scale = float(pad_scale)
    except (TypeError, ValueError):  # was a bare except; only conversion errors apply
        raise TypeError("'pad_scale' should be of type 'float'")
    for arg in (va, ha):
        assert type(arg) is str
    # BUG FIX: the original lowered the loop variable (`arg = arg.lower()`),
    # a no-op. Lowercase the actual arguments so mixed-case input works.
    ha = ha.lower()
    va = va.lower()
    ## Get axis size in inches
    ax_height, ax_width = get_ax_size_in_inch(fig, ax)
    ## Construct inversion factor from inch to plot coordinate
    length_x = ax.get_xlim()[1] - ax.get_xlim()[0]
    length_y = ax.get_ylim()[1] - ax.get_ylim()[0]
    inch2coord_x = length_x / ax_width
    inch2coord_y = length_y / ax_height
    ## Set padding size relative to the geometric mean of the axis size
    ax_length_geom_average = (ax_height * ax_width) ** 0.5
    pad_inch = ax_length_geom_average * 0.03 * pad_scale
    pad_coord_x = pad_inch * inch2coord_x
    pad_coord_y = pad_inch * inch2coord_y
    if ha == 'left':
        pos_x = ax.get_xlim()[0] + pad_coord_x
    elif ha == 'right':
        pos_x = ax.get_xlim()[1] - pad_coord_x
    else:
        raise Exception("Unsupported value for 'ha'")
    if va in ('top', 'up', 'upper'):
        pos_y = ax.get_ylim()[1] - pad_coord_y
    elif va in ('bottom', 'down', 'lower'):
        pos_y = ax.get_ylim()[0] + pad_coord_y
    else:
        raise Exception("Unsupported value for 'va'")
    return pos_x, pos_y
def messages(count, size):
    '''Generator for count messages of the provided size'''
    import string
    # BUG FIX: string.lowercase / string.uppercase are Python 2 only and
    # raise AttributeError on Python 3; ascii_lowercase / ascii_uppercase
    # exist (with the same values) on both.
    # Make sure we have at least 'size' letters
    letters = islice(cycle(chain(string.ascii_lowercase, string.ascii_uppercase)), size)
    return islice(cycle(''.join(l) for l in permutations(letters, size)), count)
def find_any_valid_version(self):  # type: () -> str
    """Find version candidates, return first (or any, since they aren't ordered).

    Blow up if versions are not homogeneous (unless they differ only by a
    patch level, in which case the greater one wins).

    :return: the chosen version string
    :raises JiggleVersionException: when no versions can be found and
        ``force_init`` is off.
    """
    versions = self.all_current_versions()
    if not versions and not self.force_init:
        raise JiggleVersionException("Have no versions to work with, failed to find any. Include --init to start out at 0.1.0")
    if not versions and self.force_init:
        # Bootstrap a brand new project at 0.1.0.
        versions = {"force_init": "0.1.0"}
    # BUG FIX: the original tested `not self.all_versions_equal(versions)`
    # twice in identically nested ifs; one check suffices.
    if len(versions) > 1 and not self.all_versions_equal(versions):
        almost_same = self.almost_the_same_version(list(versions.values()))
        if almost_same:  # TODO: disable with strict option
            # (message typo fixed: "very" -> "vary")
            logger.warning("Versions vary by a patch level, will use greater.")
            return unicode(almost_same)
        # NOTE(review): heterogeneous versions that are not "almost the same"
        # fall through and return the first value -- confirm intent.
    if not versions.keys():
        raise JiggleVersionException("Noooo! Must find a value" + unicode(versions))
    return unicode(first_value_in_dict(versions))
def _kill_cursors(self, cursor_ids, address, topology, session):
    """Send a kill cursors message with the given ids.

    :param cursor_ids: list of server-side cursor ids to kill
    :param address: (host, port) tuple or _CursorAddress of the owning
        server; None when close_cursor() was called with no address
    :param topology: topology used to select the target server
    :param session: client session forwarded to the killCursors command
    """
    listeners = self._event_listeners
    publish = listeners.enabled_for_commands
    if address:
        # address could be a tuple or _CursorAddress, but
        # select_server_by_address needs (host, port).
        server = topology.select_server_by_address(tuple(address))
    else:
        # Application called close_cursor() with no address.
        server = topology.select_server(writable_server_selector)
    try:
        namespace = address.namespace
        db, coll = namespace.split('.', 1)
    except AttributeError:
        # Plain (host, port) tuple carries no namespace: fall back to the
        # legacy OP_KILL_CURSORS wire message below.
        namespace = None
        db = coll = "OP_KILL_CURSORS"
    spec = SON([('killCursors', coll), ('cursors', cursor_ids)])
    with server.get_socket(self.__all_credentials) as sock_info:
        if sock_info.max_wire_version >= 4 and namespace is not None:
            # Modern servers support the killCursors command directly.
            sock_info.command(db, spec, session=session, client=self)
        else:
            # Legacy path: hand-roll the wire message and, when command
            # monitoring is on, publish start/success/failure events manually.
            if publish:
                start = datetime.datetime.now()
            request_id, msg = message.kill_cursors(cursor_ids)
            if publish:
                duration = datetime.datetime.now() - start
                # Here and below, address could be a tuple or
                # _CursorAddress. We always want to publish a
                # tuple to match the rest of the monitoring
                # API.
                listeners.publish_command_start(spec, db, request_id, tuple(address))
                start = datetime.datetime.now()
            try:
                sock_info.send_message(msg, 0)
            except Exception as exc:
                if publish:
                    dur = ((datetime.datetime.now() - start) + duration)
                    listeners.publish_command_failure(dur, message._convert_exception(exc), 'killCursors', request_id, tuple(address))
                raise
            if publish:
                duration = ((datetime.datetime.now() - start) + duration)
                # OP_KILL_CURSORS returns no reply, fake one.
                reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
                listeners.publish_command_success(duration, reply, 'killCursors', request_id, tuple(address))
def get(cls, name, service=Service()):
    '''fetch given bin from the service'''
    # NOTE(review): the default Service() is instantiated once at import time
    # and shared across calls -- assumed intentional; confirm.
    request = SRequest('GET', pathjoin(cls.path, name))
    return cls.from_response(service.send(request), service=service)
async def _send_rtcp_pli(self, media_ssrc):
    """Send an RTCP packet to report picture loss.

    :param media_ssrc: SSRC of the media source the PLI refers to.
    """
    # Can only report once our own sender SSRC is known.
    if self.__rtcp_ssrc is not None:
        packet = RtcpPsfbPacket(fmt=RTCP_PSFB_PLI, ssrc=self.__rtcp_ssrc, media_ssrc=media_ssrc)
        await self._send_rtcp(packet)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.