signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def j0_2(a=1):
    r"""Hankel transform pair J0_2 ([Ande75]_).

    Returns a Ghosh transform-pair object whose analytical left- and
    right-hand sides are parameterized by *a*.
    """
    def lhs(x):
        # Analytical function side: exp(-a*x)
        return np.exp(-a * x)

    def rhs(b):
        # Analytical transform side: 1/sqrt(b^2 + a^2)
        return 1 / np.sqrt(b ** 2 + a ** 2)

    return Ghosh('j0', lhs, rhs)
def str2dict_keys(str_in):
    """Extract the keys from a string that represents a dict, sorted by key.

    Args:
        str_in (str): String that contains a python dict literal.

    Returns:
        list: Sorted keys, or None if no valid dict was found.
    """
    parsed = str2dict(str_in)
    if parsed is None:
        return None
    # sorted() iterates the dict's keys directly
    return sorted(parsed)
def sizeHint(self, option, index):
    """Reimplements the :meth:`QStyledItemDelegate.sizeHint` method.

    Measures the item's HTML content with a QTextDocument to compute
    the ideal cell size.
    """
    text = umbra.ui.common.QVariant_to_string(index.model().data(index))
    self.__label.setText(text)
    document = QTextDocument()
    document.setDefaultFont(option.font)
    document.setHtml(text)
    return QSize(document.idealWidth() + self.__indent,
                 option.fontMetrics.height())
def _filter_for_scenario(self):
    """Find the scenarios matching the provided scenario name.

    :return: list of matching scenario objects
    """
    matches = []
    for config in self._configs:
        if config.scenario.name == self._scenario_name:
            matches.append(config.scenario)
    return matches
def main():
    """i2a creates ASCII art from images right on your terminal."""
    arguments = docopt(__doc__, version=__version__)
    # Without a FILE argument there is nothing to render; show usage instead.
    if not arguments['FILE']:
        print(__doc__)
        return
    display_output(arguments)
def _check_box_toggled(self, widget, data=None):
    """Toggle sensitivity of the entry/browse widgets tied to a checkbox.

    `data` is the argument name whose associated widgets follow the
    checkbox's active state.
    """
    active = widget.get_active()
    widgets = self.args[data]
    for part in ('entry', 'browse_btn'):
        if part in widgets:
            widgets[part].set_sensitive(active)
    self.path_window.show_all()
def enable_scanners_by_ids(self, scanner_ids):
    """Enable a list of scanner IDs."""
    joined_ids = ','.join(scanner_ids)
    self.logger.debug('Enabling scanners with IDs {0}'.format(joined_ids))
    return self.zap.ascan.enable_scanners(joined_ids)
def serialize(self, elt, sw, pyobj, name=None, orig=None, **kw):
    """Handle the start and end tags, and attributes; call out to
    get_formatted_content to get the textNode value.

    Parameters:
        elt -- ElementProxy/DOM element
        sw -- SoapWriter instance
        pyobj -- processed content
    Keyword Parameters:
        name -- substitute name, (nspname, name) or name
        orig -- original object used for href/reference bookkeeping
    """
    objid = _get_idstr(pyobj)
    ns, n = self.get_name(name, objid)
    el = elt.createAppendElement(ns, n)
    # nillable: emit xsi:nil and stop
    if self.nillable is True and pyobj is Nilled:
        self.serialize_as_nil(el)
        return None
    # other attributes
    self.set_attributes(el, pyobj)
    # soap href attribute: reference already-serialized content
    unique = self.unique or kw.get('unique', False)
    if unique is False and sw.Known(orig or pyobj):
        self.set_attribute_href(el, objid)
        return None
    # xsi:type attribute
    if kw.get('typed', self.typed) is True:
        self.set_attribute_xsi_type(el, **kw)
    # soap id attribute
    if self.unique is False:
        self.set_attribute_id(el, objid)
    # Content, <empty tag/>
    self.serialize_text_node(el, sw, pyobj)
    return el
def alias_item(self, item_id, alias_id):
    """Adds an ``Id`` to an ``Item`` for the purpose of creating compatibility.

    The primary ``Id`` of the ``Item`` is determined by the provider.
    The new ``Id`` is an alias to the primary ``Id``. If the alias is a
    pointer to another item, it is reassigned to the given item ``Id``.

    arg:    item_id (osid.id.Id): the ``Id`` of an ``Item``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is in use as a primary ``Id``
    raise:  NotFound - ``item_id`` not found
    raise:  NullArgument - ``item_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.alias_resources_template
    self._alias_id(primary_id=item_id, equivalent_id=alias_id)
def _getDeltas(self, firstSub, secondSub):
    """Arguments must have "start" and "end" properties which are FrameTimes."""
    def gap(a, b):
        # Distance between two FrameTimes; max-min avoids assuming
        # the type supports __abs__ or negative values.
        return max(a, b) - min(a, b)

    return (gap(firstSub.start, secondSub.start),
            gap(firstSub.end, secondSub.end))
def cb(self, elt, sw, pyobj, unsuppressedPrefixes=[]):
    """Append *pyobj* under *elt*.

    pyobj -- xml.dom.Node.ELEMENT_NODE, or a plain string which is
    appended as a text node.
    """
    if type(pyobj) in _stringtypes:
        elt.createAppendTextNode(pyobj)
        return
    # Grab the document, import the node, and append it.
    doc = elt.getDocument()
    imported = doc.importNode(pyobj, deep=1)
    child = elt.node.appendChild(imported)
    # Copy ancestors' xmlns: attributes into the appended node so
    # namespace declarations are not lost.
    parent = pyobj.parentNode
    while parent.nodeType == _Node.ELEMENT_NODE:
        for attr in parent.attributes:
            if attr.name.startswith('xmlns:') and attr.name not in child.attributes.keys():
                child.setAttributeNode(attr.cloneNode(1))
        parent = parent.parentNode
def randomEarlyShared(store, role):
    """If there are no explicitly-published public index pages to display,
    find a shared item to present to the user as first."""
    for candidate in role.allRoles():
        share = store.findFirst(Share, Share.sharedTo == candidate,
                                sort=Share.storeID.ascending)
        if share is not None:
            return share.sharedItem
    raise NoSuchShare("Why, that user hasn't shared anything at all!")
def level(self, channel, axis, npts, *, verbose=True):
    """Subtract the average value of npts at the edge of a given axis.

    Parameters
    ----------
    channel : int or str
        Channel to level.
    axis : int
        Axis to level along.
    npts : int
        Number of points to average for each slice. Positive numbers
        take points at leading indices and negative numbers take points
        at trailing indices.
    verbose : bool (optional)
        Toggle talkback. Default is True.
    """
    warnings.warn("level", category=wt_exceptions.EntireDatasetInMemoryWarning)
    channel_index = wt_kit.get_index(self.channel_names, channel)
    channel = self.channels[channel_index]
    # verify npts not zero
    npts = int(npts)
    if npts == 0:
        raise wt_exceptions.ValueError("npts must not be zero")
    # get subtrahend: mean over the first (or last) npts points along axis
    ss = [slice(None)] * self.ndim
    if npts > 0:
        ss[axis] = slice(0, npts, None)
    else:
        ss[axis] = slice(npts, None, None)
    # index with a tuple: indexing arrays/datasets with a plain list is
    # deprecated in numpy and rejected by h5py
    subtrahend = np.nanmean(channel[tuple(ss)], axis=axis)
    if self.ndim > 1:
        subtrahend = np.expand_dims(subtrahend, axis=axis)
    # level
    channel -= subtrahend
    # finish
    channel._null = 0
    if verbose:
        print("channel {0} leveled along axis {1}".format(channel.natural_name, axis))
def _read_gaf_nts(self, fin_gaf, hdr_only, allow_missing_symbol):
    """Read a GAF file; store annotation data in a list of namedtuples."""
    nts = []
    ver = None
    hdrobj = GafHdr()
    datobj = None
    # pylint: disable=not-callable
    ntobj_make = None
    get_gafvals = None
    lnum = -1
    line = ''
    try:
        with open(fin_gaf) as ifstrm:
            for lnum, line in enumerate(ifstrm, 1):
                # Data section: parse each annotation line
                if get_gafvals:
                    gafvals = get_gafvals(line)
                    if gafvals:
                        nts.append(ntobj_make(gafvals))
                    else:
                        datobj.ignored.append((lnum, line))
                # Header section (runs until the first non-'!' line)
                elif datobj is None:
                    if line[0] == '!':
                        if ver is None and line[1:13] == 'gaf-version:':
                            ver = line[13:].strip()
                        hdrobj.chkaddhdr(line)
                    else:
                        self.hdr = hdrobj.get_hdr()
                        if hdr_only:
                            return nts
                        datobj = GafData(ver, allow_missing_symbol)
                        get_gafvals = datobj.get_gafvals
                        ntobj_make = datobj.get_ntobj()._make
    except Exception as inst:
        import traceback
        traceback.print_exc()
        sys.stderr.write("\n **FATAL-gaf: {MSG}\n\n".format(MSG=str(inst)))
        sys.stderr.write("**FATAL-gaf: {FIN}[{LNUM}]:\n{L}".format(FIN=fin_gaf, L=line, LNUM=lnum))
        if datobj is not None:
            datobj.prt_line_detail(sys.stdout, line)
        sys.exit(1)
    self.datobj = datobj
    return nts
def get_instance_field(self, field_name):
    """Return the field object with the given name (works for a bound instance).

    Raises AttributeError when *field_name* is not a field of the model.
    """
    if self.has_field(field_name):
        return getattr(self, field_name)
    raise AttributeError('"%s" is not a field for the model "%s"' % (field_name, self.__class__.__name__))
def search_fast(self, text):
    """Do a sloppy quick "search" via the json index."""
    url = "{base_url}/{text}/json".format(base_url=self.base_url, text=text)
    return self.impl.get(url).json()["info"]["package_url"]
def record_set(self, train, labels=None, channel="train"):
    """Build a :class:`~RecordSet` from a numpy :class:`~ndarray` matrix and label vector.

    For the 2D ``ndarray`` ``train``, each row is converted to a
    :class:`~Record` object. The vector is stored in the "values" entry of
    the ``features`` property of each Record. If ``labels`` is not None,
    each corresponding label is assigned to the "values" entry of the
    ``labels`` property of each Record.

    The collection of ``Record`` objects are protobuf serialized and
    uploaded to new S3 locations. A manifest file is generated containing
    the list of objects created and also stored in S3. The number of S3
    objects created is controlled by the ``train_instance_count`` property
    on this Estimator. One S3 object is created per training instance.

    Args:
        train (numpy.ndarray): A 2D numpy array of training data.
        labels (numpy.ndarray): A 1D numpy array of labels. Its length must
            be equal to the number of rows in ``train``.
        channel (str): The SageMaker TrainingJob channel this RecordSet
            should be assigned to.

    Returns:
        RecordSet: A RecordSet referencing the encoded, uploaded training
        and label data.
    """
    s3 = self.sagemaker_session.boto_session.resource('s3')
    parsed = urlparse(self.data_location)
    bucket = parsed.netloc
    # Namespace the upload under a per-call, timestamped prefix.
    key_prefix = parsed.path + '{}-{}/'.format(type(self).__name__, sagemaker_timestamp())
    key_prefix = key_prefix.lstrip('/')
    logger.debug('Uploading to bucket {} and key_prefix {}'.format(bucket, key_prefix))
    manifest_s3_file = upload_numpy_to_s3_shards(
        self.train_instance_count, s3, bucket, key_prefix, train, labels)
    logger.debug("Created manifest file {}".format(manifest_s3_file))
    return RecordSet(manifest_s3_file, num_records=train.shape[0],
                     feature_dim=train.shape[1], channel=channel)
def xmoe2_v1_l4k():
    """xmoe2_v1 configured for sequence length 4096."""
    hp = xmoe2_v1()
    hp.batch_size = 32
    hp.max_length = 4096
    hp.split_to_length = 4096
    hp.reshape_logits_hack = True
    return hp
def add_metadata_sectors(self, vtoc, sector_list, header):
    """Add track/sector list."""
    tslist = BaseSectorList(header)
    # Build one TS sector per header.ts_pairs chunk of the sector list.
    for start in range(0, len(sector_list), header.ts_pairs):
        end = min(start + header.ts_pairs, len(sector_list))
        if _xd:
            log.debug("ts: %d-%d" % (start, end))
        ts_sector = Dos33TSSector(header, sector_list, start, end)
        ts_sector.ts_start, ts_sector.ts_end = start, end
        tslist.append(ts_sector)
    self.num_tslists = len(tslist)
    vtoc.assign_sector_numbers(self, tslist)
    sector_list.extend(tslist)
    self.track, self.sector = header.track_from_sector(tslist[0].sector_num)
    if _xd:
        log.debug("track/sector lists:\n%s" % str(tslist))
def get_session(self, token=None):
    """Return an instance of :attr:`session_obj`.

    If provided, the `token` parameter is used to initialize an
    authenticated session, otherwise an unauthenticated session object is
    generated.

    :param token: A token with which to initialize the session.
    :type token: str
    """
    if token is None:  # pragma: no cover
        return self.session_obj(self.client_id, self.client_secret, service=self)
    return self.session_obj(self.client_id, self.client_secret, token, service=self)
def after_this_websocket(func: Callable) -> Callable:
    """Schedule *func* to be called after the current websocket.

    This is useful in situations whereby you want an after websocket
    function for a specific route or circumstance only, for example,

    .. code-block:: python

        def index():
            @after_this_websocket
            def set_cookie(response: Optional[Response]):
                response.set_cookie('special', 'value')
                return response

    .. note::
        The response is an optional argument, and will only be passed if
        the websocket was not active (i.e. there was an error).
    """
    _websocket_ctx_stack.top._after_websocket_functions.append(func)
    return func
def reset_alarm_ranges(self, parameter):
    """Reset all alarm limits for the specified parameter to their original MDB value."""
    req = mdb_pb2.ChangeParameterRequest()
    req.action = mdb_pb2.ChangeParameterRequest.RESET_ALARMS
    url = '/mdb/{}/{}/parameters/{}'.format(self._instance, self._processor, parameter)
    # Fire the request; the response carries no payload we use, so the
    # unused local that previously held it has been removed.
    self._client.post_proto(url, data=req.SerializeToString())
def define(self, key, value):
    """Define the value for the inputted key by setting both its default
    and value to the inputted value.

    :param key:   <str>
    :param value: <variant>
    """
    normalized = nstr(key)
    self._defaults[normalized] = value
    self[normalized] = value
def set_blend_equation(self, mode_rgb, mode_alpha=None):
    """Specify the equation for RGB and alpha blending.

    Parameters
    ----------
    mode_rgb : str
        Mode for RGB.
    mode_alpha : str | None
        Mode for Alpha. If None, ``mode_rgb`` is used.

    Notes
    -----
    See ``set_blend_equation`` for valid modes.
    """
    if mode_alpha is None:
        mode_alpha = mode_rgb
    self.glir.command('FUNC', 'glBlendEquationSeparate', mode_rgb, mode_alpha)
def post_question_answer(self, number: str, description: str, file_path: str) -> bool:
    """Upload the homework file for a specific assignment.

    Returns True when the server accepted the upload, False on timeout or
    when the server reports that no file was uploaded.
    """
    try:
        params = {'hwId': number}
        data = {'FileDesc': description}
        # Context manager closes the file handle even on errors
        # (the original opened the file and never closed it).
        with open(file_path, 'rb') as hw_file:
            files = {'hwFile': hw_file}
            self.__session.get(self.__url + '/upLoadHw', params=params, timeout=0.5, verify=False)
            response = self.__session.post(self.__url + '/upLoadFile', data=data, files=files, timeout=0.5)
        soup = BeautifulSoup(response.text, 'html.parser')
        # Server echoes this exact message when the upload was rejected.
        return soup.find('body').get_text().strip() != '您沒有上傳檔案 請重新操作'
    except requests.exceptions.Timeout:
        return False
def get_complexes(self):
    """Extract INDRA Complex Statements from the BioPAX model.

    This method searches for org.biopax.paxtools.model.level3.Complex
    objects which represent molecular complexes. It doesn't reuse BioPAX
    Pattern's org.biopax.paxtools.pattern.PatternBox.inComplexWith query
    since that retrieves pairs of complex members rather than the full
    complex.
    """
    for obj in self.model.getObjects().toArray():
        bpe = _cast_biopax_element(obj)
        if not _is_complex(bpe):
            continue
        ev = self._get_evidence(bpe)
        members = self._get_complex_members(bpe)
        if members is None:
            continue
        if len(members) > 10:
            logger.debug('Skipping complex with more than 10 members.')
            continue
        for c in _get_combinations(members):
            self.statements.append(decode_obj(Complex(c, ev), encoding='utf-8'))
def make_locations(locations=None, verbose=True):
    '''Creates folders.

    :param locations:
        A list of folders to create (can be a dictionary, see note below)
    :param verbose:
        Warn if any folders were created

    .. note::
        * |params_locations_dict|
        * |param_locations_none|
    '''
    from photon.util.structures import to_list
    from photon.util.system import shell_notify

    if not locations:
        locations = get_locations().values()
    created = []
    # Deepest paths first so makedirs creates whole hierarchies once.
    for entry in reversed(sorted(to_list(locations))):
        if not _path.exists(entry):
            _makedirs(entry)
            created.append(entry)
    if verbose and created:
        shell_notify('path created', state=None, more=created)
    return created
def __get_segments_from_node(node, graph):
    """Calculate the segments that can emanate from a particular node on
    the main cycle.

    Returns a new list containing the node's edges.
    """
    return list(graph.get_node(node)['edges'])
def discover(source):
    """Given a JavaScript file, find the sourceMappingURL line."""
    lines = source.splitlines()
    # Source maps are only going to exist at either the top or bottom of
    # the document. Technically nothing indicates *where* it should exist,
    # so we are generous and assume it's somewhere in the first or last
    # 5 lines. Anywhere else and you're probably doing it wrong.
    if len(lines) > 10:
        candidates = lines[:5] + lines[-5:]
    else:
        candidates = lines
    for line in set(candidates):
        # Everything AFTER the pragma (21 chars long) is the URL.
        if line[:21] in ('//# sourceMappingURL=', '//@ sourceMappingURL='):
            return line[21:].rstrip()
    # XXX: Return None or raise an exception?
    return None
def add_bookmark(request):
    """This view serves and validates a bookmark form.

    If requested via ajax it also returns the drop bookmark form to
    replace the add bookmark form.
    """
    if request.method == "POST":
        form = BookmarkForm(user=request.user, data=request.POST)
        if form.is_valid():
            bookmark = form.save()
            if not request.is_ajax():
                messages.success(request, 'Bookmark added')
                next_url = request.POST.get('next')
                if next_url:
                    return HttpResponseRedirect(next_url)
                return HttpResponse('Added')
            return render_to_response('admin_tools/menu/remove_bookmark_form.html',
                                      {'bookmark': bookmark, 'url': bookmark.url})
        # invalid POST: fall through and re-render the bound form
    else:
        form = BookmarkForm(user=request.user)
    return render_to_response('admin_tools/menu/form.html',
                              {'form': form, 'title': 'Add Bookmark'})
def main():
    """MAIN"""
    config = {
        "api": {
            "services": [
                {"name": "my_api", "testkey": "testval"},
            ],
            "calls": {
                "hello_world": {"delay": 5, "priority": 1, "arguments": None},
                "marco": {"delay": 1, "priority": 1},
                "pollo": {"delay": 1, "priority": 1},
            },
        }
    }
    app = AppBuilder([MyAPI], Strategy(Print()), AppConf(config))
    app.run()
def stop(self):
    """Stop this WriterProcessBase, and reset the cursor."""
    self.stop_flag.value = True
    with self.lock:
        reset = Control().text(C(' ', style='reset_all'))
        reset.pos_restore().move_column(1).erase_line().write(self.file)
def send_mail(self, subject, to, template, **template_ctx):
    """Utility method to send mail with the `mail` template context."""
    if not self.mail:
        from warnings import warn
        warn('Attempting to send mail without the mail bundle installed! '
             'Please install it, or fix your configuration.')
        return
    ctx = dict(**self.security.run_ctx_processor('mail'), **template_ctx)
    self.mail.send(subject, to, template, **ctx)
def prop_or(default, key, dct_or_obj):
    """Ramda propOr implementation. This also resolves object attributes, so
    key can be a dict prop or an attribute of dct_or_obj.

    :param default: Value if dct_or_obj doesn't have key or the resolved value is None
    :param key: Dict key or attribute name to resolve
    :param dct_or_obj: A dict or any object
    :return: The resolved value, or default
    """
    # Bug fix: the original called isinstance(dict, dct_or_obj) and
    # getattr(key, dct_or_obj) with swapped arguments, raising TypeError
    # on every call.
    if isinstance(dct_or_obj, dict):
        value = dct_or_obj.get(key, default)
    elif dct_or_obj is not None and isinstance(key, str) and hasattr(dct_or_obj, key):
        value = getattr(dct_or_obj, key)
    else:
        value = default
    # 0 and False are ok, only None falls back to the default
    return default if value is None else value
def parse_authn_request(self, enc_request, binding=BINDING_HTTP_REDIRECT):
    """Parse an Authentication Request.

    :param enc_request: The request in its transport format
    :param binding: Which binding that was used to transport the message
        to this entity.
    :return: A request instance
    """
    return self._parse_request(enc_request, AuthnRequest,
                               "single_sign_on_service", binding)
def bills(self, member_id, type='introduced'):
    """Same as BillsClient.by_member."""
    return self.fetch("members/%s/bills/%s.json" % (member_id, type))
def on_message(self, con, event):
    """Handles message stanzas.

    Dispatches to pattern-matched bot commands, slash commands
    (``do_<cmd>`` methods), or broadcasts the message to the room.
    """
    msg_type = event.getType()
    nick = event.getFrom().getResource()
    from_jid = event.getFrom().getStripped()
    body = event.getBody()
    if msg_type == 'chat' and body is None:
        return
    logger.debug('msg_type[%s] from[%s] nick[%s] body[%s]' % (msg_type, from_jid, nick, body,))
    # List comprehension instead of filter(): len() on a filter object
    # fails on Python 3.
    sender = [m for m in self.params['MEMBERS'] if m['JID'] == from_jid]
    should_process = msg_type in ['message', 'chat', None] and body is not None and len(sender) == 1
    if not should_process:
        return
    sender = sender[0]
    try:
        for reg, cmd in self.command_patterns:
            m = reg.match(body)
            if m:
                logger.info('pattern matched for bot command \'%s\'' % (cmd,))
                function = getattr(self, str(cmd), None)
                if function:
                    return function(sender, body, m)
        words = body.split(' ')
        cmd, args = words[0], words[1:]
        if cmd and cmd[0] == '/':
            cmd = cmd[1:]
            command_handler = getattr(self, 'do_' + cmd, None)
            if command_handler:
                return command_handler(sender, body, args)
        broadcast_body = '[%s] %s' % (sender['NICK'], body,)
        return self.broadcast(broadcast_body, exclude=(sender,))
    except Exception:
        # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
        # still propagate.
        logger.exception('Error handling message [%s] from [%s]' % (body, sender['JID']))
def from_dataset(cls, *args, **kwargs):
    """Create an InteractiveList instance from the given base dataset.

    Parameters
    ----------
    %(ArrayList.from_dataset.parameters.no_plotter)s
    plotter : psyplot.plotter.Plotter
        The plotter instance that is used to visualize the data in this
        list
    make_plot : bool
        If True, the plot is made

    Other Parameters
    ----------------
    %(ArrayList.from_dataset.other_parameters.no_args_kwargs)s
    ``**kwargs``
        Further keyword arguments may point to any of the dimensions of
        the data (see `dims`)

    Returns
    -------
    %(ArrayList.from_dataset.returns)s
    """
    plotter = kwargs.pop('plotter', None)
    make_plot = kwargs.pop('make_plot', True)
    obj = super(InteractiveList, cls).from_dataset(*args, **kwargs)
    if plotter is not None:
        plotter.initialize_plot(obj, make_plot=make_plot)
    return obj
async def tag(self, name: str, repo: str, *, tag: str = None) -> bool:
    """Tag the given image so that it becomes part of a repository.

    Args:
        repo: the repository to tag in
        tag: the name for the new tag
    """
    params = {"repo": repo} if not tag else {"repo": repo, "tag": tag}
    await self.docker._query(
        "images/{name}/tag".format(name=name), "POST",
        params=params, headers={"content-type": "application/json"},
    )
    return True
def page_crawled(self, page_resp):
    """Check if page has been crawled by hashing its text content.

    Adds new pages to the page cache. Returns whether the page was found
    in the cache.
    """
    digest = utils.hash_text(''.join(utils.parse_text(page_resp)))
    if digest in self.page_cache:
        return True
    utils.cache_page(self.page_cache, digest, self.args['cache_size'])
    return False
def save_to_txt(self, path='./', name=None, include=None,
                fmt='%.18e', delimiter=' ', footer='', encoding=None,
                verb=True, return_pfe=False):
    """Save the basic geometrical attributes only (polygon and pos/extent).

    The attributes are saved to a txt file with chosen encoding.
    Useful for easily sharing input with non-python users.

    BEWARE: doesn't save all attributes!!!
    Only saves the basic geometrical inputs!!!
    Not equivalent to full tofu save (using self.save())!!!

    The saving convention is:
        * data is saved on 2 columns
        * The first line gives 2 numbers: nP, no
            - nP = Number of points in the polygon
              (i.e.: the number of following lines describing the polygon)
            - no = Number of occurences (toroidal if in toroidal geometry)
              (i.e.: the nb. of pos/extent lines after the first nP lines)
        * Hence, the data is a 2D array of shape (1 + nP + no, 2)
        * The two columns of the nP lines describing the polygon represent:
            - 1st: R (resp. Y) coordinate of polygon points
            - 2nd: Z (resp. Z) coordinate of polygon points
        * The two columns of the no lines representing the occurences are:
            - 1st: pos, the tor. angle (resp. X) center of occurences
            - 2nd: extent, the tor. angle (resp. X) extension of occurences

    Hence, the polygon and pos/extent of the object can be retrieved with:
        >>> import numpy as np
        >>> out = np.loadtxt(filename)
        >>> nP, no = out[0, :]
        >>> poly = out[1:1 + nP, :]
        >>> pos, extent = out[1 + nP:, 0], out[1 + nP:, 1]

    All parameters apart from path, name and include are fed to
    numpy.savetxt().

    Parameters
    ----------
    path : None / str
        The path where to save the file. If None -> self.Id.SavePath
    name : None / str
        The name to use for the saved file.
        If None -> self.Id.SaveName(include)
    include : None / list
        List of attributes used to build the default saving name.
        Fed to tf.utils.ID.generate_SaveName().
        If None -> ['Mod', 'Cls', 'Exp', 'Name'] (recommended)
    """
    # None sentinel avoids a shared mutable default argument
    if include is None:
        include = ['Mod', 'Cls', 'Exp', 'Name']
    if name is None:
        name = self.Id.generate_SaveName(include)
    if path is None:
        path = self.Id.SavePath
    path = os.path.abspath(path)
    pfe = os.path.join(path, name + '.txt')
    # First row: (nP, no); then polygon points; then pos/extent rows
    nPno = np.r_[self.Poly.shape[1], self.noccur]
    poly = self.Poly.T
    posext = np.vstack((self.pos, self.extent)).T
    out = np.vstack((nPno, poly, posext))
    # default standards
    newline = '\n'
    comments = '#'
    header = ' Cls = %s\n Exp = %s\n Name = %s' % (self.__class__.__name__, self.Id.Exp, self.Id.Name)
    kwds = dict(fmt=fmt, delimiter=delimiter, newline=newline,
                header=header, footer=footer, comments=comments)
    # older numpy versions do not support the encoding keyword
    if 'encoding' in inspect.signature(np.savetxt).parameters:
        kwds['encoding'] = encoding
    np.savetxt(pfe, out, **kwds)
    if verb:
        print("save_to_txt in:\n", pfe)
    if return_pfe:
        return pfe
def tokenize_string(source):
    """Tokenize a Python source code string.

    Parameters
    ----------
    source : str
        A Python source code string
    """
    token_generator = tokenize.generate_tokens(StringIO(source).readline)
    # Loop over all tokens until a backtick (`) is found; then take all
    # tokens until the next backtick to form a backtick-quoted string.
    for toknum, tokval, _, _, _ in token_generator:
        if tokval == '`':
            parts = []
            for tok in token_generator:
                if tok[1] == '`':
                    break
                parts.append(tok[1])
            yield _BACKTICK_QUOTED_STRING, " ".join(parts)
        else:
            yield toknum, tokval
def bvlpdu_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict."""
    # make/extend the dictionary of content
    if use_dict is None:
        use_dict = as_class()
    # call the normal procedure
    key_value_contents(use_dict=use_dict, as_class=as_class,
                       key_values=(('function', 'DistributeBroadcastToNetwork'),))
    # this message has data
    PDUData.dict_contents(self, use_dict=use_dict, as_class=as_class)
    # return what we built/updated
    return use_dict
def compilable_sources(self, sourcedir, absolute=False, recursive=True, excludes=None):
    """Find all scss sources that should be compiled, aka all sources that
    are not "partials" Sass sources.

    Args:
        sourcedir (str): Directory path to scan.

    Keyword Arguments:
        absolute (bool): Returned paths will be absolute using
            ``sourcedir`` argument (if True), else return relative paths.
        recursive (bool): Switch to enable recursive finding (if True).
            Default to True.
        excludes (list): A list of excluding patterns (glob patterns).
            Patterns are matched against the relative filepath (from its
            sourcedir). Default to no exclusions.

    Returns:
        list: List of source paths.
    """
    # None sentinel avoids a shared mutable default argument
    if excludes is None:
        excludes = []
    filepaths = []
    for root, dirs, files in os.walk(sourcedir):
        # Sort structure to avoid arbitrary order
        dirs.sort()
        files.sort()
        for item in files:
            # Store relative directory but drop it if at root ('.')
            relative_dir = os.path.relpath(root, sourcedir)
            if relative_dir == '.':
                relative_dir = ''
            # Matching all conditions
            absolute_filepath = os.path.join(root, item)
            conditions = {
                'sourcedir': sourcedir,
                'nopartial': True,
                'exclude_patterns': excludes,
                'excluded_libdirs': [],
            }
            if self.match_conditions(absolute_filepath, **conditions):
                if absolute:
                    filepaths.append(absolute_filepath)
                else:
                    filepaths.append(os.path.join(relative_dir, item))
        # For non recursive usage, break from the first entry
        if not recursive:
            break
    return filepaths
def config_amend_key_(self, key, value):
    """Load a stringified (dotted) key and a value into the configuration.

    Intermediate dicts are created as needed. This never clobbers: it
    only appends when keys are missing.
    """
    node = self._cfg
    *parents, leaf = key.split('.')
    walked = []
    for part in parents:
        node = node.setdefault(part, {})
        walked.append(part)
        if not isinstance(node, dict):
            raise Exception('.'.join(walked) + ' has conflicting dict/scalar types!')
    node.setdefault(leaf, value)
def _payload(self, *args, **kwargs):
    '''Find all unmanaged files. Returns maximum 1000 values.

    Parameters:
        * **filter**: Include only results which path starts from the filter string.
        * **time**: Display time in Unix ticks or format according to the configured TZ (default)
          Values: ticks, tz (default)
        * **size**: Format size. Values: B, KB, MB, GB
        * **owners**: Resolve UID/GID to an actual names or leave them numeric (default).
          Values: name (default), id
        * **type**: Comma-separated type of included payload: dir (or directory), link and/or file.
        * **brief**: Return just a list of matches, if True. Default: False
        * **offset**: Offset of the files
        * **max**: Maximum returned values. Default 1000.

    Options:
        * **total**: Return a total amount of found payload files
    '''
    def _size_format(size, fmt):
        # Human-readable size; fmt=None returns the raw byte count.
        if fmt is None:
            return size
        fmt = fmt.lower()
        if fmt == "b":
            return "{0} Bytes".format(size)
        if fmt == "kb":
            return "{0} Kb".format(round((float(size) / 0x400), 2))
        if fmt == "mb":
            return "{0} Mb".format(round((float(size) / 0x400 / 0x400), 2))
        if fmt == "gb":
            return "{0} Gb".format(round((float(size) / 0x400 / 0x400 / 0x400), 2))

    filter = kwargs.get('filter')
    offset = kwargs.get('offset', 0)
    timeformat = kwargs.get("time", "tz")
    if timeformat not in ["ticks", "tz"]:
        raise InspectorQueryException('Unknown "{0}" value for parameter "time"'.format(timeformat))
    tfmt = lambda param: timeformat == "tz" and time.strftime("%b %d %Y %H:%M:%S", time.gmtime(param)) or int(param)
    size_fmt = kwargs.get("size")
    if size_fmt is not None and size_fmt.lower() not in ["b", "kb", "mb", "gb"]:
        raise InspectorQueryException('Unknown "{0}" value for parameter "size". '
                                      'Should be either B, Kb, Mb or Gb'.format(timeformat))
    owners = kwargs.get("owners", "id")
    if owners not in ["name", "id"]:
        raise InspectorQueryException('Unknown "{0}" value for parameter "owners". '
                                      'Should be either name or id (default)'.format(owners))
    incl_type = [prm for prm in kwargs.get("type", "").lower().split(",") if prm]
    if not incl_type:
        incl_type.append("file")
    for i_type in incl_type:
        if i_type not in ["directory", "dir", "d", "file", "f", "link", "l"]:
            raise InspectorQueryException('Unknown "{0}" values for parameter "type". '
                                          'Should be comma separated one or more of '
                                          'dir, file and/or link.'.format(", ".join(incl_type)))
    self.db.open()
    if "total" in args:
        return {'total': len(self.db.get(PayloadFile))}
    brief = kwargs.get("brief")
    pld_files = list() if brief else dict()
    for pld_data in self.db.get(PayloadFile)[offset:offset + kwargs.get('max', 1000)]:
        if brief:
            pld_files.append(pld_data.path)
        else:
            pld_files[pld_data.path] = {
                'uid': self._id_resolv(pld_data.uid, named=(owners == "id")),
                'gid': self._id_resolv(pld_data.gid, named=(owners == "id"), uid=False),
                'size': _size_format(pld_data.p_size, fmt=size_fmt),
                'mode': oct(pld_data.mode),
                'accessed': tfmt(pld_data.atime),
                'modified': tfmt(pld_data.mtime),
                'created': tfmt(pld_data.ctime),
            }
    return pld_files
def generate(env):
    """Add Builders and construction variables for the OS/2 icc compiler to an Environment."""
    # Start from the generic C-compiler setup, then overlay the icc specifics.
    cc.generate(env)
    icc_settings = (
        ('CC', 'icc'),
        ('CCCOM', '$CC $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET'),
        ('CXXCOM', '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET'),
        ('CPPDEFPREFIX', '/D'),
        ('CPPDEFSUFFIX', ''),
        ('INCPREFIX', '/I'),
        ('INCSUFFIX', ''),
        ('CFILESUFFIX', '.c'),
        ('CXXFILESUFFIX', '.cc'),
    )
    for key, value in icc_settings:
        env[key] = value
def rename(self, file_id, new_filename, session=None):
    """Renames the stored file with the specified file_id.

    For example::

      my_db = MongoClient().test
      fs = GridFSBucket(my_db)
      # Get _id of file to rename
      file_id = fs.upload_from_stream("test_file", "data I want to store!")
      fs.rename(file_id, "new_test_name")

    Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.

    :Parameters:
      - `file_id`: The _id of the file to be renamed.
      - `new_filename`: The new name of the file.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    result = self._files.update_one(
        {"_id": file_id},
        {"$set": {"filename": new_filename}},
        session=session)
    if not result.matched_count:
        # BUGFIX: the original format string used "%i" for file_id, which
        # raises TypeError when the _id is not an int (e.g. an ObjectId or
        # string); "%r" renders any _id type.
        raise NoFile("no files could be renamed %r because none "
                     "matched file_id %r" % (new_filename, file_id))
def get_logger(context=None, name=None):
    """Return a :class:`ContextLogger` for *context*.

    The returned object implements the standard library's
    :class:`logging.Logger` interface.  Loggers created without a *context*
    are cached per name, so repeated lookups share one instance and memory.
    """
    if name is None:
        name = _logger_name
    shareable = context is None
    if shareable and name in _logger_dict:
        # Context-free loggers are shared to save memory.
        return _logger_dict[name]
    if context is not None and not isinstance(context, six.string_types):
        # Non-string contexts are converted to a stable object reference.
        context = util.objref(context)
    wrapped = ContextLogger(logging.getLogger(name), context)
    if shareable:
        _logger_dict[name] = wrapped
    return wrapped
def _addFlushBatch(self):
    """Sends all waiting documents to Solr.

    Documents are grouped by their "doc.language" field and each group is
    sent to the matching language endpoint; documents whose language has no
    dedicated endpoint fall back to the default endpoint.
    """
    if len(self._add_batch) > 0:
        language_batches = {}
        # Create command JSONs for each of the language endpoints.
        for lang in self.endpoints:
            document_jsons = []
            for data in self._add_batch:
                # A document belongs to this endpoint when its language
                # matches, or when this is the default endpoint and the
                # document's language has no endpoint of its own.
                # BUGFIX: replaced Python-2-only dict.has_key() with the
                # `in` operator (has_key was removed in Python 3).
                if (data['doc'].get("language", self.default_endpoint) == lang
                        or (lang == self.default_endpoint
                            and data['doc'].get("language", None) not in self.endpoints)):
                    document_jsons.append("\"add\":" + json.dumps(data))
            # Solr requires documents to be sent in
            # {"add":{"doc":{...}}, "add":{"doc":{...}}, ...} format, which
            # isn't expressible as a Python dictionary, hence the manual join.
            language_batches[lang] = "{" + ",".join(document_jsons) + "}"
        for lang in language_batches:
            self._send_solr_command(self.endpoints[lang], language_batches[lang])
        self._add_batch = []
def heartbeat_tick(self, rate=2):
    """Send heartbeat packets, if necessary, and fail if none have been
    received recently.  This should be called frequently, on the order of
    once per second.

    :keyword rate: Ignored
    """
    if not self.heartbeat:
        return
    # Any data exchanged in either direction counts as a heartbeat.
    bytes_sent_now = self.method_writer.bytes_sent
    bytes_recv_now = self.method_reader.bytes_recv
    if self.prev_sent is None or self.prev_sent != bytes_sent_now:
        self.last_heartbeat_sent = monotonic()
    if self.prev_recv is None or self.prev_recv != bytes_recv_now:
        self.last_heartbeat_received = monotonic()
    self.prev_sent = bytes_sent_now
    self.prev_recv = bytes_recv_now
    # Emit an explicit heartbeat once a full interval passed with no
    # outgoing traffic.
    if monotonic() > self.last_heartbeat_sent + self.heartbeat:
        self.send_heartbeat()
        self.last_heartbeat_sent = monotonic()
    # Tolerate one missed interval before failing, which gives the server
    # enough time to send heartbeats a little late.
    if (self.last_heartbeat_received and
            self.last_heartbeat_received + 2 * self.heartbeat < monotonic()):
        raise ConnectionForced('Too many heartbeats missed')
def addService(self, service, name=None, description=None, authenticator=None,
               expose_request=None, preprocessor=None):
    """
    Adds a service to the gateway.

    @param service: The service to add to the gateway.
    @type service: C{callable}, class instance, or a module
    @param name: The name of the service.
    @type name: C{str}
    @raise pyamf.remoting.RemotingError: Service already exists.
    @raise TypeError: C{service} cannot be a scalar value.
    @raise TypeError: C{service} must be C{callable} or a module.
    """
    # Scalars can never expose methods, so reject them up front.
    # NOTE(review): `long`/`basestring` and the `types.*Type` aliases below
    # are Python 2 only — this block cannot run unmodified on Python 3.
    if isinstance(service, (int, long, float, basestring)):
        raise TypeError("Service cannot be a scalar value")
    allowed_types = (types.ModuleType, types.FunctionType, types.DictType,
                     types.MethodType, types.InstanceType, types.ObjectType)
    if not python.callable(service) and not isinstance(service, allowed_types):
        raise TypeError("Service must be a callable, module, or an object")
    if name is None:
        # TODO: include the module in the name
        if isinstance(service, (type, types.ClassType)):
            # new-style or classic class: use its class name
            name = service.__name__
        elif isinstance(service, types.FunctionType):
            name = service.func_name
        elif isinstance(service, types.ModuleType):
            name = service.__name__
        else:
            # fall back to the object's string representation
            name = str(service)
    if name in self.services:
        raise remoting.RemotingError("Service %s already exists" % name)
    self.services[name] = ServiceWrapper(service, description, authenticator,
                                         expose_request, preprocessor)
def cache_page(**kwargs):
    """Decorator similar to ``django.views.decorators.cache.cache_page``.

    Pops ``cache_timeout``, ``key_prefix`` and ``cache_min_age`` from the
    keyword arguments, builds a :class:`CacheMiddleware`-based decorator
    from them, and forwards any remaining keyword arguments unchanged.
    """
    middleware_kwargs = {
        'cache_timeout': kwargs.pop('cache_timeout', None),
        'key_prefix': kwargs.pop('key_prefix', None),
        'cache_min_age': kwargs.pop('cache_min_age', None),
    }
    middleware_kwargs.update(kwargs)
    return decorators.decorator_from_middleware_with_args(CacheMiddleware)(**middleware_kwargs)
def translated(self, *language_codes, **translated_fields):
    """Only return objects translated into the given languages.

    When no language codes are given, only the currently active language
    is queried.

    .. note::
        Due to Django ORM limitations on spanning multi-valued
        relationships, this method can't be combined with other filters
        that access the translated fields.  Query all translated fields in
        a single call, e.g. ``qs.translated('en', name="Cheese Omelette")``.
    """
    relname = self.model._parler_meta.root_rel_name
    if not language_codes:
        language_codes = (get_language(),)
    lookups = {}
    for field_name, value in translated_fields.items():
        if field_name.startswith('master__'):
            # Strip the prefix to avoid a translations__master__ round trip.
            lookups[field_name[8:]] = value
        else:
            lookups["{0}__{1}".format(relname, field_name)] = value
    if len(language_codes) == 1:
        lookups[relname + '__language_code'] = language_codes[0]
        return self.filter(**lookups)
    lookups[relname + '__language_code__in'] = language_codes
    return self.filter(**lookups).distinct()
def delete_action(self, action, player_idx=0):
    """Return a new `Player` instance with the action(s) specified by
    `action` deleted from the action set of the player specified by
    `player_idx`.  Deletion is not performed in place.

    Parameters
    ----------
    action : scalar(int) or array_like(int)
        Integer or array like of integers representing the action(s)
        to be deleted.
    player_idx : scalar(int), optional(default=0)
        Index of the player to delete action(s) for.

    Returns
    -------
    Player
        Copy of `self` with the action(s) deleted as specified.
    """
    # np.delete returns a copy, so `self` is left untouched.
    trimmed_payoffs = np.delete(self.payoff_array, action, axis=player_idx)
    return Player(trimmed_payoffs)
def routeCoverage(self, msisdn):
    """Look up route coverage for *msisdn*.

    If the route coverage lookup encounters any error, the destination is
    reported as "not covered".
    """
    try:
        parsed = self.parseLegacy(self.request('utils/routeCoverage', {'msisdn': msisdn}))
        charge = float(parsed['Charge'])
    except Exception:
        # Any failure (missing key, bad value, transport error, ...) is
        # treated as "not covered".
        # TODO perhaps catch different types of exceptions so we can isolate
        # certain global exceptions like authentication
        return {'routable': False, 'destination': msisdn, 'charge': 0}
    return {'routable': True, 'destination': msisdn, 'charge': charge}
def handle(self, *args, **options):
    """Sets options common to all commands.

    Any command subclassing this object should implement its own
    handle method, as is standard in Django, and run this method
    via a super call to inherit its functionality.
    """
    # Create a data directory under the project root if it is missing.
    self.data_dir = os.path.join(settings.BASE_DIR, 'data')
    if not os.path.exists(self.data_dir):
        os.makedirs(self.data_dir)
    # Record when this command started running (naive local time).
    self.start_datetime = datetime.now()
def check_dependee_exists(self, depender, dependee, dependee_id):
    """Checks whether a depended-on module is available.

    Returns an empty string when *dependee* was resolved, otherwise a
    human-readable error message describing the missing module.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # If the module id was resolved, there's no problem.
    if dependee is not None:
        return ''
    return ('module: \n\n' + dependee_id + '\n\nnot found in paths: ' +
            str(self.host['shutit_module_path']) + ' but needed for ' +
            depender.module_id +
            '\nCheck your --shutit_module_path setting and ensure that all modules configured to be built are in that path setting, eg "--shutit_module_path /path/to/other/module/:."\n\nAlso check that the module is configured to be built with the correct module id in that module\'s configs/build.cnf file.\n\nSee also help.')
async def nearest_by_coordinates(self, latitude: float, longitude: float) -> dict:
    """Get the nearest report (with local and state info) to a lat/lon.

    Returns a dict with two keys: ``local`` (the closest user-submitted
    report) and ``state`` (the state-level data associated with it).
    """
    # Since user data is more granular than state or CDC data, find the
    # user report whose coordinates are closest to the provided
    # coordinates:
    nearest_user_report = get_nearest_by_coordinates(
        await self.user_reports(), 'latitude', 'longitude', latitude, longitude)
    try:
        # If the user report corresponds to a known state in
        # flunearyou.org's database, we can safely assume that's the
        # correct state:
        nearest_state = next((
            state for state in await self.state_data()
            if state['place_id'] == nearest_user_report['contained_by']))
    except StopIteration:
        # If a place ID doesn't exist (e.g., ZIP Code 98012 doesn't have a
        # place ID), calculate the nearest state by measuring the distance
        # from the provided latitude/longitude to flunearyou.org's
        # latitude/longitude that defines each state:
        nearest_state = get_nearest_by_coordinates(
            await self.state_data(), 'lat', 'lon', latitude, longitude)
    return {'local': nearest_user_report, 'state': nearest_state}
def getPhotos(self, tags='', per_page='', page=''):
    """Get a list of photo objects for this group."""
    response = _doget('flickr.groups.pools.getPhotos',
                      group_id=self.id, tags=tags,
                      per_page=per_page, page=page)
    # Parse each raw photo record into a photo object.
    return [_parse_photo(photo) for photo in response.rsp.photos.photo]
def generate(self, output_path=None, in_memory=False):
    """Executes the Statik project generator.

    Args:
        output_path: The path to which to write output files.
        in_memory: Whether or not to generate the results in memory. If True, this will
            generate the output result as a dictionary. If False, this will write the output
            to files in the output_path.

    Returns:
        If in_memory is True, this returns a dictionary containing the actual generated static
        content. If in_memory is False, this returns an integer indicating the number of files
        generated in the output path.

    Raises:
        StatikError: if the build fails for any reason; non-Statik exceptions
            are wrapped in a StatikError before being re-raised.
    """
    result = dict() if in_memory else 0
    logger.info("Generating Statik build...")
    try:
        if output_path is None and not in_memory:
            raise InternalError("If project is not to be generated in-memory, an output path must be specified")
        self.error_context.update(filename=self.config_file_path)
        self.config = self.config or StatikConfig(self.config_file_path)
        # BUGFIX: the original guarded this with an if/else whose two
        # branches logged the identical message; a single unconditional
        # log call is equivalent.
        logger.debug("Using encoding: %s", self.config.encoding)
        self.error_context.clear()
        self.models = self.load_models()
        self.template_engine = StatikTemplateEngine(self)
        if self.config.external_database is not None:
            self.config.external_database.write_files(output_path, self.models)
        self.views = self.load_views()
        if not self.views:
            raise NoViewsError()
        self.db = self.load_db_data(self.models)
        self.project_context = self.load_project_context()
        in_memory_result = self.process_views()
        if in_memory:
            result = in_memory_result
        else:
            # dump the in-memory output to files
            file_count = self.dump_in_memory_result(in_memory_result, output_path)
            logger.info('Wrote %d output file(s) to folder: %s', file_count, output_path)
            # copy any assets across, recursively
            self.copy_assets(output_path)
            result = file_count
        logger.info("Success!")
    except StatikError as exc:
        logger.debug(traceback.format_exc())
        logger.error(exc.render())
        # re-raise the error to stop execution
        raise exc
    except Exception as exc:
        logger.debug(traceback.format_exc())
        _exc = StatikError(
            message="Failed to build project. Run Statik in verbose mode (-v) to see " +
                    "additional traceback information about this error.",
            orig_exc=exc,
            context=self.error_context)
        logger.error(_exc.render())
        raise _exc
    finally:
        try:
            # make sure to destroy the database engine (to provide for the possibility of
            # database engine reloads when watching for changes)
            if self.db is not None:
                self.db.shutdown()
        except Exception as e:
            logger.exception("Unable to clean up properly: %s", e)
    return result
def _obtem_doctype(self, xml):
    """Obtém DOCTYPE do XML.

    Determina o tipo de arquivo que vai ser assinado, procurando pela tag
    correspondente.

    Raises:
        ValueError: se nenhuma tag conhecida for encontrada no XML.
    """
    # Ordered (needle, doctype) pairs; checked in order, first match wins.
    doctypes = (
        # XML da NF-e
        ('</NFe>', '<!DOCTYPE NFe [<!ATTLIST infNFe Id ID #IMPLIED>]>'),
        ('</cancNFe>', '<!DOCTYPE cancNFe [<!ATTLIST infCanc Id ID #IMPLIED>]>'),
        ('</inutNFe>', '<!DOCTYPE inutNFe [<!ATTLIST infInut Id ID #IMPLIED>]>'),
        ('</infEvento>', '<!DOCTYPE evento [<!ATTLIST infEvento Id ID #IMPLIED>]>'),
        # XML do CT-e
        ('</CTe>', '<!DOCTYPE CTe [<!ATTLIST infCte Id ID #IMPLIED>]>'),
        ('</cancCTe>', '<!DOCTYPE cancCTe [<!ATTLIST infCanc Id ID #IMPLIED>]>'),
        ('</inutCTe>', '<!DOCTYPE inutCTe [<!ATTLIST infInut Id ID #IMPLIED>]>'),
        # XML do MDF-e
        # BUGFIX: o doctype do MDF-e declarava "infNFe" (cópia do bloco da
        # NF-e); a tag de informações do MDF-e é "infMDFe".
        ('</MDFe>', '<!DOCTYPE MDFe [<!ATTLIST infMDFe Id ID #IMPLIED>]>'),
        # XML da NFS-e
        ('ReqEnvioLoteRPS', '<!DOCTYPE Lote [<!ATTLIST Lote Id ID #IMPLIED>]>'),
        ('EnviarLoteRpsEnvio', '<!DOCTYPE EnviarLoteRpsEnvio>'),
        ('CancelarNfseEnvio', '<!DOCTYPE CancelarNfseEnvio>'),
        # eSocial
        ('</evtInfoEmpregador>', '<!DOCTYPE eSocial [<!ATTLIST evtInfoEmpregador Id ID #IMPLIED>]>'),
        # EFD/Reinf
        ('</evtInfoContri>', '<!DOCTYPE Reinf [<!ATTLIST evtInfoContri Id ID #IMPLIED>]>'),
        ('</evtServTom>', '<!DOCTYPE Reinf [<!ATTLIST evtServTom Id ID #IMPLIED>]>'),
        ('</evtFechaEvPer>', '<!DOCTYPE Reinf [<!ATTLIST evtFechaEvPer Id ID #IMPLIED>]>'),
    )
    for needle, doctype in doctypes:
        if needle in xml:
            return doctype
    raise ValueError('Tipo de arquivo desconhecido para assinatura/validacao')
def libvlc_media_player_set_rate(p_mi, rate):
    '''Set movie play rate.
    @param p_mi: the Media Player.
    @param rate: movie play rate to set.
    @return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol).
    '''
    # Reuse the cached ctypes wrapper when available; otherwise build it
    # (two plain input arguments: the player handle and the float rate).
    func = _Cfunctions.get('libvlc_media_player_set_rate', None)
    if not func:
        func = _Cfunction('libvlc_media_player_set_rate', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_float)
    return func(p_mi, rate)
def create_radius_stops(breaks, min_radius, max_radius):
    """Convert data breaks into a radius ramp."""
    # One radius value per break, scaled between min_radius and max_radius.
    radius_breaks = scale_between(min_radius, max_radius, len(breaks))
    return [[break_value, radius_breaks[i]]
            for i, break_value in enumerate(breaks)]
def stop_instances(self, instance_ids=None, force=False):
    """Stop the instances specified.

    :type instance_ids: list
    :param instance_ids: A list of strings of the Instance IDs to stop
    :type force: bool
    :param force: Forces the instance to stop
    :rtype: list
    :return: A list of the instances stopped
    """
    # Only include the Force flag when requested.
    params = {'Force': 'true'} if force else {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    return self.get_list('StopInstances', params, [('item', Instance)], verb='POST')
def end_y(self):
    """Return the Y-position of the end point of this connector, in English
    Metric Units (as a |Length| object)."""
    shape = self._element
    if shape.flipV:
        # Vertically flipped: the connector ends at its top edge.
        return Emu(shape.y)
    return Emu(shape.y + shape.cy)
def get_position_p(self):
    """Get the P value of the current PID for position."""
    # RAM read request for the two-byte position proportional-gain register.
    request = [0x09, self.servoid, RAM_READ_REQ, POSITION_KP_RAM, BYTE2]
    send_data(request)
    try:
        reply = SERPORT.read(13)
        # Bytes 9 (low) and 10 (high) of the reply hold the 16-bit Kp value.
        return (ord(reply[10]) * 256) + (ord(reply[9]) & 0xff)
    except HerkulexError:
        raise HerkulexError("could not communicate with motors")
def create(self, **kwargs):
    '''Create a cluster of BIG-IP® devices.

    :param kwargs: dict -- keyword arguments for cluster manager
    '''
    # A manager can only ever manage one cluster at a time.
    try:
        existing = getattr(self, "cluster", None)
    except NoClusterToManage:
        existing = None
    if existing is not None:
        raise AlreadyManagingCluster('The ClusterManager is already managing a cluster.')
    self._check_device_number(kwargs['devices'])
    self.trust_domain.create(
        devices=kwargs['devices'],
        partition=kwargs['device_group_partition'])
    self.device_group.create(**kwargs)
    self.cluster = Cluster(**kwargs)
def _dump(config_instance, dict_type=OrderedDict):
    """Dumps an instance from ``instance`` to a dictionary type mapping.

    :param object config_instance: The instance to serialize to a dictionary
    :param object dict_type: Some dictionary type, defaults to ``OrderedDict``
    :return: Dumped dictionary
    :rtype: collections.OrderedDict (or instance of ``dict_type``)
    :raises ValueError: if *config_instance* is not a config-decorated instance
    """
    if not is_config(config_instance):
        raise ValueError(f"cannot dump instance {config_instance!r} to dict, "
                         "instance is not a config class")
    result = dict_type()
    for var in attr.fields(config_instance.__class__):
        # Skip attrs fields that carry no config-var serialization metadata.
        if not is_config_var(var):
            continue
        entry = var.metadata[CONFIG_KEY]
        # The serialized key defaults to the attribute name unless overridden.
        dump_key = entry.name if entry.name else var.name
        # NOTE(review): falsy defaults (0, False, "") collapse to None here —
        # presumably intentional, but worth confirming.
        dump_default = var.default if var.default else None
        if callable(entry.encoder):
            # A custom encoder takes full responsibility for the value.
            result[dump_key] = entry.encoder(
                getattr(config_instance, var.name, dump_default))
            continue
        if is_array_type(entry.type):
            items = getattr(config_instance, var.name, [])
            if items is not None:
                # Recursively dump nested config items; pass plain values through.
                result[dump_key] = [
                    (_dump(item, dict_type=dict_type) if is_config(item) else item)
                    for item in items]
        elif is_enum_type(entry.type):
            dump_value = getattr(config_instance, var.name, dump_default)
            # Serialize enum members by value; leave non-members untouched.
            result[dump_key] = (dump_value.value
                                if dump_value in entry.type else dump_value)
        elif is_bytes_type(entry.type):
            result[dump_key] = encode_bytes(
                getattr(config_instance, var.name, dump_default))
        else:
            if is_config_type(entry.type):
                # Nested config object: dump recursively.
                result[dump_key] = _dump(
                    getattr(config_instance, var.name, {}), dict_type=dict_type)
            else:
                dump_value = getattr(config_instance, var.name, dump_default)
                if is_object_type(type(dump_value)):
                    # Mapping-like value: dump any config values inside it.
                    dump_value = {
                        key: (_dump(value, dict_type=dict_type)
                              if is_config(value) else value)
                        for (key, value) in dump_value.items()}
                # None values are omitted from the result entirely.
                if dump_value is not None:
                    result[dump_key] = dump_value
    return result
def addSuccess(self, test, capt):
    """Record a passing testcase after it completes.

    :param test: the test that ran
    :param capt: captured output (unused here; kept for the plugin API)
    """
    self.__insert_test_result(constants.State.PASS, test)
def probe_enable(cls, resource):
    """Activate a probe on a webaccelerator."""
    # Resolve the resource to a usable id, fire the API call, then follow
    # the resulting operation until it completes.
    operation = cls.call('hosting.rproxy.probe.enable', cls.usable_id(resource))
    cls.echo('Activating probe on %s' % resource)
    cls.display_progress(operation)
    cls.echo('The probe have been activated')
    return operation
def mkdir_p(path):
    """Emulates 'mkdir -p' in bash.

    :param path: (str) Path to create
    :return: None
    :raises CommandError
    """
    log = logging.getLogger(mod_logger + '.mkdir_p')
    if not isinstance(path, basestring):
        msg = 'path argument is not a string'
        log.error(msg)
        raise CommandError(msg)
    log.info('Attempting to create directory: %s', path)
    try:
        os.makedirs(path)
    except OSError as e:
        # An already-existing directory is fine; anything else is fatal.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            msg = 'Unable to create directory: {p}'.format(p=path)
            log.error(msg)
            raise CommandError(msg)
def abi_to_fasta(input, output):
    '''Converts ABI or AB1 files to FASTA format.

    Args:
        input (str): Path to a file or directory containing abi/ab1 files or
            zip archives of abi/ab1 files
        output (str): Path to a directory for the output FASTA files
    '''
    direcs = [input, ]
    # unzip any zip archives
    zip_files = list_files(input, ['zip'])
    if zip_files:
        direcs.extend(_process_zip_files(zip_files))
    # convert files, one output FASTA per input directory
    for d in direcs:
        files = list_files(d, ['ab1', 'abi'])
        seqs = []
        for f in files:
            # BUGFIX: the original leaked one open file handle per trace
            # file; close each handle as soon as it has been parsed.
            with open(f, 'rb') as trace_handle:
                seqs.append(SeqIO.read(trace_handle, 'abi'))
        fastas = ['>{}\n{}'.format(s.id, str(s.seq)) for s in seqs]
        ofile = os.path.basename(os.path.normpath(d)) + '.fasta'
        opath = os.path.join(output, ofile)
        # BUGFIX: the original never closed the output file handle.
        with open(opath, 'w') as out_handle:
            out_handle.write('\n'.join(fastas))
def contents(self, path):
    """Reads the given path of current ref_head and returns its content as utf-8."""
    try:
        stdout, exit_code, stderr = self.command_exec(
            ['cat-file', '-p', self.ref_head + ':' + path])
        if not exit_code:
            return stdout.decode('utf-8')
    except Exception:
        # Best effort: any git or decoding failure means "no content".
        pass
    return None
def disconnect(self, cback, subscribers=None, instance=None):
    """Remove a previously added function or method from the set of the
    signal's handlers.

    :param cback: the callback (or *handler*) to be removed from the set
    :param subscribers: optional explicit subscriber collection to operate
        on; defaults to this signal's own subscribers
    :param instance: optional instance the signal is bound to
    :returns: ``None`` or the value returned by the corresponding wrapper
    """
    if subscribers is None:
        subscribers = self.subscribers
    # wrapper: if a disconnect wrapper was configured, delegate to it,
    # handing it a helper that performs the actual removal plus a notifier
    # bound to *instance*.
    if self._fdisconnect is not None:
        def _disconnect(cback):
            self._disconnect(subscribers, cback)
        notify = partial(self._notify_one, instance)
        if instance is not None:
            result = self._fdisconnect(instance, cback, subscribers,
                                       _disconnect, notify)
        else:
            result = self._fdisconnect(cback, subscribers, _disconnect, notify)
        # The wrapper may be a coroutine; resolve awaitables to a value.
        if inspect.isawaitable(result):
            result = pull_result(result)
    else:
        self._disconnect(subscribers, cback)
        result = None
    return result
def hourly_dew_point_values(self, dry_bulb_condition):
    """Get a list of dew points (C) at each hour over the design day.

    args:
        dry_bulb_condition: The dry bulb condition for the day.
    """
    # Dew point never exceeds the dew point at the daily maximum dry-bulb
    # temperature, so cap each hourly value there.
    dpt_ceiling = self.dew_point(dry_bulb_condition.dry_bulb_max)
    return [min(db, dpt_ceiling) for db in dry_bulb_condition.hourly_values]
def add_schedule(self, name, activation_date, day_period='one_time',
                 final_action='ALERT_FAILURE', activated=True,
                 minute_period='one_time', day_mask=None,
                 repeat_until_date=None, comment=None):
    """Add a schedule to an existing task.

    :param str name: name for this schedule
    :param int activation_date: when to start this task, as a UTC time
        represented in milliseconds
    :param str day_period: when this task should be run. Valid options:
        'one_time', 'daily', 'weekly', 'monthly', 'yearly'. If 'daily' is
        selected, you can also provide a value for 'minute_period'
        (default: 'one_time')
    :param str minute_period: only required if day_period is set to 'daily'.
        Valid options: 'each_quarter' (15 min), 'each_half' (30 minutes),
        'hourly', or 'one_time' (default: 'one_time')
    :param int day_mask: if day_period is 'weekly', the day(s) on which to
        repeat. Day masks are: sun=1, mon=2, tue=4, wed=8, thu=16, fri=32,
        sat=64; e.g. every Monday, Wednesday and Friday is 2+8+32=42
    :param str final_action: action to perform after the scheduled task
        runs: 'ALERT_FAILURE', 'ALERT', or 'NO_ACTION'
        (default: ALERT_FAILURE)
    :param bool activated: whether to activate the schedule (default: True)
    :param str repeat_until_date: for anything but a one-time run, the date
        when this task should end, in the same format as `activation_date`
    :param str comment: optional comment
    :raises ActionCommandFailed: failed adding schedule
    :return: None
    """
    schedule = {
        'name': name,
        'activation_date': activation_date,
        'day_period': day_period,
        'day_mask': day_mask,
        'activated': activated,
        'final_action': final_action,
        'minute_period': minute_period,
        'repeat_until_date': repeat_until_date if repeat_until_date else None,
        'comment': comment,
    }
    if 'daily' in day_period:
        # Daily schedules must carry a real minute period; 'one_time' makes
        # no sense there, so fall back to hourly.
        schedule['minute_period'] = (minute_period
                                     if minute_period != 'one_time' else 'hourly')
    return self.make_request(
        ActionCommandFailed, method='create',
        resource='task_schedule', json=schedule)
def _multiseries(self, col, x, y, ctype, rsum, rmean):
    """Chart multiple series, one per distinct value of *col*, overlaid."""
    self.autoprint = False
    x, y = self._check_fields(x, y)
    overlay = None
    series = self.split_(col)
    for key in series:
        subset = series[key]
        # Optional rolling aggregations before charting.
        if rsum is not None:
            subset.rsum(rsum, index_col=x)
        if rmean is not None:
            subset.rmean(rmean, index_col=x)
        subset.chart(x, y)
        self.scolor()
        label = str(key)
        if ctype == "point":
            layer = subset.point_(label)
        elif ctype == "line":
            subset.zero_nan(y)
            layer = subset.line_(label)
        elif ctype == "bar":
            layer = subset.bar_(label)
        elif ctype == "area":
            layer = subset.area_(label)
        else:
            layer = None
        if layer is None:
            self.warning("Chart type " + ctype + " not supported, aborting")
            return
        # Overlay each series' layer onto the combined chart.
        overlay = layer if overlay is None else overlay * layer
    return overlay
def mils_standard(T, K, P, f, g, c, d, h, a, M, UB, phi):
    """mils_standard: standard formulation for the multi-item, multi-stage lot-sizing problem

    Parameters:
        - T: number of periods
        - K: set of resources
        - P: set of items
        - f[t,p]: set-up costs (on period t, for product p)
        - g[t,p]: set-up times
        - c[t,p]: variable costs
        - d[t,p]: demand values
        - h[t,p]: holding costs
        - a[t,k,p]: amount of resource k for producing p in period t
        - M[t,k]: resource k upper bound on period t
        - UB[t,p]: upper bound of production time of product p in period t
        - phi[(i,j)]: units of i required to produce a unit of j (j parent of i)

    Returns a Model with model.data = (y, x, I): y are the binary set-up
    variables, x the production quantities, I the end-of-period inventories.
    """
    model = Model("multi-stage lotsizing -- standard formulation")
    # y: set-up indicator, x: production amount, I: inventory level
    y, x, I = {}, {}, {}
    Ts = range(1, T + 1)
    for p in P:
        for t in Ts:
            y[t, p] = model.addVar(vtype="B", name="y(%s,%s)" % (t, p))
            x[t, p] = model.addVar(vtype="C", name="x(%s,%s)" % (t, p))
            I[t, p] = model.addVar(vtype="C", name="I(%s,%s)" % (t, p))
        # Inventory variable for the period before the horizon starts.
        I[0, p] = model.addVar(name="I(%s,%s)" % (0, p))
    for t in Ts:
        for p in P:
            # flow conservation constraints: opening stock plus production
            # covers component demand from parent items, closing stock and
            # external demand
            model.addCons(
                I[t - 1, p] + x[t, p] ==
                quicksum(phi[p, q] * x[t, q] for (p2, q) in phi if p2 == p) +
                I[t, p] + d[t, p],
                "FlowCons(%s,%s)" % (t, p))
            # capacity connection constraints: no production without set-up
            model.addCons(x[t, p] <= UB[t, p] * y[t, p],
                          "ConstrUB(%s,%s)" % (t, p))
        # time capacity constraints per resource (production plus set-up time)
        for k in K:
            model.addCons(
                quicksum(a[t, k, p] * x[t, p] + g[t, p] * y[t, p] for p in P)
                <= M[t, k],
                "TimeUB(%s,%s)" % (t, k))
    # initial inventory quantities
    for p in P:
        model.addCons(I[0, p] == 0, "InventInit(%s)" % (p))
    # minimize total set-up, production and holding cost
    model.setObjective(
        quicksum(f[t, p] * y[t, p] + c[t, p] * x[t, p] + h[t, p] * I[t, p]
                 for t in Ts for p in P),
        "minimize")
    model.data = y, x, I
    return model
def exists(self, filename):
    """Report whether a file exists on the distribution point.

    Determines file type by extension.

    Args:
        filename: Filename you wish to check. (No path! e.g.:
            "AdobeFlashPlayer-14.0.0.176.pkg")
    """
    # Packages and scripts live in different folders on the mount point.
    subfolder = "Packages" if is_package(filename) else "Scripts"
    return os.path.exists(
        os.path.join(self.connection["mount_point"], subfolder, filename))
def copy(self):
    """Copy constructor for Sequence objects.

    Builds a new Sequence from this object's fields.  NOTE(review): the
    field values are passed through as-is; whether they are themselves
    copied depends on Sequence's constructor — confirm if deep-copy
    semantics are expected.
    """
    return Sequence(self.name, self.sequenceData, self.start, self.end,
                    self.strand, self.remaining, self.meta_data,
                    self.mutableString)
def create_column_index(annotations):
    """Create a pd.MultiIndex using the column names and any categorical rows.

    Note that also non-main columns will be assigned a default category ''.
    """
    levels = OrderedDict({'Column Name': annotations['Column Name']})
    levels.update(annotation_rows('C:', annotations))
    # Numerical annotation rows are parsed to floats ('' becomes NaN).
    for row_name, values in annotation_rows('N:', annotations).items():
        levels[row_name] = [float(v) if v != '' else float('NaN') for v in values]
    column_index = pd.MultiIndex.from_tuples(
        list(zip(*levels.values())), names=list(levels.keys()))
    if len(column_index.names) == 1:
        # Flatten a single-level MultiIndex to a plain Index.
        column_index = column_index.get_level_values(column_index.names[0])
    return column_index
def _sampleLocationOnSide ( self ) :
"""Helper method to sample from the lateral surface of a cylinder .""" | z = random . uniform ( - 1 , 1 ) * self . height / 2.
sampledAngle = 2 * random . random ( ) * pi
x , y = self . radius * cos ( sampledAngle ) , self . radius * sin ( sampledAngle )
return [ x , y , z ] |
def addInkAnnot(self, list):
    """Add a 'handwriting' annotation.

    *list* is a list of lists of point-likes; each sublist forms an
    independent stroke. Returns the new annotation, or None on failure.
    """
    # NOTE: the parameter name 'list' shadows the builtin, but is kept
    # for API compatibility with existing keyword callers.
    CheckParent(self)
    annot = _fitz.Page_addInkAnnot(self, list)
    if not annot:
        return
    annot.thisown = True
    # only keep a weak reference back to the page to avoid a cycle
    annot.parent = weakref.proxy(self)
    self._annot_refs[id(annot)] = annot
    return annot
def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
    '''Add a vod, with one input.
    @param p_instance: the instance.
    @param psz_name: the name of the new vod media.
    @param psz_input: the input MRL.
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new vod.
    @param psz_mux: the muxer of the vod media.
    @return: 0 on success, -1 on error.
    '''
    # Look up the cached ctypes binding, creating it on first use.
    # The tuple of (1,) entries presumably flags each parameter as a
    # plain input argument (python-vlc binding convention — confirm
    # against _Cfunction's definition); the remaining arguments are the
    # error handler (None), the C return type, then each C parameter type.
    f = _Cfunctions.get('libvlc_vlm_add_vod', None) or \
        _Cfunction('libvlc_vlm_add_vod',
                   ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                   ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p,
                   ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int,
                   ctypes.c_char_p)
    return f(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux)
def fft_mesh(self, kpoint, band, spin=0, shift=True):
    """Places the coefficients of a wavefunction onto an fft mesh.

    Once the mesh has been obtained, a discrete fourier transform can be
    used to obtain real-space evaluation of the wavefunction. The output
    of this function can be passed directly to numpy's fft function. For
    example:

        mesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)
        evals = np.fft.ifftn(mesh)

    Args:
        kpoint (int): the index of the kpoint where the wavefunction
            will be evaluated
        band (int): the index of the band where the wavefunction will be
            evaluated
        spin (int): the spin of the wavefunction for the desired
            wavefunction (only for ISPIN = 2, default = 0)
        shift (bool): determines if the zero frequency coefficient is
            placed at index (0, 0, 0) or centered

    Returns:
        a numpy ndarray representing the 3D mesh of coefficients
    """
    # np.complex / np.int are deprecated aliases removed in NumPy >= 1.24;
    # use the builtin types instead.
    mesh = np.zeros(tuple(self.ng), dtype=complex)
    if self.spin == 2:
        tcoeffs = self.coeffs[spin][kpoint][band]
    else:
        tcoeffs = self.coeffs[kpoint][band]
    # Half-grid offset recenters each G-point index into the mesh;
    # hoisted out of the loop since it is invariant.
    center = (self.ng / 2).astype(int)
    for gp, coeff in zip(self.Gpoints[kpoint], tcoeffs):
        mesh[tuple(gp.astype(int) + center)] = coeff
    if shift:
        return np.fft.ifftshift(mesh)
    return mesh
def highlight_block(self, text):
    """Implement specific highlight for Python.

    Highlights one text block: restores any string state carried over
    from the previous block, repeatedly applies the class's PROG regex
    to format strings/comments/keywords, collects outline-explorer
    entries (cells, marker comments, def/class, statements) and import
    statements, then stores the end-of-block state for the next block.
    """
    text = to_text_string(text)
    # State saved on the previous text block: tells us whether this
    # block starts inside an unterminated string.
    prev_state = tbh.get_state(self.currentBlock().previous())
    if prev_state == self.INSIDE_DQ3STRING:
        # Re-open the string so PROG can match it; the negative offset
        # maps positions in the prefixed text back to this block.
        offset = -4
        text = r'""" ' + text
    elif prev_state == self.INSIDE_SQ3STRING:
        offset = -4
        text = r"''' " + text
    elif prev_state == self.INSIDE_DQSTRING:
        offset = -2
        text = r'" ' + text
    elif prev_state == self.INSIDE_SQSTRING:
        offset = -2
        text = r"' " + text
    else:
        offset = 0
        prev_state = self.NORMAL
    oedata = None          # outline-explorer entry for this block, if any
    import_stmt = None     # import statement found in this block, if any
    self.setFormat(0, len(text), self.formats["normal"])
    state = self.NORMAL
    match = self.PROG.search(text)
    while match:
        for key, value in list(match.groupdict().items()):
            if value:
                start, end = match.span(key)
                start = max([0, start + offset])
                end = max([0, end + offset])
                # "uf_*" groups are unfinished (unterminated) strings:
                # record the state so the next block continues them.
                if key == "uf_sq3string":
                    self.setFormat(start, end - start,
                                   self.formats["string"])
                    state = self.INSIDE_SQ3STRING
                elif key == "uf_dq3string":
                    self.setFormat(start, end - start,
                                   self.formats["string"])
                    state = self.INSIDE_DQ3STRING
                elif key == "uf_sqstring":
                    self.setFormat(start, end - start,
                                   self.formats["string"])
                    state = self.INSIDE_SQSTRING
                elif key == "uf_dqstring":
                    self.setFormat(start, end - start,
                                   self.formats["string"])
                    state = self.INSIDE_DQSTRING
                else:
                    self.setFormat(start, end - start, self.formats[key])
                    if key == "comment":
                        if text.lstrip().startswith(self.cell_separators):
                            # Comment introducing a code cell.
                            self.found_cell_separators = True
                            oedata = OutlineExplorerData()
                            oedata.text = to_text_string(text).strip()
                            # cell_head: string containing the first group
                            # of '%'s in the cell header
                            cell_head = re.search(r"%+|$",
                                                  text.lstrip()).group()
                            if cell_head == '':
                                oedata.cell_level = 0
                            else:
                                oedata.cell_level = len(cell_head) - 2
                            oedata.fold_level = start
                            oedata.def_type = OutlineExplorerData.CELL
                            oedata.def_name = get_code_cell_name(text)
                        elif self.OECOMMENT.match(text.lstrip()):
                            # Outline-explorer marker comment.
                            oedata = OutlineExplorerData()
                            oedata.text = to_text_string(text).strip()
                            oedata.fold_level = start
                            oedata.def_type = OutlineExplorerData.COMMENT
                            oedata.def_name = text.strip()
                    elif key == "keyword":
                        if value in ("def", "class"):
                            # Highlight the identifier after def/class.
                            match1 = self.IDPROG.match(text, end)
                            if match1:
                                start1, end1 = match1.span(1)
                                self.setFormat(start1, end1 - start1,
                                               self.formats["definition"])
                                oedata = OutlineExplorerData()
                                oedata.text = to_text_string(text)
                                oedata.fold_level = (len(text)
                                                     - len(text.lstrip()))
                                oedata.def_type = self.DEF_TYPES[
                                    to_text_string(value)]
                                oedata.def_name = text[start1:end1]
                                oedata.color = self.formats["definition"]
                        elif value in ("elif", "else", "except", "finally",
                                       "for", "if", "try", "while", "with"):
                            if text.lstrip().startswith(value):
                                oedata = OutlineExplorerData()
                                oedata.text = to_text_string(text).strip()
                                oedata.fold_level = start
                                oedata.def_type = \
                                    OutlineExplorerData.STATEMENT
                                oedata.def_name = text.strip()
                        elif value == "import":
                            import_stmt = text.strip()
                            # color all the "as" words on same line, except
                            # if in a comment; cheap approximation to the
                            # truth
                            if '#' in text:
                                endpos = text.index('#')
                            else:
                                endpos = len(text)
                            while True:
                                match1 = self.ASPROG.match(text, end, endpos)
                                if not match1:
                                    break
                                # NOTE(review): this rebinds the outer
                                # start/end from match.span(key); looks
                                # intentional (advances past each "as")
                                # — confirm before restructuring.
                                start, end = match1.span(1)
                                self.setFormat(start, end - start,
                                               self.formats["keyword"])
        match = self.PROG.search(text, match.end())
    tbh.set_state(self.currentBlock(), state)
    # Use normal format for indentation and trailing spaces.
    self.formats['leading'] = self.formats['normal']
    self.formats['trailing'] = self.formats['normal']
    self.highlight_spaces(text, offset)
    if oedata is not None:
        block_nb = self.currentBlock().blockNumber()
        self.outlineexplorer_data[block_nb] = oedata
        self.outlineexplorer_data['found_cell_separators'] = \
            self.found_cell_separators
    if import_stmt is not None:
        block_nb = self.currentBlock().blockNumber()
        self.import_statements[block_nb] = import_stmt
def get(self, hook_id):
    """Fetch a single webhook by its id."""
    return self.rachio.get("/".join(["notification", "webhook", hook_id]))
def partial_results(self):
    '''The results that the RPC has received *so far*.

    This may also be the complete results if :attr:`complete` is ``True``.
    '''
    snapshot = []
    for item in self._results:
        if isinstance(item, Exception):
            # rebuild the exception type around deep-copied args
            snapshot.append(type(item)(*deepcopy(item.args)))
            continue
        generator_like = (hasattr(item, "__iter__")
                          and not hasattr(item, "__len__"))
        if generator_like:
            # pass generators straight through
            snapshot.append(item)
        else:
            snapshot.append(deepcopy(item))
    return snapshot
def _columns_for_table(table_name):
    """Return all of the columns registered for a given table.

    Parameters
    ----------
    table_name : str

    Returns
    -------
    columns : dict of column wrappers
        Keys will be column names.
    """
    return {
        key[1]: wrapper
        for key, wrapper in _COLUMNS.items()
        if key[0] == table_name
    }
def create_cancel_operation(cls, cancel_operation, **kwargs):
    """Create CancelOperation

    Create a new CancelOperation.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True:

        >>> thread = api.create_cancel_operation(cancel_operation, async=True)
        >>> result = thread.get()

    :param async bool
    :param CancelOperation cancel_operation: Attributes of cancelOperation
        to create (required)
    :return: CancelOperation
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # asynchronous call: hand back the request thread immediately
        return cls._create_cancel_operation_with_http_info(cancel_operation, **kwargs)
    data = cls._create_cancel_operation_with_http_info(cancel_operation, **kwargs)
    return data
def value(self):
    """Return the current evaluation of a condition statement."""
    evaluated = self.evaluate(self.trigger.user)
    return ''.join(str(part) for part in evaluated)
def _add_rule ( self , state , rule ) :
"""Parse rule and add it to machine ( for internal use ) .""" | if rule . strip ( ) == "-" :
parsed_rule = None
else :
parsed_rule = rule . split ( ',' )
if ( len ( parsed_rule ) != 3 or parsed_rule [ 1 ] not in [ 'L' , 'N' , 'R' ] or len ( parsed_rule [ 2 ] ) > 1 ) :
raise SyntaxError ( 'Wrong format of rule: ' + rule )
if parsed_rule [ 0 ] == "" :
parsed_rule [ 0 ] = self . alphabet [ len ( self . states [ state ] ) ]
if parsed_rule [ 2 ] == "" :
parsed_rule [ 2 ] = state
self . states [ state ] . append ( parsed_rule ) |
def sg_log(tensor, opt):
    r"""Log-transform a dense tensor.

    See `tf.log()` in tensorflow.

    Args:
        tensor: A `Tensor` (automatically given by chain).
        opt:
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor`.
    """
    # Shift by a small epsilon so the log argument stays strictly positive.
    shifted = tensor + tf.sg_eps
    return tf.log(shifted, name=opt.name)
def instance(self):
    """Content instance of the wrapped object (lazily woken up)."""
    if self._instance is not None:
        return self._instance
    logger.debug("SuperModel::instance: *Wakup object*")
    self._instance = api.get_object(self.brain)
    return self._instance
def remove(self):
    '''Remove the collection and all records in the collection.

    :return: string with confirmation of deletion
    :raises Exception: if the collection folder cannot be removed
    '''
    _title = '%s.remove' % self.__class__.__name__
    # Remove the collection tree. ignore_errors=True already swallows
    # per-file problems; the handler only fires on unexpected failures.
    try:
        import shutil
        shutil.rmtree(self.collection_folder, ignore_errors=True)
    except Exception as err:
        # Previously a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed and cause chained.
        raise Exception('%s failed to remove %s collection from app data.'
                        % (_title, self.collection_folder)) from err
    exit_msg = '%s collection has been removed from app data.' % self.collection_folder
    return exit_msg
def start(self, num_processes: Optional[int] = 1, max_restarts: int = None) -> None:
    """Starts this server in the `.IOLoop`.

    By default, we run the server in this process and do not fork any
    additional child process.

    If num_processes is ``None`` or <= 0, we detect the number of cores
    available on this machine and fork that number of child processes.
    If num_processes is given and > 1, we fork that specific number of
    sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).

    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``TCPServer.start(n)``.

    Values of ``num_processes`` other than 1 are not supported on Windows.

    The ``max_restarts`` argument is passed to `.fork_processes`.

    .. versionchanged:: 6.0
       Added ``max_restarts`` argument.
    """
    assert not self._started
    self._started = True
    if num_processes != 1:
        process.fork_processes(num_processes, max_restarts)
    # Atomically take ownership of the pending sockets before binding.
    sockets, self._pending_sockets = self._pending_sockets, []
    self.add_sockets(sockets)
def post(self, url, post_params=None):
    """Make an HTTP POST request to the Parser API.

    :param url: url to which to make the request
    :param post_params: POST data to send along. Expected to be a dict;
        defaults to an empty dict.
    :return: the ``requests.Response`` from the API
    """
    # Copy so the caller's dict is not mutated with the token, and
    # tolerate the documented default of None (previously a TypeError).
    params = dict(post_params or {})
    params['token'] = self.token
    body = urlencode(params)
    logger.debug('Making POST request to %s with body %s', url, body)
    return requests.post(url, data=body)
def _GetRow ( self , columns = None ) :
"""Returns the current row as a tuple .""" | row = self . _table [ self . _row_index ]
if columns :
result = [ ]
for col in columns :
if col not in self . header :
raise TableError ( "Column header %s not known in table." % col )
result . append ( row [ self . header . index ( col ) ] )
row = result
return row |
def get_all_related_many_to_many_objects(opts):
    """Django 1.8 changed the meta api; see docstr in
    compat.get_all_related_objects().

    :param opts: Options instance
    :return: list of many-to-many relations
    """
    if django.VERSION >= (1, 9):
        return [rel for rel in opts.related_objects if rel.field.many_to_many]
    return opts.get_all_related_many_to_many_objects()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.