signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def close(self):
    """Shut down the main socket and any cached async socket."""
    self._socket.close()
    cached = self._async_socket_cache
    if cached:
        cached.close()
        # Drop the reference so a later call does not close it twice.
        self._async_socket_cache = None
def header(self):
    """Wea-format header lines for this location."""
    loc = self.location
    lines = [
        "place %s" % loc.city,
        "latitude %.2f" % loc.latitude,
        # The wea convention negates longitude and expresses the time
        # zone in degrees (hours * 15), also negated.
        "longitude %.2f" % -loc.longitude,
        "time_zone %d" % (-loc.time_zone * 15),
        "site_elevation %.1f" % loc.elevation,
        "weather_data_file_units 1",
    ]
    return "\n".join(lines) + "\n"
def search_task_views(self, user, search_string):
    """Invoke the TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS action.

    (The previous docstring named ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT,
    which does not match the action actually invoked below.)

    :param user: passed as parameter1 of the magic call
    :param search_string: search text passed as parameter2
    :return: JSON response
    """
    magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS, parameter1=user, parameter2=search_string)
    response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
    # Raises if the magic response indicates an invalid/failed call.
    result = self._get_results_or_raise_if_magic_invalid(magic, response, TouchWorksMagicConstants.RESULT_SEARCH_TASK_VIEWS)
    return result
def _countDigits ( self , text ) :
"""Count digits at start of text""" | index = 0
while index < len ( text ) :
if not text [ index ] . isdigit ( ) :
break
index += 1
return index |
def _plt_pydot(self, fout_img):
    """Plot the GO DAG to *fout_img* using the pydot graphics engine.

    The pydot output format is inferred from the file extension.
    """
    dag = self._get_pydot_graph()
    # Extension without the leading dot, e.g. "png" from "plot.png".
    img_fmt = os.path.splitext(fout_img)[1][1:]
    dag.write(fout_img, format=img_fmt)
    # Log how many user-requested GO terms vs. all GO terms were written.
    self.log.write(" {GO_USR:>3} usr {GO_ALL:>3} GOs WROTE: {F}\n".format(F=fout_img, GO_USR=len(self.godag.go_sources), GO_ALL=len(self.godag.go2obj)))
def update_time_reset_passwd(user_name, the_time):
    '''Update the time when the user reset their password.

    :param user_name: name of the user whose record is updated
    :param the_time: timestamp of the password reset
    :return: True on success, False if the update failed
    '''
    entry = TabMember.update(
        time_reset_passwd=the_time,
    ).where(TabMember.user_name == user_name)
    try:
        entry.execute()
        return True
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return False
def skip(self, content):
    """Get whether to skip this I{content}.

    Content should be skipped when it is optional and its value is
    either None or an empty list/tuple.
    @param content: Content to skip.
    @type content: L{Object}
    @return: True if content is to be skipped.
    @rtype: bool
    """
    if not self.optional(content):
        return False
    value = content.value
    return value is None or (isinstance(value, (list, tuple)) and len(value) == 0)
def compute_control_digit(clabe: str) -> str:
    """Compute the CLABE control digit.

    See https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
    """
    digits = [int(ch) for ch in clabe]
    # Weighted sum mod 10 of the first 17 digits, per the CLABE spec.
    weighted_sum = sum(
        d * w % 10 for d, w in zip(digits[:CLABE_LENGTH - 1], CLABE_WEIGHTS)
    )
    return str((10 - weighted_sum % 10) % 10)
def cors(origins,
         methods=('HEAD', 'OPTIONS', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE'),
         headers=('Accept', 'Accept-Language', 'Content-Language', 'Content-Type', 'X-Requested-With'),
         max_age=None):
    """Adds CORS headers to the decorated view function.

    :param origins: Allowed origins (see below)
    :param methods: A list of allowed HTTP methods
    :param headers: A list of allowed HTTP headers
    :param max_age: Duration in seconds for which the CORS response may be cached

    The :obj:`origins` parameter may be one of:

    1. A callable that receives the origin as a parameter.
    2. A list of origins.
    3. ``*``, indicating that this resource is accessible by any origin.

    Example use::

        from flask import Flask, Response
        from coaster.views import cors

        app = Flask(__name__)

        @app.route('/any')
        @cors('*')
        def any_origin():
            return Response()

        @app.route('/static', methods=['GET', 'POST'])
        @cors(['https://hasgeek.com'], methods=['GET'],
              headers=['Content-Type', 'X-Requested-With'], max_age=3600)
        def static_list():
            return Response()

        @app.route('/callable')
        @cors(lambda origin: True)
        def callable_function():
            return Response()
    """
    # Defaults are tuples rather than lists: mutable default arguments are
    # shared between all calls and a latent source of bugs. Both defaults
    # are only read (membership tests and ', '.join), so this is safe.
    def inner(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            origin = request.headers.get('Origin')
            if request.method not in methods:
                abort(405)
            # Accept the request if any origin policy matches; 403 otherwise.
            if origins == '*':
                pass
            elif is_collection(origins) and origin in origins:
                pass
            elif callable(origins) and origins(origin):
                pass
            else:
                abort(403)
            if request.method == 'OPTIONS':
                # CORS pre-flight request: respond without calling the view.
                resp = Response()
            else:
                result = f(*args, **kwargs)
                resp = make_response(result) if not isinstance(
                    result, (Response, WerkzeugResponse, current_app.response_class)) else result
            resp.headers['Access-Control-Allow-Origin'] = origin if origin else ''
            resp.headers['Access-Control-Allow-Methods'] = ', '.join(methods)
            resp.headers['Access-Control-Allow-Headers'] = ', '.join(headers)
            if max_age:
                resp.headers['Access-Control-Max-Age'] = str(max_age)
            # Add 'Origin' to the Vary header since the response varies by origin.
            if 'Vary' in resp.headers:
                vary_values = [item.strip() for item in resp.headers['Vary'].split(',')]
                if 'Origin' not in vary_values:
                    vary_values.append('Origin')
                resp.headers['Vary'] = ', '.join(vary_values)
            else:
                resp.headers['Vary'] = 'Origin'
            return resp
        return wrapper
    return inner
def return_dat(self, chan, begsam, endsam):
    """Return the data as 2D numpy.ndarray.

    Parameters
    ----------
    chan : list
        indices of the channels to read
    begsam : int
        index of the first sample
    endsam : int
        index of the last sample

    Returns
    -------
    numpy.ndarray
        A 2d matrix, with dimension chan X samples. Samples outside the
        recording are left as NaN.

    Notes
    -----
    This format is tricky for both channels and samples. For the samples,
    we just use the boundaries in the block header. For the channels, we
    assume that there are max two signals, one EEG and one PIB box. We
    just use the boundary between them to define if a channel belongs to
    the first group or to the second.

    TODO
    ----
    use wonambi.ioeeg.utils._select_blocks here, but you need to test it
    with a PIB box.
    """
    assert begsam < endsam
    # Pre-fill with NaN so samples outside the recording stay NaN.
    data = empty((len(chan), endsam - begsam))
    data.fill(NaN)
    chan = asarray(chan)
    # we assume there are only two signals: 0 = first group (e.g. EEG),
    # 1 = second group (e.g. PIB box), split at _nchan_signal1.
    signals_to_read = []
    if (chan < self._nchan_signal1).any():
        signals_to_read.append(0)
    if (chan >= self._nchan_signal1).any():
        signals_to_read.append(1)
    for one_signal in signals_to_read:
        if one_signal == 0:
            # boolean mask into the output rows / channel indices on disk
            i_chan_data = chan < self._nchan_signal1
            i_chan_rec = chan[i_chan_data]
        if one_signal == 1:
            i_chan_data = chan >= self._nchan_signal1
            # channel indices are relative to the start of the second signal
            i_chan_rec = chan[i_chan_data] - self._nchan_signal1
        # x1: cumulative sample boundaries of the records, starting at 0.
        x = self._n_samples[one_signal]
        x1 = cumsum(append(0, x))
        # begrec is -1 when begsam is before start of the recordings
        begrec = where(begsam < x1)[0][0] - 1
        try:
            endrec = where(endsam < x1)[0][0] - 1
        except IndexError:
            # endsam is past the last record boundary
            endrec = len(x)
        f = self._signal[one_signal].open('rb')
        i0 = 0
        for rec in range(begrec, endrec + 1):
            # if begsam is before start of the recordings, we just shift the baseline
            if rec == -1:
                i0 = -begsam
                continue
            # if endsam is after end of the recordings, we just stop here
            if rec == len(self._n_samples[one_signal]):
                break
            # sample range within this record (partial for first/last record)
            if rec == begrec:
                begpos_rec = begsam - x1[rec]
            else:
                begpos_rec = 0
            if rec == endrec:
                endpos_rec = endsam - x1[rec]
            else:
                endpos_rec = x[rec]
            i1 = i0 + endpos_rec - begpos_rec
            lg.debug('data {: 8d}-{: 8d}, rec ({}) {: 5d} - {: 5d}'.format(i0, i1, rec, begpos_rec, endpos_rec))
            rec_dat = _read_block(f, self._block_hdr[one_signal][rec], self._i_data[one_signal][rec])
            data[i_chan_data, i0:i1] = rec_dat[i_chan_rec, begpos_rec:endpos_rec]
            # advance the output cursor to the end of what was just copied
            i0 = i1
        f.close()
    return data
def _find_paths(self, current_dir, patterns):
    """Recursively generate absolute paths whose components underneath
    *current_dir* match the corresponding pattern in *patterns*.
    """
    # Consume the first pattern; the rest are handled by recursion.
    pattern = patterns[0]
    patterns = patterns[1:]
    has_wildcard = is_pattern(pattern)
    using_globstar = pattern == "**"
    # This avoids os.listdir() for performance: a literal component does
    # not require scanning the directory at all.
    if has_wildcard:
        entries = [x.name for x in scandir(current_dir)]
    else:
        entries = [pattern]
    if using_globstar:
        # '**' matches every directory at any depth below current_dir.
        matching_subdirs = map(lambda x: x[0], walk(current_dir))
    else:
        subdirs = [e for e in entries if os.path.isdir(os.path.join(current_dir, e))]
        matching_subdirs = match_entries(subdirs, pattern)
    # For terminal globstar, add a pattern for all files in subdirs
    if using_globstar and not patterns:
        patterns = ['*']
    if patterns:  # we've still got more directories to traverse
        for subdir in matching_subdirs:
            absolute_path = os.path.join(current_dir, subdir)
            for match in self._find_paths(absolute_path, patterns):
                yield match
    else:  # we've got the last pattern
        if not has_wildcard:
            # Literal leaf: probe only the two whisper file spellings.
            entries = [pattern + '.wsp', pattern + '.wsp.gz']
        files = [e for e in entries if os.path.isfile(os.path.join(current_dir, e))]
        matching_files = match_entries(files, pattern + '.*')
        # NOTE(review): in this branch matching_subdirs is assumed to be a
        # list (from match_entries); a globstar pattern cannot reach here
        # because patterns was replaced with ['*'] above — confirm.
        for _basename in matching_files + matching_subdirs:
            yield os.path.join(current_dir, _basename)
def description(self):
    """This read-only attribute is a sequence of 7-item sequences.

    Each of these sequences contains information describing one result column:
    - name
    - type_code
    - display_size (None in current implementation)
    - internal_size (None in current implementation)
    - precision (None in current implementation)
    - scale (None in current implementation)
    - null_ok (always True in current implementation)

    The ``type_code`` can be interpreted by comparing it to the Type
    Objects defined by this module.
    """
    columns = self._columns
    if columns is None:
        return []
    # (name, type_code, display_size, internal_size, precision, scale, null_ok)
    return [(col[0], col[1], None, None, None, None, True) for col in columns]
def find_interface_by_mac(self, **kwargs):
    """Find the interface through which a MAC can be reached.

    Args:
        mac_address (str): A MAC address in 'xx:xx:xx:xx:xx:xx' format.

    Returns:
        list[dict]: a list of mac table data entries for that address.

    Raises:
        KeyError: if `mac_address` is not specified.
    """
    target = kwargs.pop('mac_address')
    return [entry for entry in self.mac_table if entry['mac_address'] == target]
def get_defaults_str(raw=None, after='Defaults::'):
    """Get the string YAML representation of configuration defaults.

    :param raw: source text to extract from; defaults to this module's
        docstring.
    :param after: marker string; everything after its last occurrence is
        dedented, stripped, and returned.
    """
    if raw is None:
        raw = __doc__
    # NOTE(review): ``unicode`` exists only on Python 2 — this raises
    # NameError on Python 3; confirm the supported interpreter version.
    return unicode(textwrap.dedent(raw.split(after)[-1]).strip())
def CMFR(t, C_initial, C_influent):
    """Calculate the effluent concentration of a conservative (non-reacting)
    material with continuous input to a completely mixed flow reactor.

    Note: time t=0 is the time at which the material starts to flow into
    the reactor.

    :param t: time(s) at which to calculate the effluent concentration;
        may be made dimensionless by dividing by the CMFR residence time
    :type t: float or numpy.array
    :param C_initial: concentration in the CMFR at time t=0
    :type C_initial: float
    :param C_influent: concentration entering the CMFR
    :type C_influent: float
    :return: effluent concentration
    :rtype: float
    """
    # Exponential washout factor; shared by both terms.
    decay = np.exp(-t)
    return C_influent * (1 - decay) + C_initial * decay
def make_directory_if_needed(directory_path):
    """Create *directory_path* (and missing parents) if it does not exist.

    :param directory_path: path of the directory to ensure.
    :raises OSError: if the path exists but is not a directory.
    """
    if os.path.exists(directory_path):
        if not os.path.isdir(directory_path):
            raise OSError("Path is not a directory:", directory_path)
    else:
        # exist_ok=True closes the race where another process creates the
        # directory between the exists() check above and this call.
        os.makedirs(directory_path, exist_ok=True)
def _create_clock(self):
    """Build a MinuteSimulationClock for the simulation sessions.

    Used when the clock property is not set explicitly; the clock is
    derived from the configured data frequency and emission rate.
    """
    # .loc replaces the long-deprecated (removed in pandas 1.0) .ix indexer;
    # sessions are label-based, so .loc is the correct equivalent.
    trading_o_and_c = self.trading_calendar.schedule.loc[self.sim_params.sessions]
    market_closes = trading_o_and_c['market_close']
    minutely_emission = False
    if self.sim_params.data_frequency == 'minute':
        market_opens = trading_o_and_c['market_open']
        minutely_emission = self.sim_params.emission_rate == "minute"
        # The calendar's execution times are the minutes over which we
        # actually want to run the clock. Typically the execution times
        # simply adhere to the market open and close times. In the case of
        # the futures calendar, for example, we only want to simulate over
        # a subset of the full 24 hour calendar, so the execution times
        # dictate a market open time of 6:31am US/Eastern and a close of
        # 5:00pm US/Eastern.
        execution_opens = self.trading_calendar.execution_time_from_open(market_opens)
        execution_closes = self.trading_calendar.execution_time_from_close(market_closes)
    else:
        # In daily mode, we want to have one bar per session, timestamped
        # as the last minute of the session.
        execution_closes = self.trading_calendar.execution_time_from_close(market_closes)
        execution_opens = execution_closes
    # FIXME generalize these values
    before_trading_start_minutes = days_at_time(self.sim_params.sessions, time(8, 45), "US/Eastern")
    return MinuteSimulationClock(
        self.sim_params.sessions,
        execution_opens,
        execution_closes,
        before_trading_start_minutes,
        minute_emission=minutely_emission,
    )
def new(cls, starting_domino=None, starting_player=0):
    ''':param Domino starting_domino: the domino that should be played
                                      to start the game. The player
                                      with this domino in their hand
                                      will play first.
    :param int starting_player: the player that should play first.
                                This value is ignored if a starting
                                domino is provided. Players are
                                referred to by their indexes: 0, 1,
                                2, and 3. 0 and 2 are on one team,
                                and 1 and 3 are on another team.
    :return: a new game, initialized according to
             starting_domino and starting_player
    :raises NoSuchDominoException: if starting_domino is invalid
    :raises NoSuchPlayerException: if starting_player is invalid
    '''
    board = dominoes.Board()
    # Deal four randomized hands, one per player.
    hands = _randomized_hands()
    moves = []
    result = None
    if starting_domino is None:
        _validate_player(starting_player)
        # Any domino in the starting player's hand is a valid first move.
        valid_moves = tuple((d, True) for d in hands[starting_player])
        game = cls(board, hands, moves, starting_player, valid_moves, starting_player, result)
    else:
        # The player holding the starting domino plays first, and that
        # move is made immediately.
        starting_player = _domino_hand(starting_domino, hands)
        valid_moves = ((starting_domino, True),)
        game = cls(board, hands, moves, starting_player, valid_moves, starting_player, result)
        game.make_move(*valid_moves[0])
    return game
def v_center_cell_text(cell):
    """Vertically center the text within the cell's grid.

    Like this::

        |foobar|    |      |
        |      | -> |foobar|

    Parameters
    ----------
    cell : dashtable.data2rst.Cell

    Returns
    -------
    cell : dashtable.data2rst.Cell
    """
    lines = cell.text.split('\n')
    # Interior width: first line is a border, minus its two edge characters.
    cell_width = len(lines[0]) - 2
    # Strip the left/right border characters from each interior line.
    truncated_lines = []
    for i in range(1, len(lines) - 1):
        truncated = lines[i][1:len(lines[i]) - 1]
        truncated_lines.append(truncated)
    total_height = len(truncated_lines)
    # Count blank interior lines above the text...
    empty_lines_above = 0
    for i in range(len(truncated_lines)):
        if truncated_lines[i].rstrip() == '':
            empty_lines_above += 1
        else:
            break
    # ...and below it.
    empty_lines_below = 0
    for i in reversed(range(len(truncated_lines))):
        if truncated_lines[i].rstrip() == '':
            empty_lines_below += 1
        else:
            break
    # The non-blank band of text, without surrounding blank lines.
    significant_lines = truncated_lines[empty_lines_above:len(truncated_lines) - empty_lines_below]
    remainder = total_height - len(significant_lines)
    blank = cell_width * ' '
    # Redistribute the blank lines evenly: floor(half) above, ceil(half) below.
    above_height = math.floor(remainder / 2)
    for i in range(0, above_height):
        significant_lines.insert(0, blank)
    below_height = math.ceil(remainder / 2)
    for i in range(0, below_height):
        significant_lines.append(blank)
    # Re-attach the border characters and write back the interior lines.
    for i in range(len(significant_lines)):
        lines[i + 1] = ''.join([lines[i + 1][0] + significant_lines[i] + lines[i + 1][-1]])
    cell.text = '\n'.join(lines)
    return cell
def fetch(destination):
    """Fetch TLEs from the internet and save them to ``destination``.

    :param destination: path of the UTF-8 text file to write; contents of
        every URL in TLE_URLS are concatenated into it.
    """
    with io.open(destination, mode="w", encoding="utf-8") as dest:
        for url in TLE_URLS:
            response = urlopen(url)
            try:
                dest.write(response.read().decode("utf-8"))
            finally:
                # urlopen responses hold a socket; the original leaked it.
                response.close()
def check_connection_state(self):
    """Check the state of the connection using a connection state request.

    This sends a CONNECTIONSTATE_REQUEST. This method will only return
    True if the connection is established and no error code is returned
    from the KNX/IP gateway.
    """
    if not self.connected:
        self.connection_state = -1
        return False
    frame = KNXIPFrame(KNXIPFrame.CONNECTIONSTATE_REQUEST)
    frame.body = self.hpai_body()
    # Send maximum 3 connection state requests with a 10 second timeout
    res = False
    self.connection_state = 0
    maximum_retry = 3
    for retry_counter in range(0, maximum_retry):
        logging.debug("Heartbeat: Send connection state request")
        # Suggestion:
        # Carve the control socket out of the KNXIPTunnel class and expose
        # only send/receive; implement the heartbeat there so we can block
        # while other functions want to send.
        self.control_socket.settimeout(10)
        # Kind of a quirks
        # NOTE(review): the request is sent twice — once here and once
        # inside the try block below; looks unintentional, confirm.
        self.control_socket.sendto(bytes(frame.to_frame()), (self.remote_ip, self.remote_port))
        try:
            self.control_socket.sendto(bytes(frame.to_frame()), (self.remote_ip, self.remote_port))
            receive = self.control_socket.recv(1024)
        except socket.timeout:
            logging.info("Heartbeat: No response, Retry Counter %d/%d", retry_counter, maximum_retry)
            # NOTE(review): 'break' aborts on the first timeout, so the
            # advertised 3 retries never happen; 'continue' was probably
            # intended — confirm.
            break
        frame = KNXIPFrame.from_frame(receive)
        if frame.service_type_id == KNXIPFrame.CONNECTIONSTATE_RESPONSE:
            if frame.body[1] == KNXIPFrame.E_NO_ERROR:
                logging.debug("Heartbeat: Successful")
                res = True
                break
            if frame.body[1] == KNXIPFrame.E_CONNECTION_ID:
                logging.error("Heartbeat: Response No active " "connection found for Channel:%d ", self.channel)
            if frame.body[1] == KNXIPFrame.E_DATA_CONNECTION:
                logging.error("Heartbeat: Response Data Connection Error Response " "for Channel:%d ", self.channel)
            # NOTE(review): duplicated E_DATA_CONNECTION test below; the
            # log message suggests a KNX subnetwork error constant was
            # intended here — confirm against the KNXIPFrame constants.
            if frame.body[1] == KNXIPFrame.E_DATA_CONNECTION:
                logging.error("Heartbeat: Response KNX Sub Network Error Response " "for Channel:%d ", self.channel)
        else:
            logging.error("Heartbeat: Invalid Response!")
        if self.connection_state != 0:
            logging.info("Heartbeat: Connection state was %s", self.connection_state)
            res = False
    if not res:
        if self.connection_state == 0:
            self.connection_state = -1
        self.disconnect()
        return False
    return True
def set_iscsi_info(self, target_name, lun, ip_address, port='3260', auth_method=None, username=None, password=None):
    """Set iscsi details of the system in uefi boot mode.

    The initiator system is set with the target details like
    IQN, LUN, IP, Port etc.

    :param target_name: Target Name for iscsi.
    :param lun: logical unit number.
    :param ip_address: IP address of the target.
    :param port: port of the target.
    :param auth_method: either None or CHAP.
    :param username: CHAP Username for authentication.
    :param password: CHAP secret.
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedInBiosError, if the system is
        in the bios boot mode.
    """
    # Not implemented for this interface; unconditionally reports the
    # operation as unsupported.
    # NOTE(review): the docstring advertises IloCommandNotSupportedInBiosError
    # but this raises IloCommandNotSupportedError — confirm which is intended.
    raise exception.IloCommandNotSupportedError(ERRMSG)
def get_serializer_class(self):
    """Get the class type of the serializer.

    :return: `rest_framework.Serializer`
    :raises Http404: if the requested doctype is unknown.
    """
    klass = None
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    if lookup_url_kwarg in self.kwargs:
        # Looks like this is a detail view: use the object's own class.
        klass = self.get_object().__class__
    elif "doctype" in self.request.REQUEST:
        # NOTE(review): request.REQUEST was removed in Django 1.9; this
        # code path requires an older Django — confirm supported version.
        base = self.model.get_base_class()
        doctypes = indexable_registry.families[base]
        try:
            klass = doctypes[self.request.REQUEST["doctype"]]
        except KeyError:
            raise Http404
    if hasattr(klass, "get_serializer_class"):
        return klass.get_serializer_class()
    # TODO: fix deprecation warning here -- `get_serializer_class` is going away soon!
    return super(ContentViewSet, self).get_serializer_class()
def ensure_parent_dir(filename):
    """<Purpose>
      Ensure that the parent directory of 'filename' exists, creating it
      (and any missing ancestors) when it does not.

      Example: if 'filename' is '/a/b/c/d.txt' and only '/a/b/' exists,
      then '/a/b/c/' will be created.

    <Arguments>
      filename:
        A path string.

    <Exceptions>
      securesystemslib.exceptions.FormatError: If 'filename' is improperly
      formatted.

    <Side Effects>
      A directory is created whenever the parent directory of 'filename'
      does not exist.

    <Return>
      None.
    """
    # Ensure 'filename' corresponds to 'PATH_SCHEMA'; raises
    # securesystemslib.exceptions.FormatError on a mismatch.
    securesystemslib.formats.PATH_SCHEMA.check_match(filename)
    parent = os.path.split(filename)[0]
    if parent and not os.path.exists(parent):
        # 448 decimal == 0o700: rwx for the owner only.
        os.makedirs(parent, 448)
def _dict_compare ( d1 , d2 ) :
"""We care if one of two things happens :
* d2 has added a new key
* a ( value for the same key ) in d2 has a different value than d1
We don ' t care if this stuff happens :
* A key is deleted from the dict
Should return a list of keys that either have been added or have a different value than they used to""" | keys_added = set ( d2 . keys ( ) ) - set ( d1 . keys ( ) )
keys_changed = [ k for k in d1 . keys ( ) if k in d2 . keys ( ) and d1 [ k ] != d2 [ k ] ]
return list ( keys_added ) + keys_changed |
def ClearCallHistory(self, Username='ALL', Type=chsAllCalls):
    """Clears the call history.

    :Parameters:
      Username : str
        Skypename of the user. A special value of 'ALL' means that entries
        of all users should be removed.
      Type : `enums`.clt*
        Call type.
    """
    command = 'CLEAR CALLHISTORY %s %s' % (str(Type), Username)
    # The same string serves as both the command and the expected reply.
    self._DoCommand(command, command)
def quantized(values, steps, input_min=0, input_max=1):
    """Returns *values* quantized to *steps* increments. All items in *values*
    are assumed to be between *input_min* and *input_max* (which default to 0
    and 1 respectively), and the output will be in the same range.

    For example, to quantize values between 0 and 1 to 5 "steps" (0.0, 0.25,
    0.5, 0.75, 1.0)::

        from gpiozero import PWMLED, MCP3008
        from gpiozero.tools import quantized
        from signal import pause

        led = PWMLED(4)
        pot = MCP3008(channel=0)
        led.source = quantized(pot, 4)
        pause()
    """
    values = _normalize(values)
    # NOTE(review): since this is a generator, these validations only run
    # when iteration starts, not at call time — confirm that is acceptable.
    if steps < 1:
        raise ValueError("steps must be 1 or larger")
    if input_min >= input_max:
        raise ValueError('input_min must be smaller than input_max')
    input_size = input_max - input_min
    # Scale into [0, 1], snap to a step boundary, then scale back.
    for v in scaled(values, 0, 1, input_min, input_max):
        yield ((int(v * steps) / steps) * input_size) + input_min
def new_window(self, type_hint=None):
    """Switches to a new top-level browsing context.

    The type hint can be one of "tab" or "window". If not specified the
    browser will automatically select it.

    :Usage:
        driver.switch_to.new_window('tab')
    """
    value = self._driver.execute(Command.NEW_WINDOW, {'type': type_hint})['value']
    # Switch the driver to the handle of the newly created context.
    self._w3c_window(value['handle'])
def _get_module_via_sys_modules(self, fullname):
    """Attempt to fetch source code via sys.modules. This is specifically to
    support __main__, but it may catch a few more cases.

    Returns a (path, source_bytes, is_pkg) tuple, or None when the module
    is absent, not a regular module, or has no usable source file.
    """
    module = sys.modules.get(fullname)
    LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module)
    if not isinstance(module, types.ModuleType):
        LOG.debug('sys.modules[%r] absent or not a regular module', fullname)
        return
    path = self._py_filename(getattr(module, '__file__', ''))
    if not path:
        return
    is_pkg = hasattr(module, '__path__')
    try:
        source = inspect.getsource(module)
    except IOError:
        # Work around inspect.getsourcelines() bug for 0-byte __init__.py
        # files.
        if not is_pkg:
            raise
        source = '\n'
    if isinstance(source, mitogen.core.UnicodeType):
        # get_source() returns "string" according to PEP-302, which was
        # reinterpreted for Python 3 to mean a Unicode string.
        source = source.encode('utf-8')
    return path, source, is_pkg
def reshuffle_batches(self, indices, rng):
    """Permute whole global batches, keeping intra-batch sample order.

    :param indices: torch.tensor with batch indices
    :param rng: instance of torch.Generator
    :return: flattened tensor with batches reordered
    """
    batched = indices.view(-1, self.global_batch_size)
    # Random order over the batch dimension only.
    permutation = torch.randperm(batched.shape[0], generator=rng)
    return batched[permutation, :].view(-1)
def save(self, filePath):
    """Write the CSV representation to *filePath* and remember the path.

    :param filePath: destination file path; stored on ``self.filename``.
    """
    self.filename = filePath
    # Context manager guarantees the handle is flushed and closed even if
    # toStr() or the write raises (the original leaked the handle then).
    with open(filePath, 'w') as f:
        f.write(self.toStr())
def add_badge_roles(app):
    """Add ``badge`` roles to your sphinx documents. They create
    colorful inline badges.

    Registers ``badge`` (color derived from the badge text) plus
    fixed-color variants ``badge-red``, ``badge-blue``, ``badge-green``
    and ``badge-yellow``.
    """
    from docutils.nodes import inline, make_id
    from docutils.parsers.rst.roles import set_classes

    def create_badge_role(color=None):
        # Factory: builds a role function bound to a fixed color, or to
        # text-derived coloring when color is None.
        def badge_role(name, rawtext, text, lineno, inliner, options=None, content=None):
            options = options or {}
            set_classes(options)
            classes = ['badge']
            if color is None:
                # Derive the color class from the badge text itself.
                classes.append('badge-' + make_id(text))
            else:
                classes.append('badge-' + color)
            if len(text) == 1:
                # Single-character badges get special (round) styling.
                classes.append('badge-one')
            options['classes'] = classes
            node = inline(rawtext, text, **options)
            return [node], []
        return badge_role

    app.add_role('badge', create_badge_role())
    app.add_role('badge-red', create_badge_role('red'))
    app.add_role('badge-blue', create_badge_role('blue'))
    app.add_role('badge-green', create_badge_role('green'))
    app.add_role('badge-yellow', create_badge_role('yellow'))
def scopes(self):
    """Gets the Scopes API client.

    Returns:
        Scopes: lazily-created client, cached after the first access.
    """
    client = self.__scopes
    if not client:
        client = Scopes(self.__connection)
        self.__scopes = client
    return client
def mavlink_packet(self, msg):
    '''handle an incoming mavlink packet'''
    # Drop graphs whose windows were closed; walk backwards so pop()
    # does not disturb the indices still to be visited.
    for idx in reversed(range(len(self.graphs))):
        if not self.graphs[idx].is_alive():
            self.graphs[idx].close()
            self.graphs.pop(idx)
    # Feed the packet to every surviving graph.
    for graph in self.graphs:
        graph.add_mavlink_packet(msg)
def Nu_cylinder_Perkins_Leppert_1964(Re, Pr, mu=None, muw=None):
    r'''Calculates Nusselt number for crossflow across a single tube as shown
    in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream
    temperature. Recommends a viscosity exponent correction of 0.25, which is
    applied only if provided. Also shown in [2]_.

    .. math::
        Nu = \left[0.31 Re^{0.5} + 0.11 Re^{0.67}\right] Pr^{0.4}
        \left(\frac{\mu}{\mu_w}\right)^{0.25}

    Parameters
    ----------
    Re : float
        Reynolds number with respect to cylinder diameter, [-]
    Pr : float
        Prandtl number at free stream temperature, [-]
    mu : float, optional
        Viscosity of fluid at the free stream temperature [Pa*s]
    muw : float, optional
        Viscosity of fluid at the wall temperature [Pa*s]

    Returns
    -------
    Nu : float
        Nusselt number with respect to cylinder diameter, [-]

    Notes
    -----
    Considers new data since `Nu_cylinder_Perkins_Leppert_1962`, Re from 2E3
    to 1.2E5, Pr from 1 to 7, and surface to bulk temperature differences of
    11 to 66.

    Examples
    --------
    >>> Nu_cylinder_Perkins_Leppert_1964(6071, 0.7)
    53.61767038619986

    References
    ----------
    .. [1] Perkins Jr., H. C., and G. Leppert. "Local Heat-Transfer
       Coefficients on a Uniformly Heated Cylinder." International Journal of
       Heat and Mass Transfer 7, no. 2 (February 1964): 143-158.
       doi:10.1016/0017-9310(64)90079-1.
    .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer
       from a Circular Cylinder in Crossflow to Air and Liquids." International
       Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805.
       doi:10.1016/j.ijheatmasstransfer.2004.05.012.
    '''
    base = 0.31 * Re ** 0.5 + 0.11 * Re ** 0.67
    Nu = base * Pr ** 0.4
    if mu and muw:
        # Viscosity-ratio correction, applied only when both values given.
        Nu *= (mu / muw) ** 0.25
    return Nu
def DEFINE_multichoice(self, name, default, choices, help, constant=False):
    """Register an option allowing multiple selections from a list.

    :param name: option name
    :param default: default selection(s)
    :param choices: allowed values
    :param help: human-readable description (note: shadows the ``help``
        builtin, but the name is part of the public interface)
    :param constant: whether the option is constant (not overridable)
    """
    self.AddOption(type_info.MultiChoice(name=name, default=default, choices=choices, description=help), constant=constant)
def directory(self, key):
    '''Retrieve directory entries stored under ``key``.

    Keys that are not already 'directory' keys are first converted to
    their 'directory' instance. Returns an empty list when nothing is
    stored.
    '''
    if key.name != 'directory':
        key = key.instance('directory')
    entries = self.get(key)
    return entries if entries else []
def get_result(self, decorated_function, *args, **kwargs):
    """Return the stored result for the specified function call.

    :param decorated_function: called function (original)
    :param args: args with which the function is called
    :param kwargs: kwargs with which the function is called
    :return: the cached value (any type, may be None)
    :raises WCacheStorage.CacheMissedException: when no cached result exists
    """
    entry = self.get_cache(decorated_function, *args, **kwargs)
    if entry.has_value is False:
        raise WCacheStorage.CacheMissedException('No cache record found')
    return entry.cached_value
def build(self, limit_states, discretization, steps_per_interval):
    """Build a populated FragilityFunctionList from this template.

    :param limit_states: a sequence of limit states
    :param discretization: continuous fragility discretization parameter
    :param steps_per_interval: steps_per_interval parameter
    :returns: a populated FragilityFunctionList instance
    """
    new = copy.copy(self)
    # Prepend a zero-damage point only for discrete functions whose
    # nodamage level falls at or below the first IML.
    add_zero = (self.format == 'discrete' and self.nodamage and self.nodamage <= self.imls[0])
    new.imls = build_imls(new, discretization)
    if steps_per_interval > 1:
        new.interp_imls = build_imls(  # passed to classical_damage
            new, discretization, steps_per_interval)
    for i, ls in enumerate(limit_states):
        data = self.array[i]
        if self.format == 'discrete':
            if add_zero:
                # Extend both the IMLs and the POEs with the zero point.
                new.append(FragilityFunctionDiscrete(ls, [self.nodamage] + self.imls, numpy.concatenate([[0.], data]), self.nodamage))
            else:
                new.append(FragilityFunctionDiscrete(ls, self.imls, data, self.nodamage))
        else:
            # continuous: data carries per-limit-state mean and stddev
            new.append(FragilityFunctionContinuous(ls, data['mean'], data['stddev']))
    return new
def start(self):
    """Starts the event dispatcher.

    Initiates the executor and starts polling events.

    Raises:
        IllegalStateError: can't start a dispatcher again when it's
            already running.
    """
    # Guard clause: refuse a second start.
    if self.started:
        raise IllegalStateError("Dispatcher is already started.")
    self.started = True
    self.executor = ThreadPoolExecutor(max_workers=32)
    self.poller = self.executor.submit(self.poll_events)
def stats_view(request, activity_id=None):
    """Render (or print as a PDF) signup statistics for an eighth activity.

    If the GET parameter `year` is set, it uses stats from given year
    with the following caveats:
    - If it's the current year and start_date is set, start_date is ignored
    - If it's the current year, stats will only show up to today - they won't
      go into the future.
    `all_years` (obviously) displays all years.
    """
    # Only eighth admins and teachers may view statistics.
    if not (request.user.is_eighth_admin or request.user.is_teacher):
        return render(request, "error/403.html", {"reason": "You do not have permission to view statistics for this activity."}, status=403)
    activity = get_object_or_404(EighthActivity, id=activity_id)
    # "?print" returns a generated PDF instead of the HTML page.
    if request.GET.get("print", False):
        response = HttpResponse(content_type="application/pdf")
        buf = generate_statistics_pdf([activity], year=int(request.GET.get("year", 0)) or None)
        response.write(buf.getvalue())
        buf.close()
        return response
    current_year = current_school_year()
    # The earliest selectable year comes from the oldest block, if any exist.
    if EighthBlock.objects.count() == 0:
        earliest_year = current_year
    else:
        earliest_year = EighthBlock.objects.order_by("date").first().date.year
    if request.GET.get("year", False):
        year = int(request.GET.get("year"))
    else:
        year = None
    future = request.GET.get("future", False)
    context = {"activity": activity, "years": list(reversed(range(earliest_year, current_year + 1))), "year": year, "future": future}
    # With an explicit year the user's session start date is ignored;
    # otherwise stats begin at the configured start date.
    if year:
        context.update(calculate_statistics(activity, year=year, future=future))
    else:
        context.update(calculate_statistics(activity, get_start_date(request), future=future))
    return render(request, "eighth/statistics.html", context)
def os_walk_pre_35(top, topdown=True, onerror=None, followlinks=False):
    """Pre Python 3.5 implementation of os.walk() that doesn't use scandir.

    Yields (dirpath, dirnames, filenames) triples; in top-down mode the
    caller may prune the dirnames list in place, exactly like os.walk().
    """
    try:
        names = os.listdir(top)
    except OSError as err:
        # Mirror os.walk(): report via the callback (if any) and stop.
        if onerror is not None:
            onerror(err)
        return
    subdirs, regular = [], []
    for name in names:
        bucket = subdirs if os.path.isdir(os.path.join(top, name)) else regular
        bucket.append(name)
    if topdown:
        yield top, subdirs, regular
    for name in subdirs:
        child = os.path.join(top, name)
        # Do not descend into symlinked directories unless asked to.
        if followlinks or not os.path.islink(child):
            for triple in os_walk_pre_35(child, topdown, onerror, followlinks):
                yield triple
    if not topdown:
        yield top, subdirs, regular
def calculate_statistics(self):
    "Jam some data through to generate statistics"
    # 100 synthetic revisions, all with a zero feature value.
    revision_ids = range(0, 100, 1)
    feature_values = zip(revision_ids, [0] * 100)
    score_docs = [self.score(feature) for feature in feature_values]
    predictions = [doc['prediction'] for doc in score_docs]
    stats = Classification(predictions, threshold_ndigits=1,
                           decision_key='probability')
    stats.fit(list(zip(score_docs, predictions)))
    return stats
def _get_mounts(fs_type=None):
    '''List mounted filesystems.

    Returns a dict keyed by device; each value is a list of mount entries
    with 'mount_point' and 'options' (plus 'type' when no fs_type filter
    was given).
    '''
    result = {}
    with salt.utils.files.fopen('/proc/mounts') as handle:
        for raw_line in handle.readlines():
            raw_line = salt.utils.stringutils.to_unicode(raw_line)
            device, mount_point, fstype, options, fs_freq, fs_passno = raw_line.strip().split(" ")
            # Skip entries that do not match the requested filesystem type.
            if fs_type and fstype != fs_type:
                continue
            entry = {'mount_point': mount_point, 'options': options.split(",")}
            if not fs_type:
                entry['type'] = fstype
            result.setdefault(device, []).append(entry)
    return result
def get_composition_admin_session_for_repository(self, repository_id=None):
    """Gets a composition administrative session for the given repository.

    arg:    repository_id (osid.id.Id): the Id of the repository
    return: (osid.repository.CompositionAdminSession) - a
            CompositionAdminSession
    raise:  NotFound - repository_id not found
    raise:  NullArgument - repository_id is null
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_composition_admin() or
            supports_visible_federation() is false
    compliance: optional - This method must be implemented if
            supports_composition_admin() and
            supports_visible_federation() are true.
    """
    if repository_id is None:
        raise NullArgument()
    if not self.supports_composition_admin() or not self.supports_visible_federation():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise
        # OperationFailed()
    try:
        # BUG FIX: previously instantiated CompositionSearchSession here,
        # which contradicts this method's name and documented
        # CompositionAdminSession return type.
        session = sessions.CompositionAdminSession(repository_id, proxy=self._proxy, runtime=self._runtime)
    except AttributeError:
        raise
        # OperationFailed()
    return session
def indicate_last(items):
    """Iterate through a list and flag which item is the last; intended to
    assist tree displays of hierarchical content.

    :return: yields (<bool>, <item>) where bool is True only on the last entry
    :rtype: generator
    """
    total = len(items)
    for position, entry in enumerate(items, start=1):
        yield (position == total, entry)
def match_owner(self, url):
    """Find the first entity, with keys in the key jar, whose identifier
    matches the given URL.  The match is a leading substring match.

    :param url: A URL
    :return: An issuer entity ID that exists in the key jar
    :raises KeyError: when no identifier starts with *url*
    """
    match = next((owner for owner in self.issuer_keys if owner.startswith(url)), None)
    if match is None:
        raise KeyError("No keys for '{}' in this keyjar".format(url))
    return match
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """Takes a start and an end node with an optional relationship
    type.

    Returns the relationships between the nodes or an empty list.
    When *legacy* is true the internal relationship ids are returned
    instead of the relationship objects.
    """
    if rel_type:
        # The relationship type cannot be a Cypher parameter, so it is
        # interpolated with str.format; the doubled braces keep the
        # {handle_id*} parameter placeholders intact after formatting.
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    with manager.session as s:
        if legacy:
            relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
            return [relationship.id for relationship in relationships]
        return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
async def get_clan(self, *tags):
    '''Get a clan object using tag(s).

    Multiple tags yield a list of Clan objects; a single tag yields one.
    '''
    url = '{0.BASE}/clan/{1}'.format(self, ','.join(tags))
    payload = await self.request(url)
    if isinstance(payload, list):
        return [Clan(self, entry) for entry in payload]
    return Clan(self, payload)
def restoreDefaultWCS(imageObjectList, output_wcs):
    """Restore WCS information to default values, and update imageObject
    accordingly.
    """
    # Accept a single imageObject as well as a list of them.
    if not isinstance(imageObjectList, list):
        imageObjectList = [imageObjectList]
    output_wcs.restoreWCS()
    updateImageWCS(imageObjectList, output_wcs)
def getLogs(self, CorpNum, MgtKey):
    """Retrieve the change history of a cash-bill document.

    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key
    return
        document history entries as a list
    raise
        PopbillException : when MgtKey is missing or the request fails
    """
    if MgtKey == None or MgtKey == "":
        # Error message is user-facing Korean text ("management key was
        # not supplied") and must stay as-is.
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
    return self._httpget('/Cashbill/' + MgtKey + '/Logs', CorpNum)
def fit(self, data, debug=False):
    """Fit each segment. Segments that have not already been explicitly
    added will be automatically added with default model and ytransform.

    Parameters
    ----------
    data : pandas.DataFrame
        Must have a column with the same name as `segmentation_col`.
    debug : bool
        If set to true will pass debug to the fit method of each model.

    Returns
    -------
    fits : dict of statsmodels.regression.linear_model.OLSResults
        Keys are the segment names.
    """
    data = util.apply_filter_query(data, self.fit_filters)
    unique = data[self.segmentation_col].unique()
    value_counts = data[self.segmentation_col].value_counts()
    # Remove any existing segments that may no longer have counterparts
    # in the data. This can happen when loading a saved model and then
    # calling this method with data that no longer has segments that
    # were there the last time this was called.
    gone = set(self._group.models) - set(unique)
    for g in gone:
        del self._group.models[g]
    # Auto-register any new segment that is large enough to fit.
    for x in unique:
        if x not in self._group.models and value_counts[x] > self.min_segment_size:
            self.add_segment(x)
    with log_start_finish('fitting models in segmented model {}'.format(self.name), logger):
        return self._group.fit(data, debug=debug)
def p_throttling(p):
    """throttling : THROTTLING COLON NONE
                  | THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET"""
    # NOTE: the docstring above is the PLY grammar rule — do not edit it
    # casually.  Default to no throttling; the 7-token production carries
    # a tail-drop queue size in p[5].
    settings = NoThrottlingSettings()
    if len(p) == 7:
        settings = TailDropSettings(int(p[5]))
    p[0] = {"throttling": settings}
def export_image(self, bbox, zoomlevel, imagepath):
    """Writes to ``imagepath`` the tiles for the specified bounding box and zoomlevel."""
    assert has_pil, _("Cannot export image without python PIL")
    grid = self.grid_tiles(bbox, zoomlevel)
    # Grid is a list of rows; each row holds (x, y) tile coordinates.
    width = len(grid[0])
    height = len(grid)
    widthpix = width * self.tile_size
    heightpix = height * self.tile_size
    # Paste each tile at its pixel offset into one large RGBA canvas.
    result = Image.new("RGBA", (widthpix, heightpix))
    offset = (0, 0)
    for i, row in enumerate(grid):
        for j, (x, y) in enumerate(row):
            offset = (j * self.tile_size, i * self.tile_size)
            img = self._tile_image(self.tile((zoomlevel, x, y)))
            result.paste(img, offset)
    logger.info(_("Save resulting image to '%s'") % imagepath)
    result.save(imagepath)
def eval_option_value(self, option):
    """Evaluates an option

    :param option: a string
    :return: an object of type str, bool, int, float or list
    """
    try:
        # SECURITY NOTE(review): eval() executes arbitrary expressions; if
        # option strings can come from untrusted input, ast.literal_eval
        # would be safer — confirm the input source before changing.
        value = eval(option, {}, {})
    except (SyntaxError, NameError, TypeError):
        # Not evaluable: fall back to the raw option string.
        return option
    if type(value) in (str, bool, int, float):
        return value
    elif type(value) in (list, tuple):
        # Warn about unsupported element types but still return the value.
        for v in value:
            if type(v) not in (str, bool, int, float):
                self._write_error("Value of element of list object has wrong type %s" % v)
        return value
    return option
def parse_band_log(self, message):
    """Process incoming logging messages from the service."""
    if "payload" in message and hasattr(message["payload"], "name"):
        record = message["payload"]
        # Rename attributes prefixed "workflows_exc_" back to their plain
        # names (len("workflows_exc_") == 14).
        for k in dir(record):
            if k.startswith("workflows_exc_"):
                setattr(record, k[14:], getattr(record, k))
                delattr(record, k)
        # Attach current service status fields to the record for context.
        for k, v in self.get_status().items():
            setattr(record, "workflows_" + k, v)
        # Route the record through the named logger's handlers.
        logging.getLogger(record.name).handle(record)
    else:
        self.log.warning("Received broken record on log band\n" + "Message: %s\nRecord: %s", str(message), str(hasattr(message.get("payload"), "__dict__") and message["payload"].__dict__), )
def sepconv_relu_sepconv(inputs, filter_size, output_size, first_kernel_size=(1, 1), second_kernel_size=(1, 1), padding="LEFT", nonpadding_mask=None, dropout=0.0, name=None):
    """Hidden layer with RELU activation followed by linear projection.

    Applies separable_conv -> relu -> dropout -> separable_conv; 3-D
    inputs are temporarily expanded to 4-D so the convolutions can run,
    then squeezed back afterwards.
    """
    with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
        inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
        if inputs.get_shape().ndims == 3:
            is_3d = True
            inputs = tf.expand_dims(inputs, 2)
        else:
            is_3d = False
        h = separable_conv(inputs, filter_size, first_kernel_size, activation=tf.nn.relu, padding=padding, name="conv1")
        if dropout != 0.0:
            # TF1-style dropout takes a keep probability.
            h = tf.nn.dropout(h, 1.0 - dropout)
        h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
        ret = separable_conv(h, output_size, second_kernel_size, padding=padding, name="conv2")
        if is_3d:
            ret = tf.squeeze(ret, 2)
        return ret
def hicpro_pairing_chart(self):
    """Generate Pairing chart"""
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['Unique_paired_alignments'] = {'color': '#005ce6', 'name': 'Uniquely Aligned'}
    keys['Low_qual_pairs'] = {'color': '#b97b35', 'name': 'Low Quality'}
    keys['Pairs_with_singleton'] = {'color': '#ff9933', 'name': 'Singleton'}
    keys['Multiple_pairs_alignments'] = {'color': '#e67300', 'name': 'Multi Aligned'}
    # NOTE(review): 'Unmapped_airs' looks like a typo for 'Unmapped_pairs' —
    # verify against the keys actually present in the parsed HiC-Pro stats.
    keys['Unmapped_airs'] = {'color': '#a9a2a2', 'name': 'Failed To Align'}
    # Config for the plot
    config = {'id': 'hicpro_pairing_stats_plot', 'title': 'HiC-Pro: Pairing Statistics', 'ylab': '# Reads', 'cpswitch_counts_label': 'Number of Reads'}
    return bargraph.plot(self.hicpro_data, keys, config)
def _compute_distance_term(self, C, mag, dists):
    """Computes the distance scaling term, as contained within equation (1b)"""
    # Magnitude-dependent geometric spreading (theta2 + theta14 + theta3
    # scaling), with near-source saturation via the c4 * exp(...) term,
    # plus anelastic attenuation (theta6) and a constant offset (theta10).
    # NOTE(review): assumes dists.rhypo supports numpy elementwise ops —
    # confirm against the calling GMPE framework.
    return ((C['theta2'] + C['theta14'] + C['theta3'] * (mag - 7.8)) * np.log(dists.rhypo + self.CONSTS['c4'] * np.exp((mag - 6.) * self.CONSTS['theta9'])) + (C['theta6'] * dists.rhypo)) + C["theta10"]
def close_other_windows(self):
    """Close every window except the current one.

    Useful for tests — after each test you can automatically close all
    extra windows, returning focus to the original one.
    """
    current = self.current_window_handle
    for handle in self.window_handles:
        if handle != current:
            self.switch_to_window(handle)
            self.close()
    self.switch_to_window(current)
def export_process_template(self, id, **kwargs):
    """ExportProcessTemplate.

    [Preview API] Returns requested process template.
    :param str id: The ID of the process
    :rtype: object
    """
    route_values = {'action': 'Export'}
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'str')
    response = self._send(http_method='GET',
                          location_id='29e1f38d-9e9c-4358-86a5-cdf9896a5759',
                          version='5.0-preview.1',
                          route_values=route_values,
                          accept_media_type='application/zip')
    # Optional progress callback supplied by the caller.
    callback = kwargs.get("callback")
    return self._client.stream_download(response, callback=callback)
def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16):
    """Moves data around using read()/write().

    Args:
        fobj (fileobj)
        dest (int): The destination offset
        src (int): The source offset
        count (int): The amount of data to move
    Raises:
        IOError: In case an operation on the fileobj fails
        ValueError: In case invalid parameters were given
    """
    if dest < 0 or src < 0 or count < 0:
        raise ValueError
    fobj.seek(0, 2)
    filesize = fobj.tell()
    if max(dest, src) + count > filesize:
        raise ValueError("area outside of file")
    if src > dest:
        # Moving backwards: copy ascending so reads stay ahead of writes.
        done = 0
        while done < count:
            step = min(BUFFER_SIZE, count - done)
            fobj.seek(src + done)
            chunk = fobj.read(step)
            fobj.seek(dest + done)
            fobj.write(chunk)
            done += step
        fobj.flush()
    else:
        # Moving forwards: copy descending from the end so writes never
        # clobber data that has not been read yet.
        remaining = count
        while remaining:
            step = min(BUFFER_SIZE, remaining)
            fobj.seek(src + remaining - step)
            chunk = fobj.read(step)
            fobj.seek(dest + remaining - step)
            fobj.write(chunk)
            remaining -= step
        fobj.flush()
def upsert_multi(db, collection, object, match_params=None):
    """Wrapper for pymongo.insert_many() and update_many()

    :param db: db connection
    :param collection: collection to update
    :param object: a non-empty list of documents to insert, or a dict of
        modifications to apply to documents matching match_params
    :param match_params: a query that matches the documents to update
    :return: ids of inserted/updated document; None when *object* is
        neither a non-empty list nor a dict
    """
    if isinstance(object, list) and len(object) > 0:
        return str(db[collection].insert_many(object).inserted_ids)
    elif isinstance(object, dict):
        # NOTE(review): update_many is called with upsert=False, so
        # upserted_id is always None here despite this function's name —
        # confirm whether upsert=True was intended.
        return str(db[collection].update_many(match_params, {"$set": object}, upsert=False).upserted_id)
def get_cached_zone_variable(self, zone_id, variable, default=None):
    """Retrieve the current value of a zone variable from the cache, or
    return *default* when the variable is not present.
    """
    try:
        value = self._retrieve_cached_zone_variable(zone_id, variable)
    except UncachedVariable:
        value = default
    return value
def _timestep_cull(self, timestep):
    """Cull out values that do not fit a timestep.

    Returns a (new_analysis_period, new_values, new_datetimes) tuple that
    keeps only the datetimes (and matching values) aligned with the
    requested timestep.
    """
    new_values = []
    new_datetimes = []
    # Minutes between consecutive steps (e.g. timestep=4 -> every 15 min).
    mins_per_step = int(60 / timestep)
    for i, date_t in enumerate(self.datetimes):
        # moy = minute of year; keep only datetimes on the step boundary.
        if date_t.moy % mins_per_step == 0:
            new_datetimes.append(date_t)
            new_values.append(self.values[i])
    a_per = self.header.analysis_period
    # Rebuild the analysis period with the new timestep, keeping bounds.
    new_ap = AnalysisPeriod(a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month, a_per.end_day, a_per.end_hour, timestep, a_per.is_leap_year)
    return new_ap, new_values, new_datetimes
def getServiceEndpoints(self, yadis_url, service_element):
    """Generate all endpoint objects for all of the subfilters of
    this filter and return their concatenation.
    """
    collected = []
    for child in self.subfilters:
        collected += child.getServiceEndpoints(yadis_url, service_element)
    return collected
def path(self, name):
    """Look for files in subdirectory of MEDIA_ROOT using the tenant's
    domain_url value as the specifier.

    :param name: file name relative to the tenant's media directory
    :return: normalized filesystem path
    :raises SuspiciousOperation: if *name* escapes the storage location
    """
    if name is None:
        name = ''
    try:
        # Per-tenant subdirectory; fall back to the plain location when
        # no tenant is set on the connection.
        location = safe_join(self.location, connection.tenant.domain_url)
    except AttributeError:
        location = self.location
    try:
        path = safe_join(location, name)
    except ValueError:
        # safe_join refuses paths outside `location` (e.g. "../..").
        raise SuspiciousOperation("Attempted access to '%s' denied." % name)
    return os.path.normpath(path)
def set_row_heights(self):
    """the row height is defined by following factors:

    * how many facts are there in the day
    * does the fact have description / tags

    This func creates a list of row start positions to be able to
    quickly determine what to display
    """
    # Nothing to lay out until the widget has a height.
    if not self.height:
        return
    y, pos, heights = 0, [], []
    for date, facts in self.days:
        height = 0
        for fact in facts:
            fact_height = self.fact_row.height(fact)
            # Record each fact's own vertical placement within the view.
            fact.y = y + height
            fact.height = fact_height
            height += fact.height
        height += self.day_padding
        if not facts:
            # Empty days collapse to a thin strip.
            height = 10
        else:
            # Days with facts get at least a minimum visible height.
            height = max(height, 60)
        pos.append(y)
        heights.append(height)
        y += height
    self.row_positions, self.row_heights = pos, heights
    maxy = max(y, 1)
    # Keep the scrollbar in sync with the total content height.
    if self.vadjustment:
        self.vadjustment.set_lower(0)
        self.vadjustment.set_upper(max(maxy, self.height))
        self.vadjustment.set_page_size(self.height)
def qt_at_least(needed_version, test_version=None):
    """Check if the installed Qt version is at least the requested one.

    :param needed_version: minimally needed Qt version, e.g. "4.8.4"
    :type needed_version: str
    :param test_version: Qt version as returned from Qt.QT_VERSION, e.g.
        0x040804.  This is used only for tests.
    :type test_version: int
    :returns: True if the installed Qt version is >= the requested one
    :rtype: bool
    """
    major, minor, patch = (int(part) for part in needed_version.split('.'))
    # Qt encodes versions as 0xMMNNPP (one byte per component).  The old
    # string-based '0x0%s0%s0%s' construction silently produced wrong
    # values for any component >= 10; shifting integers is always correct
    # and identical for single-digit components.
    needed = (major << 16) | (minor << 8) | patch
    if test_version is not None:
        installed = test_version
    else:
        # Only consult Qt when no test version was supplied.
        installed = Qt.QT_VERSION
    if needed <= installed:
        return True
    else:
        return False
def workgroup(name):
    '''.. versionadded:: 2019.2.0

    Manage the workgroup of the computer

    name
        The workgroup to set

    Example:

    .. code-block:: yaml

        set workgroup:
          system.workgroup:
            - name: local
    '''
    ret = {'name': name.upper(), 'result': False, 'changes': {}, 'comment': ''}
    # Grab the current domain/workgroup
    out = __salt__['system.get_domain_workgroup']()
    current_workgroup = out['Domain'] if 'Domain' in out else out['Workgroup'] if 'Workgroup' in out else ''
    # Notify the user if the requested workgroup is the same
    if current_workgroup.upper() == name.upper():
        ret['result'] = True
        ret['comment'] = "Workgroup is already set to '{0}'".format(name.upper())
        return ret
    # If being run in test-mode, inform the user what is supposed to happen
    if __opts__['test']:
        ret['result'] = None
        ret['changes'] = {}
        ret['comment'] = 'Computer will be joined to workgroup \'{0}\''.format(name)
        return ret
    # Set our new workgroup, and then immediately ask the machine what it
    # is again to validate the change
    res = __salt__['system.set_domain_workgroup'](name.upper())
    out = __salt__['system.get_domain_workgroup']()
    changed_workgroup = out['Domain'] if 'Domain' in out else out['Workgroup'] if 'Workgroup' in out else ''
    # BUG FIX: the result dict was previously re-created here (ret = {}),
    # dropping the mandatory 'name' and 'changes' keys from the state
    # return.  Update the existing dict instead.
    if res and current_workgroup.upper() == changed_workgroup.upper():
        ret['result'] = True
        ret['comment'] = "The new workgroup '{0}' is the same as '{1}'".format(current_workgroup.upper(), changed_workgroup.upper())
    elif res:
        ret['result'] = True
        ret['comment'] = "The workgroup has been changed from '{0}' to '{1}'".format(current_workgroup.upper(), changed_workgroup.upper())
        ret['changes'] = {'old': current_workgroup.upper(), 'new': changed_workgroup.upper()}
    else:
        ret['result'] = False
        ret['comment'] = "Unable to join the requested workgroup '{0}'".format(changed_workgroup.upper())
    return ret
def remove_input_data_port(self, data_port_id, force=False, destroy=True):
    """Remove an input data port from the state.

    :param int data_port_id: the id of the input data port to remove
    :param bool force: if the removal should be forced without checking
        constraints (unused here; kept for interface compatibility)
    :param bool destroy: if True, also remove all data flows connected
        to the port
    :return: the removed data port
    :raises AttributeError: if the specified input data port does not exist
    """
    if data_port_id not in self._input_data_ports:
        # BUG FIX: the id was previously passed as a second AttributeError
        # argument instead of being %-formatted into the message (and
        # "exit" was a typo for "exist").
        raise AttributeError("input data port with id %s does not exist" % data_port_id)
    if destroy:
        self.remove_data_flows_with_data_port_id(data_port_id)
    # Detach the port from its parent before handing it back.
    self._input_data_ports[data_port_id].parent = None
    return self._input_data_ports.pop(data_port_id)
def dispatch_strict(self, stream, *args, **kwargs):
    """Dispatch to the function held internally depending on the value
    of *stream*.

    Matching on dictionaries is strict: dictionaries match only when they
    are exactly the same.
    """
    for handler, pattern in self.functions:
        ok, bound_stream = self._match(stream, pattern, {'strict': True}, {})
        if ok:
            return handler(bound_stream, *args, **kwargs)
    raise DispatchFailed()
def get_datasets(catalog, filter_in=None, filter_out=None, meta_field=None, exclude_meta_fields=None, only_time_series=False):
    """Return a list of the catalog's datasets, or of one of their metadata fields.

    Args:
        catalog (dict, str or DataJson): External/internal representation of
            a catalog. An _external_ representation is a local path or a
            remote URL to a file with the catalog metadata, in JSON or XLSX
            format. The _internal_ representation of a catalog is a
            dictionary. Examples: http://datos.gob.ar/data.json,
            http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
            "/energia/catalog.xlsx".
        filter_in (dict): Return the datasets whose attributes match those
            passed in this dictionary. Example::
                "dataset": {
                    "publisher": {"name": "Ministerio de Ambiente"}
                }
            Only datasets with that publisher name are returned.
        filter_out (dict): Return the datasets whose attributes do NOT match
            those passed in this dictionary (same shape as filter_in).
        meta_field (str): Name of a Dataset metadata field. Instead of
            returning complete "Dataset" objects, return the list of values
            of that field present in the catalog.
        exclude_meta_fields (list): Dataset metadata fields to strip from
            the returned Dataset objects.
        only_time_series (bool): If True, only return datasets that have at
            least one time-series distribution.
    """
    filter_in = filter_in or {}
    filter_out = filter_out or {}
    catalog = read_catalog_obj(catalog)
    if filter_in or filter_out:
        filtered_datasets = [dataset for dataset in catalog["dataset"] if _filter_dictionary(dataset, filter_in.get("dataset"), filter_out.get("dataset"))]
    else:
        filtered_datasets = catalog["dataset"]
    # apply the special filters
    if only_time_series:
        filtered_datasets = [dataset for dataset in filtered_datasets if dataset_has_time_series(dataset)]
    if meta_field:
        return [dataset[meta_field] for dataset in filtered_datasets if meta_field in dataset]
    if exclude_meta_fields:
        # Copy each dataset so the originals in the catalog stay intact.
        meta_filtered_datasets = []
        for dataset in filtered_datasets:
            dataset_meta_filtered = dataset.copy()
            for excluded_meta_field in exclude_meta_fields:
                dataset_meta_filtered.pop(excluded_meta_field, None)
            meta_filtered_datasets.append(dataset_meta_filtered)
        return meta_filtered_datasets
    else:
        return filtered_datasets
def chunk_string(string, length):
    """Split a string into fixed-length chunks, lazily.

    Returns a generator yielding consecutive slices of *string*, each of
    at most *length* characters (the final chunk may be shorter).

    Reference: http://stackoverflow.com/questions/18854620
    """
    return (string[i:i + length] for i in range(0, len(string), length))
def stop():
    '''Stops lazarus, regardless of which mode it was started in.

    For example:
        >>> import lazarus
        >>> lazarus.default()
        >>> lazarus.stop()

    :raises RuntimeWarning: if lazarus is not currently active
    '''
    global _active
    if not _active:
        msg = 'lazarus is not active'
        raise RuntimeWarning(msg)
    # Stop the filesystem observer and wait for its thread to finish
    # before tearing down module state.
    _observer.stop()
    _observer.join()
    _deactivate()
def undo_nested_group(self):
    """Performs the last group opened, or the top group on the undo stack.

    Creates a redo group with the same name.

    :raises RuntimeError: if called while an undo or redo is in progress
    """
    if self._undoing or self._redoing:
        raise RuntimeError
    # Prefer the innermost still-open group; otherwise pop from the undo
    # stack.  With neither available there is nothing to undo.
    if self._open:
        group = self._open.pop()
    elif self._undo:
        group = self._undo.pop()
    else:
        return
    # While _undoing is set, actions recorded by group.perform() are
    # collected into the redo group opened below.
    self._undoing = True
    self.begin_grouping()
    group.perform()
    self.set_action_name(group.name)
    self.end_grouping()
    self._undoing = False
    self.notify()
def start(self, any_zone):
    """Start the event listener listening on the local machine at port 1400
    (default)

    Make sure that your firewall allows connections to this port

    Args:
        any_zone (SoCo): Any Sonos device on the network. It does not
            matter which device. It is used only to find a local IP address
            reachable by the Sonos net.

    Note:
        The port on which the event listener listens is configurable.
        See `config.EVENT_LISTENER_PORT`
    """
    # Find our local network IP address which is accessible to the
    # Sonos net, see http://stackoverflow.com/q/166506
    with self._start_lock:
        if not self.is_running:
            # Use configured IP address if there is one, else detect
            # automatically.
            if config.EVENT_LISTENER_IP:
                ip_address = config.EVENT_LISTENER_IP
            else:
                # Connecting a UDP socket to the zone reveals which local
                # interface routes to the Sonos network; no data is sent.
                temp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                temp_sock.connect((any_zone.ip_address, config.EVENT_LISTENER_PORT))
                ip_address = temp_sock.getsockname()[0]
                temp_sock.close()
            # Start the event listener server in a separate thread.
            self.address = (ip_address, config.EVENT_LISTENER_PORT)
            self._listener_thread = EventServerThread(self.address)
            self._listener_thread.daemon = True
            self._listener_thread.start()
            self.is_running = True
            log.info("Event listener started")
def wait_for_capture(self, timeout=None):
    """See base class documentation

    Waits up to *timeout* seconds for the running capture subprocess to
    finish and post-processes its output; on timeout the capture is
    stopped instead.

    :raises sniffer.InvalidOperationError: if no capture was started
    """
    if self._process is None:
        raise sniffer.InvalidOperationError("Trying to wait on a non-started process")
    try:
        utils.wait_for_standing_subprocess(self._process, timeout)
        self._post_process()
    except subprocess.TimeoutExpired:
        # Timed out waiting: stop the capture instead of raising.
        self.stop_capture()
def get_drive(self, drive_id):
    """Return a Drive instance for *drive_id*, or None.

    :param drive_id: the drive_id to be retrieved
    :return: Drive for the id (None when the id is falsy or the request
        fails)
    :rtype: Drive
    """
    if not drive_id:
        return None
    endpoint = self._endpoints.get('get_drive').format(id=drive_id)
    response = self.con.get(self.build_url(endpoint))
    if not response:
        return None
    payload = response.json()
    # Everything received from cloud must be passed as self._cloud_data_key
    return self.drive_constructor(con=self.con,
                                  protocol=self.protocol,
                                  main_resource=self.main_resource,
                                  **{self._cloud_data_key: payload})
def get_vpnv4fs_table(self):
    """Returns global VPNv4 Flow Specification table.

    Creates the table if it does not exist.
    """
    table = self._global_tables.get(RF_VPNv4_FLOWSPEC)
    if not table:
        # Lazy initialization: create and register under both indexes.
        table = VPNv4FlowSpecTable(self._core_service, self._signal_bus)
        self._global_tables[RF_VPNv4_FLOWSPEC] = table
        self._tables[(None, RF_VPNv4_FLOWSPEC)] = table
    return table
def clause_indices(self):
    """The list of clause indices in ``words`` layer.

    The indices are unique only in the boundary of a single sentence.
    """
    # Lazily run the clause annotator on first access.
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    # Words without a clause index yield None.
    return [word.get(CLAUSE_IDX, None) for word in self[WORDS]]
def get_preview_name(self):
    """Returns .SAFE name of full resolution L1C preview

    :return: name of preview file
    :rtype: str
    """
    if self.safe_type == EsaSafeType.OLD_TYPE:
        # Old-format products derive the name from the tile id + PVI marker.
        name = _edit_name(self.tile_id, AwsConstants.PVI, delete_end=True)
    else:
        # New-format products: <tile part>_<datatake time>_PVI
        name = '_'.join([self.tile_id.split('_')[1], self.get_datatake_time(), AwsConstants.PVI])
    return '{}.jp2'.format(name)
def build_collision_table(aliases, levels=COLLISION_CHECK_LEVEL_DEPTH):
    """Build the collision table according to the alias configuration file against the entire command table.

    self.collided_alias is structured as:
        'collided_alias': [the command level at which collision happens]
    For example:
        'account': [1, 2]
    This means that 'account' is a reserved command in level 1 and level 2
    of the command tree because (az account ...) and (az storage account ...).

    Args:
        levels: the number of levels we traverse through the command table tree.
    """
    collided_alias = defaultdict(list)
    for alias in aliases:
        # Only care about the first word in the alias because alias
        # cannot have spaces (unless they have positional arguments)
        word = alias.split()[0]
        for level in range(1, levels + 1):
            # Match `word` as the command token at exactly this level,
            # preceded by (level - 1) arbitrary command tokens.
            collision_regex = r'^{}{}($|\s)'.format(r'([a-z\-]*\s)' * (level - 1), word.lower())
            if list(filter(re.compile(collision_regex).match, azext_alias.cached_reserved_commands)) and level not in collided_alias[word]:
                collided_alias[word].append(level)
    telemetry.set_collided_aliases(list(collided_alias.keys()))
    return collided_alias
def pylint_raw(options):
    """Run pylint and capture its output.

    Because pylint changes the exit code based on the code score, the
    return code is deliberately ignored; only decoded stdout is returned.

    :param options: list of command-line options to pass to pylint
    :return: pylint's stdout as a string
    """
    cmd = ['pylint'] + list(options)
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_data, _stderr_data = process.communicate()
    return stdout_data.decode()
def _get_converter_type(identifier):
    """Resolve *identifier* (member name, member, or raw value) to a ConverterType."""
    if isinstance(identifier, str):
        # Strings are looked up by member name.
        return ConverterType[identifier]
    # Members pass through untouched; anything else is looked up by value.
    return identifier if isinstance(identifier, ConverterType) else ConverterType(identifier)
def update_task_db(self, row):
    '''Update the stored information for a task in the database.'''
    sql = '''UPDATE tasks SET
    currsize=?, state=?, statename=?, humansize=?, percent=?
    WHERE fsid=?
    '''
    # Parameter order mirrors the placeholder order in the statement above.
    params = [
        row[CURRSIZE_COL],
        row[STATE_COL],
        row[STATENAME_COL],
        row[HUMANSIZE_COL],
        row[PERCENT_COL],
        row[FSID_COL],
    ]
    self.cursor.execute(sql, params)
    self.check_commit()
def get_proficiency_search_session_for_objective_bank(self, objective_bank_id, proxy):
    """Gets the ``OsidSession`` for the proficiency search service in the given objective bank.

    :param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
    :type objective_bank_id: ``osid.id.Id``
    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: a ``ProficiencySearchSession``
    :rtype: ``osid.learning.ProficiencySearchSession``
    :raise: ``NotFound`` -- no objective bank found by the given ``Id``
    :raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_proficiency_search()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_proficiency_search()`` and ``supports_visible_federation()``
    are ``true``.*
    """
    # Guard clauses: reject missing arguments and unsupported capability first.
    if not objective_bank_id:
        raise NullArgument
    if not self.supports_proficiency_search():
        raise Unimplemented()
    # Lazily import the sessions module; failure to import maps to a
    # service-level OperationFailed.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    # A missing session class/attribute likewise maps to OperationFailed.
    try:
        return sessions.ProficiencySearchSession(
            objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
def _format_data ( data ) : # type : ( Union [ str , IO ] ) - > Union [ Tuple [ None , str ] , Tuple [ Optional [ str ] , IO , str ] ]
"""Format field data according to whether it is a stream or
a string for a form - data request .
: param data : The request field data .
: type data : str or file - like object .""" | if hasattr ( data , 'read' ) :
data = cast ( IO , data )
data_name = None
try :
if data . name [ 0 ] != '<' and data . name [ - 1 ] != '>' :
data_name = os . path . basename ( data . name )
except ( AttributeError , TypeError ) :
pass
return ( data_name , data , "application/octet-stream" )
return ( None , cast ( str , data ) ) |
def _density_par_approx_higherorder(self, Oparb, lowbindx, _return_array=False, gaussxpolyInt=None):
    """Contribution from non-linear spline terms.

    Parameters
    ----------
    Oparb : numpy.ndarray
        Bin edges in the parallel frequency (assumed 1-D; consecutive pairs
        bound each integration interval -- TODO confirm against caller).
    lowbindx : int
        Index of the lowest bin; only bins up to and including this index are
        summed when ``_return_array`` is False.
    _return_array : bool, optional
        If True, return the per-bin array instead of the scalar sum.
    gaussxpolyInt : numpy.ndarray, optional
        Precomputed Gaussian-times-polynomial integrals; computed here when
        not supplied.

    Returns
    -------
    float or numpy.ndarray
        0. when the kick spline is linear (no higher-order terms); otherwise
        the summed (or per-bin) higher-order density contribution.
    """
    # Order of the interpolating spline of the frequency kick; a linear
    # spline (order 1) has no non-linear terms to contribute.
    spline_order = self._kick_interpdOpar_raw._eval_args[2]
    if spline_order == 1:
        return 0.
    # Form all Gaussian-like integrals necessary: ll/ul are the lower/upper
    # integration limits per bin, shifted by the constant and linear spline
    # coefficients (c[-1], c[-2]) and the mean frequency offset, in units of
    # sqrt(2) * sigma (sigma^2 = self._sortedSigOEig[2]).
    ll = (numpy.roll(Oparb, -1)[:-1] - self._kick_interpdOpar_poly.c[-1] - self._meandO - self._kick_interpdOpar_poly.c[-2] * self._timpact * (Oparb - numpy.roll(Oparb, -1))[:-1]) / numpy.sqrt(2. * self._sortedSigOEig[2])
    ul = (Oparb[:-1] - self._kick_interpdOpar_poly.c[-1] - self._meandO) / numpy.sqrt(2. * self._sortedSigOEig[2])
    if gaussxpolyInt is None:
        gaussxpolyInt = self._densMoments_approx_higherorder_gaussxpolyInts(ll, ul, spline_order + 1)
    # Now multiply in the coefficients for each order: powers holds the
    # descending polynomial exponents, tiled so each column matches a bin.
    powers = numpy.tile(numpy.arange(spline_order + 1)[::-1], (len(ul), 1)).T
    gaussxpolyInt *= -0.5 * (-numpy.sqrt(2.)) ** (powers + 1) * self._sortedSigOEig[2] ** (0.5 * (powers - 1))
    # Re-build powers excluding the two lowest orders (constant and linear),
    # which were already absorbed into ll/ul above.
    powers = numpy.tile(numpy.arange(spline_order + 1)[::-1][:-2], (len(ul), 1)).T
    for jj in range(spline_order + 1):
        # Binomial expansion of the shifted polynomial in the kick
        # coefficients c[:-2]; each jj picks out one expansion order.
        gaussxpolyInt[-jj - 1] *= numpy.sum(self._kick_interpdOpar_poly.c[:-2] * self._timpact ** powers / (1. + self._kick_interpdOpar_poly.c[-2] * self._timpact) ** (powers + 1) * special.binom(powers, jj) * (Oparb[:-1] - self._kick_interpdOpar_poly.c[-1] - self._meandO) ** (powers - jj), axis=0)
    if _return_array:
        # Per-bin contributions summed over polynomial orders.
        return numpy.sum(gaussxpolyInt, axis=0)
    else:
        # Total contribution from bins up to lowbindx.
        return numpy.sum(gaussxpolyInt[:, :lowbindx + 1])
def reset(self):
    """Restore every resolver configuration attribute to its default value."""
    # Default search domain: the local host name minus its first label,
    # falling back to the DNS root when nothing remains.
    self.domain = dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
    if len(self.domain) == 0:
        self.domain = dns.name.root
    self.nameservers = []
    # Names/addresses always treated as the local host.
    self.localhosts = {
        'localhost',
        'loopback',
        '127.0.0.1',
        '0.0.0.0',
        '::1',
        'ip6-localhost',
        'ip6-loopback',
    }
    # connected and active network interfaces
    self.interfaces = set()
    self.search = set()
    self.search_patterns = ['www.%s.com', 'www.%s.org', 'www.%s.net']
    # Transport defaults.
    self.port = 53
    self.timeout = 2.0
    self.lifetime = 30.0
    # TSIG signing: disabled until a keyring/keyname is configured.
    self.keyring = None
    self.keyname = None
    self.keyalgorithm = dns.tsig.default_algorithm
    # EDNS: off (-1) with no flags and no advertised payload size.
    self.edns = -1
    self.ednsflags = 0
    self.payload = 0
    self.cache = None
def create(self, session):
    """Cache the session and record the association between the cached
    session and its subject."""
    # The parent's create() performs _do_create and verification, returning
    # the new session id.
    session_id = super().create(session)
    self._cache(session, session_id)
    return session_id
def check_connection(self):
    """Open a telnet connection and try to login. Expected login
    label is "login: ", expected password label is "Password: ".

    Uses self.host/self.port for the connection and self.user/self.password
    for the (optional) login; leaves the connection in self.url_connection.
    """
    # NOTE(review): telnetlib's read_until()/write() expect bytes on
    # Python 3, but the prompts and "exit\n" here are str while the
    # credentials go through encode() -- confirm this module targets
    # Python 2 or that an upstream shim converts these.
    self.url_connection = telnetlib.Telnet(timeout=self.aggregate.config["timeout"])
    if log.is_debug(LOG_CHECK):
        # Mirror telnet traffic to the log when debug checking is on.
        self.url_connection.set_debuglevel(1)
    self.url_connection.open(self.host, self.port)
    if self.user:
        # Wait up to 10s for the login prompt before sending the user name.
        self.url_connection.read_until("login: ", 10)
        self.url_connection.write(encode(self.user) + "\n")
    if self.password:
        self.url_connection.read_until("Password: ", 10)
        self.url_connection.write(encode(self.password) + "\n")
    # XXX how to tell if we are logged in??
    self.url_connection.write("exit\n")
def check_bot(task_type=SYSTEM_TASK):
    """Health-check task for the wxpy bot: report status while the bot is alive."""
    if not glb.wxbot.bot.alive:
        # todo: handle a dead bot (restart/alert) -- currently a no-op.
        return
    msg = generate_run_info()
    message = Message(content=msg, receivers='status')
    glb.wxbot.send_msg(message)
    _logger.info('{0} Send status message {1} at {2:%Y-%m-%d %H:%M:%S}'.format(task_type, msg, datetime.datetime.now()))
def fs_cache(app_name='', cache_type='', idx=1, expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher):
    """Decorator caching results of functions that return pd.DataFrame or
    pd.Series objects.

    Cache files are written to:
        <cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv
    with empty parts (app_name, cache_type) omitted. When cache_dir is empty,
    stutils' 'ST_FS_CACHE_PATH' configuration is used; if that is also unset,
    a temporary directory is created.

    :param app_name: if present, cache files for this application are stored
        in a separate folder
    :param idx: number of columns to use as an index
    :param cache_type: if present, cache files within the app directory are
        separated into different folders by cache_type
    :param expires: cache duration in seconds
    :param cache_dir: custom file cache path
    """
    # All bookkeeping lives in helper_class; the decorator merely wraps the
    # target function in it with the captured configuration.
    return lambda func: helper_class(func, cache_dir, app_name, cache_type, idx, expires)
def satisfy_custom_matcher(self, args, kwargs):
    """Return whether the stub's custom matcher accepts the given call.

    :return: Whether or not the stub accepts the provided arguments.
    :rtype: bool
    """
    matcher = self._custom_matcher
    if not matcher:
        return False
    try:
        return matcher(*args, **kwargs)
    except Exception:
        # A matcher that raises is treated as a non-match.
        return False
def _setBatchSystemEnvVars(self):
    """Forward job-store and command-line environment variables to the batch system."""
    # Command-line settings come second so they are applied after the job
    # store's, preserving the original precedence.
    for env_dict in (self._jobStore.getEnv(), self.config.environment):
        for name, value in iteritems(env_dict):
            self._batchSystem.setEnv(name, value)
def remove_override(self, key):
    """Remove a setting override, if one exists.

    Dotted (nested) keys are not supported yet.
    """
    # '.' in key is equivalent to len(key.split('.')) > 1.
    if '.' in key:
        raise NotImplementedError
    if key in self.overrides:
        del self.overrides[key]
        self._uncache(key)
def animate(self, duration=None, easing=None, on_complete=None, on_update=None, round=False, **kwargs):
    """Request the parent Scene to interpolate attributes using the internal
    tweener; pass the sprite attributes that need changing as keywords.

    `duration` defaults to 0.4 seconds and `easing` to cubic in-out
    (for others see pytweener.Easing class).

    Example::
        # tween some_sprite to coordinates (50, 100) with the defaults
        self.animate(x=50, y=100)
    """
    scene = self.get_scene()
    if not scene:
        # Detached from any scene: apply the target values immediately.
        for attr_name, target in kwargs.items():
            setattr(self, attr_name, target)
        return None
    return scene.animate(self, duration, easing, on_complete, on_update, round, **kwargs)
def return_features_numpy(self, names='all'):
    """Return extracted features as a 2d numpy array.

    Parameters
    names: list of feature names to retrieve from the database, or 'all'
        (default) for every stored feature.

    Returns
    A numpy array with one row per datapoint. Features that are 1d numpy
    arrays are unrolled into the row; higher-dimensional arrays are not
    supported.
    """
    # Deliberate identity check: only an explicit False marks an unpopulated
    # database.
    if self._prepopulated is False:
        raise errors.EmptyDatabase(self.dbpath)
    return return_features_numpy_base(self.dbpath, self._set_object, self.points_amt, names)
def bulkCmd(snmpDispatcher, authData, transportTarget, nonRepeaters, maxRepetitions, *varBinds, **options):
    """Create a generator performing one or more SNMP GETBULK queries.

    On each iteration a new GETBULK request (:RFC:`1905#section-4.2.3`) is
    sent; the iterator blocks waiting for the response to arrive or an error
    to occur.

    Parameters
    ----------
    snmpDispatcher : :py:class:`~pysnmp.hlapi.snmpDispatcher`
        Class instance representing the SNMP engine.
    authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.
    transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Transport type along with the SNMP peer address.
    nonRepeaters : int
        One MIB variable is requested in response for the first
        `nonRepeaters` MIB variables in the request.
    maxRepetitions : int
        `maxRepetitions` MIB variables are requested in response for each of
        the remaining MIB variables in the request (excluding
        `nonRepeaters`). The remote SNMP engine may choose a lesser value.
    \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        One or more MIB variables to place into the SNMP request.

    Other Parameters
    ----------------
    \*\*options :
        * `lookupMib` - load MIBs and resolve response MIB variables at the
          cost of slightly reduced performance. Default is `True`.
        * `lexicographicMode` - walk the SNMP agent's MIB to the end (if
          `True`), otherwise stop iteration when all response MIB variables
          leave the scope of the initial `varBinds`. Default is `True`.
        * `ignoreNonIncreasingOid` - continue iteration even if response OIDs
          are not greater than the request OIDs; may cause an infinite loop.
          Default is `False`.
        * `maxRows` - stop once this many rows of the SNMP conceptual table
          have been processed. Default is `0` (no limit).
        * `maxCalls` - stop once this many responses have been processed.
          Default is `0` (no limit).

    Yields
    ------
    errorIndication : str
        True value indicates an SNMP engine error.
    errorStatus : str
        True value indicates an SNMP PDU error.
    errorIndex : int
        Non-zero value refers to \*varBinds[errorIndex - 1].
    varBinds : tuple
        A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` instances
        representing MIB variables returned in the SNMP response.

    Raises
    ------
    PySnmpError
        Or a derivative, indicating an error while performing the SNMP
        operation.

    Notes
    -----
    The generator is exhausted when: an engine error occurs; a PDU error is
    reported; EndOfMibView is reported for all MIB variables; or, in
    non-lexicographic mode, all response MIB variables leave the scope of the
    initial `varBinds`. A new sequence of `varBinds` can be sent back into
    the running generator at any point. Setting `maxRepetitions` to 15..50
    may significantly improve performance by packing more variables into a
    single response.

    Examples
    --------
    >>> from pysnmp.hlapi.v1arch import *
    >>> g = bulkCmd(snmpDispatcher(),
    ...             CommunityData('public'),
    ...             UdpTransportTarget(('demo.snmplabs.com', 161)),
    ...             0, 25,
    ...             ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
    >>> next(g)
    (None, 0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
    >>> g.send([ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets'))])
    (None, 0, 0, [[(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))]])
    """
    def cbFun(*args, **kwargs):
        # Stash the dispatcher's response (plus any follow-up varBinds it
        # suggests) where the surrounding loop can read it.
        response[:] = args + (kwargs.get('nextVarBinds', ()),)

    options['cbFun'] = cbFun

    # Iteration-control options are consumed here; the rest pass through to
    # the low-level command generator.
    lexicographicMode = options.pop('lexicographicMode', True)
    maxRows = options.pop('maxRows', 0)
    maxCalls = options.pop('maxCalls', 0)

    initialVarBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)

    # Per-column flag: True once EndOfMibView (or out-of-scope) was reached.
    nullVarBinds = [False] * len(initialVarBinds)

    totalRows = totalCalls = 0

    errorIndication, errorStatus, errorIndex, varBindTable = None, 0, 0, ()

    response = []

    stopFlag = False

    while not stopFlag:
        if not varBinds:
            # Nothing left to request: yield the final state and finish.
            yield (errorIndication, errorStatus, errorIndex, varBinds)
            return

        if maxRows and totalRows < maxRows:
            # Never request more rows than the caller's remaining budget.
            maxRepetitions = min(maxRepetitions, maxRows - totalRows)

        # Issue the GETBULK; only OIDs matter in the request, so values are
        # replaced with Null placeholders.
        cmdgen.bulkCmd(snmpDispatcher, authData, transportTarget, nonRepeaters, maxRepetitions, *[(x[0], Null('')) for x in varBinds], **options)

        # Block until cbFun has populated `response`.
        snmpDispatcher.transportDispatcher.runDispatcher()

        errorIndication, errorStatus, errorIndex, varBindTable, varBinds = response

        if errorIndication:
            yield (errorIndication, errorStatus, errorIndex, ())
            return

        elif errorStatus:
            if errorStatus == 2:
                # Hide SNMPv1 noSuchName error which leaks in here
                # from SNMPv1 Agent through internal pysnmp proxy.
                errorStatus = errorStatus.clone(0)
                errorIndex = errorIndex.clone(0)
            yield (errorIndication, errorStatus, errorIndex, varBindTable and varBindTable[0] or [])
            return

        else:
            for rowIdx, varBindRow in enumerate(varBindTable):
                stopFlag = True

                if len(varBindRow) != len(initialVarBinds):
                    # Truncated row: drop it and everything after.
                    varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
                    break

                for colIdx, varBind in enumerate(varBindRow):
                    name, val = varBind
                    if nullVarBinds[colIdx]:
                        # Column already exhausted: pad with endOfMibView.
                        varBindRow[colIdx] = name, endOfMibView
                        continue

                    stopFlag = False

                    if isinstance(val, Null):
                        nullVarBinds[colIdx] = True

                    elif not lexicographicMode and not initialVarBinds[colIdx][0].isPrefixOf(name):
                        # Left the subtree of the initial OID: mark column
                        # exhausted and mask the value.
                        varBindRow[colIdx] = name, endOfMibView
                        nullVarBinds[colIdx] = True

                if stopFlag:
                    # Every column exhausted: trim the table and stop.
                    varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
                    break

            totalRows += len(varBindTable)
            totalCalls += 1

            if maxRows and totalRows >= maxRows:
                if totalRows > maxRows:
                    # Discard the overshoot beyond the row budget.
                    varBindTable = varBindTable[:-(totalRows - maxRows)]
                stopFlag = True

            if maxCalls and totalCalls >= maxCalls:
                stopFlag = True

            for varBindRow in varBindTable:
                nextVarBinds = (yield errorIndication, errorStatus, errorIndex, varBindRow)
                if nextVarBinds:
                    # Caller sent new varBinds into the generator: restart
                    # the walk from them.
                    initialVarBinds = varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, nextVarBinds)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.