signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _nullify_stdio(self):
    """Point stdin, stdout and stderr at /dev/null.

    In case of odd startup we may have been allocated one of the standard
    handles ourselves, so the null descriptor is only duplicated over the
    slots it does not already occupy, and is closed afterwards only when
    it is not itself one of the standard descriptors.
    """
    null_fd = os.open('/dev/null', os.O_RDWR)
    try:
        for std_fd in range(3):
            if null_fd != std_fd:
                os.dup2(null_fd, std_fd)
    finally:
        # Only close when os.open gave us a non-standard descriptor.
        if null_fd > 2:
            os.close(null_fd)
def set_implicitly_wait(self):
    """Configure the wrapped driver's implicit wait.

    Reads the optional ``implicitly_wait`` value from the ``Driver``
    section of the configuration; when present it is applied to the
    wrapped driver, otherwise nothing happens.
    """
    timeout = self.driver_wrapper.config.get_optional('Driver', 'implicitly_wait')
    if not timeout:
        return
    self.driver_wrapper.driver.implicitly_wait(timeout)
def Main():
    """The main program function.

    Parses the command line and validates the dtFabric format definitions
    found in the given file or directory.

    Returns:
      bool: True if successful or False if not.
    """
    parser = argparse.ArgumentParser(
        description='Validates dtFabric format definitions.')
    parser.add_argument(
        'source', nargs='?', action='store', metavar='PATH', default=None,
        help=('path of the file or directory containing the dtFabric format '
              'definitions.'))
    options = parser.parse_args()

    if not options.source:
        print('Source value is missing.')
        print('')
        parser.print_help()
        print('')
        return False

    if not os.path.exists(options.source):
        print('No such file: {0:s}'.format(options.source))
        print('')
        return False

    logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')

    is_directory = os.path.isdir(options.source)
    source_description = (
        os.path.join(options.source, '*.yaml') if is_directory
        else options.source)

    print('Validating dtFabric definitions in: {0:s}'.format(source_description))

    validator = DefinitionsValidator()
    result = (
        validator.CheckDirectory(options.source) if is_directory
        else validator.CheckFile(options.source))

    print('SUCCESS' if result else 'FAILURE')
    return result
def get_doc_counts(self, cache=True):
    """Get the number of related documents of various types for the artist.

    The types include audio, biographies, blogs, images, news, reviews,
    songs and videos; the documents themselves can be retrieved by calling
    ``artist.<document type>``, for example ``artist.biographies``.

    Args:

    Kwargs:
        cache (bool): A boolean indicating whether or not the cached value
            should be used (if available). Defaults to True.

    Returns:
        A dictionary with one key for each document type, mapped to an
        integer count of documents.
    """
    # Serve the cached value when permitted and available.
    if cache and 'doc_counts' in self.cache:
        return self.cache['doc_counts']
    response = self.get_attribute("profile", bucket='doc_counts')
    self.cache['doc_counts'] = response['artist']['doc_counts']
    return self.cache['doc_counts']
def know(self, what, confidence):
    """Know something with the given confidence; return self for chaining.

    The new fact replaces the current one only when its confidence is
    strictly higher than what is already known.
    """
    if confidence <= self.confidence:
        return self
    self.best = what
    self.confidence = confidence
    return self
def unique_element(ll):
    """Return the unique elements of *ll*, preserving the original order."""
    seen = set()
    out = []
    for element in ll:
        if element not in seen:
            seen.add(element)
            out.append(element)
    return out
def request(self, uri, method=GET, headers=None, cookies=None, params=None, data=None, post_files=None, **kwargs):
    """Makes a request using requests

    @param uri: The uri to send request
    @param method: Method to use to send request
    @param headers: Any headers to send with request
    @param cookies: Request cookies (in addition to session cookies)
    @param params: Request parameters
    @param data: Request data
    @param post_files: Files to upload with the request
    @param kwargs: other options to pass to underlying request
    @rtype: requests.Response
    @return: The response
    """
    request_args = dict(
        headers=headers,
        cookies=cookies,
        params=params,
        files=post_files,
        data=data,
        verify=self.verify_certificates,
    )
    request_args.update(kwargs)
    # Dispatch on HTTP verb; anything unrecognised falls back to GET.
    dispatch = {
        self.POST: self.session.post,
        self.PUT: self.session.put,
        self.PATCH: self.session.patch,
        self.DELETE: self.session.delete,
    }
    send = dispatch.get(method, self.session.get)
    response = send(uri, **request_args)
    # Record the response, trimming history to the configured maximum.
    self.responses.append(response)
    while len(self.responses) > self.max_response_history:
        self.responses.popleft()
    return response
def readEncodedU32(self):
    """Read a variable-length encoded unsigned int (at most 5 bytes).

    Each byte contributes 7 payload bits; a set continuation flag in the
    accumulated value indicates that another byte follows.
    """
    self.reset_bits_pending()
    result = self.readUI8()
    # (continuation flag, payload mask, shift of the next byte)
    for flag, mask, shift in (
            (0x80, 0x7f, 7),
            (0x4000, 0x3fff, 14),
            (0x200000, 0x1fffff, 21),
            (0x10000000, 0xfffffff, 28)):
        if not result & flag:
            break
        result = (result & mask) | (self.readUI8() << shift)
    return result
def prepare_framework_container_def(model, instance_type, s3_operations):
    """Prepare the framework model container information. Specify related S3 operations for Airflow to perform.
    (Upload `source_dir`)

    Args:
        model (sagemaker.model.FrameworkModel): The framework model
        instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
        s3_operations (dict): The dict to specify S3 operations (upload `source_dir`).

    Returns:
        dict: The container information of this framework model.
    """
    deploy_image = model.image
    if not deploy_image:
        # No explicit image: derive the default framework image URI from the
        # session region, framework name/version and Python version.
        region_name = model.sagemaker_session.boto_session.region_name
        deploy_image = fw_utils.create_image_uri(region_name, model.__framework_name__, instance_type, model.framework_version, model.py_version)
    base_name = utils.base_name_from_image(deploy_image)
    model.name = model.name or utils.name_from_base(base_name)
    bucket = model.bucket or model.sagemaker_session._default_bucket
    script = os.path.basename(model.entry_point)
    key = '{}/source/sourcedir.tar.gz'.format(model.name)
    if model.source_dir and model.source_dir.lower().startswith('s3://'):
        # Source already lives in S3; reference it directly, no upload needed.
        code_dir = model.source_dir
        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
    else:
        # Local source: record an S3Upload operation (tarred) for Airflow.
        code_dir = 's3://{}/{}'.format(bucket, key)
        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
        s3_operations['S3Upload'] = [{'Path': model.source_dir or script, 'Bucket': bucket, 'Key': key, 'Tar': True}]
    deploy_env = dict(model.env)
    deploy_env.update(model._framework_env_vars())
    try:
        if model.model_server_workers:
            deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(model.model_server_workers)
    except AttributeError:
        # This applies to a FrameworkModel which is not a SageMaker Deep
        # Learning Framework Model: it has no model_server_workers attribute.
        pass
    return sagemaker.container_def(deploy_image, model.model_data, deploy_env)
def _render_op(self, identifier, hs=None, dagger=False, args=None, superop=False):
    """Render an operator.

    Args:
        identifier (str or SymbolicLabelBase): The identifier (name/symbol)
            of the operator. May include a subscript, denoted by '_'.
        hs (HilbertSpace): The Hilbert space in which the operator is
            defined
        dagger (bool): Whether the operator should be daggered
        args (list): A list of expressions that will be rendered with
            :meth:`doprint`, joined with commas, enclosed in parenthesis
        superop (bool): Whether the operator is a super-operator
    """
    hs_label = None
    if hs is not None and self._settings['show_hs_label']:
        hs_label = self._render_hs_label(hs)
    name, total_subscript, total_superscript, args_str = self._split_op(identifier, hs_label, dagger, args)
    if self._settings['unicode_op_hats'] and len(name) == 1:
        # Single-character operator names get a hat accent, except for
        # superoperators which are rendered without one.
        if superop:
            res = name
        else:
            res = modifier_dict['hat'](name)
    else:
        res = name
    res = render_unicode_sub_super(res, [total_subscript], [total_superscript], sub_first=True, translate_symbols=True, unicode_sub_super=self._settings['unicode_sub_super'])
    res += args_str
    return res
def write_lines(self, lines, level=0):
    """Append every line in *lines*, each at the given indentation level."""
    for entry in lines:
        self.write_line(entry, level)
def pdf_rotate(input: str, counter_clockwise: bool = False, pages: [str] = None, output: str = None, ):
    """Rotate the given Pdf file clockwise or counter clockwise.

    :param input: path of the pdf file to rotate
    :param counter_clockwise: rotate counter clockwise if true else clockwise
    :param pages: list of page numbers to rotate, if None all pages will be
        rotated
    :param output: path of the output file; if None, the input file is
        replaced (after confirmation)
    """
    infile = open(input, "rb")
    reader = PdfFileReader(infile)
    writer = PdfFileWriter()
    if pages is not None:
        pages = parse_rangearg(pages, len(reader.pages))
    # Copy every page, rotating the selected ones.
    # BUG FIX: the original iterated only the *selected* pages and compared
    # the enumeration index against the original page numbers, so e.g.
    # pages=[5] compared i=0 with 5 and the page was never rotated (and
    # unselected pages were dropped from the output entirely).
    for i, page in enumerate(reader.pages):
        if pages is None or i in pages:
            if counter_clockwise:
                writer.addPage(page.rotateCounterClockwise(90))
            else:
                writer.addPage(page.rotateClockwise(90))
        else:
            writer.addPage(page)
    # Open output file or temporary file for writing
    if output is None:
        outfile = NamedTemporaryFile(delete=False)
    else:
        if not os.path.isfile(output) or overwrite_dlg(output):
            outfile = open(output, "wb")
        else:
            # User declined to overwrite: close the input to avoid leaking
            # the file handle (the original returned without closing it).
            infile.close()
            return
    # Write to file
    writer.write(outfile)
    infile.close()
    outfile.close()
    # If no output defined move temporary file to input
    if output is None:
        if overwrite_dlg(input):
            os.remove(input)
            move(outfile.name, input)
        else:
            os.remove(outfile.name)
async def on_raw_kick(self, message):
    """KICK command.

    Synchronizes the kicker's user record, removes the kicked users from
    the affected channels (or drops the channel entirely when we were
    kicked ourselves), and fires the on_kick callback per (channel, user).
    """
    kicker, kickermeta = self._parse_user(message.source)
    self._sync_user(kicker, kickermeta)
    # The kick reason is optional in the KICK command parameters.
    if len(message.params) > 2:
        channels, targets, reason = message.params
    else:
        channels, targets = message.params
        reason = None
    channels = channels.split(',')
    targets = targets.split(',')
    # A single KICK may name several channels and several targets; handle
    # every (channel, target) combination.
    for channel, target in itertools.product(channels, targets):
        target, targetmeta = self._parse_user(target)
        self._sync_user(target, targetmeta)
        if self.is_same_nick(target, self.nickname):
            # We were kicked ourselves: forget the channel entirely.
            self._destroy_channel(channel)
        else:  # Update nick list on channel.
            if self.in_channel(channel):
                self._destroy_user(target, channel)
        await self.on_kick(channel, target, kicker, reason)
def beacon(config):
    '''Watch the configured files

    Example Config

    .. code-block:: yaml

        beacons:
          inotify:
            - files:
                /path/to/file/or/dir:
                  mask:
                    - open
                    - create
                    - close_write
                  recurse: True
                  auto_add: True
                  exclude:
                    - /path/to/file/or/dir/exclude1
                    - /path/to/file/or/dir/exclude2
                    - /path/to/file/or/dir/regex[a-m]*$:
                        regex: True
            - coalesce: True

    The mask list can contain the following events (the default mask is create,
    delete, and modify):

    * access - File accessed
    * attrib - File metadata changed
    * close_nowrite - Unwritable file closed
    * close_write - Writable file closed
    * create - File created in watched directory
    * delete - File deleted from watched directory
    * delete_self - Watched file or directory deleted
    * modify - File modified
    * moved_from - File moved out of watched directory
    * moved_to - File moved into watched directory
    * move_self - Watched file moved
    * open - File opened

    The mask can also contain the following options:

    * dont_follow - Don't dereference symbolic links
    * excl_unlink - Omit events for children after they have been unlinked
    * oneshot - Remove watch after one event
    * onlydir - Operate only if name is directory

    recurse:
      Recursively watch files in the directory
    auto_add:
      Automatically start watching files that are created in the watched directory
    exclude:
      Exclude directories or files from triggering events in the watched directory.
      Can use regex if regex is set to True

    coalesce:
      If this coalescing option is enabled, events are filtered based on
      their unicity, only unique events are enqueued, doublons are discarded.
      An event is unique when the combination of its fields (wd, mask,
      cookie, name) is unique among events of a same batch. After a batch of
      events is processed any events are accepted again.
      This option is top-level (at the same level as the path) and therefore
      affects all paths that are being watched. This is due to this option
      being at the Notifier level in pyinotify.
    '''
    # The beacon config arrives as a list of dicts; merge into one dict.
    _config = {}
    list(map(_config.update, config))
    ret = []
    notifier = _get_notifier(_config)
    wm = notifier._watch_manager
    # Read in existing events
    if notifier.check_events(1):
        notifier.read_events()
        notifier.process_events()
        queue = __context__['inotify.queue']
        while queue:
            event = queue.popleft()
            _append = True
            # Find the matching path in config by walking up from the
            # event's path towards the filesystem root.
            path = event.path
            while path != '/':
                if path in _config.get('files', {}):
                    break
                path = os.path.dirname(path)
            excludes = _config['files'][path].get('exclude', '')
            if excludes and isinstance(excludes, list):
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        # Dict form: {pattern: {regex: bool}}.
                        _exclude = next(iter(exclude))
                        if exclude[_exclude].get('regex', False):
                            try:
                                if re.search(_exclude, event.pathname):
                                    _append = False
                            except Exception:
                                log.warning('Failed to compile regex: %s', _exclude)
                        else:
                            exclude = _exclude
                    elif '*' in exclude:
                        # Shell-style glob exclude.
                        if fnmatch.fnmatch(event.pathname, exclude):
                            _append = False
                    else:
                        # Plain path-prefix exclude.
                        if event.pathname.startswith(exclude):
                            _append = False
            if _append:
                sub = {'tag': event.path, 'path': event.pathname, 'change': event.maskname}
                ret.append(sub)
            else:
                log.info('Excluding %s from event for %s', event.pathname, path)
    # Get paths currently being watched
    current = set()
    for wd in wm.watches:
        current.add(wm.watches[wd].path)
    # Update existing watches and add new ones
    # TODO: make the config handle more options
    for path in _config.get('files', ()):
        if isinstance(_config['files'][path], dict):
            mask = _config['files'][path].get('mask', DEFAULT_MASK)
            if isinstance(mask, list):
                # Combine the named events into a single bitmask.
                r_mask = 0
                for sub in mask:
                    r_mask |= _get_mask(sub)
            elif isinstance(mask, salt.ext.six.binary_type):
                r_mask = _get_mask(mask)
            else:
                r_mask = mask
            mask = r_mask
            rec = _config['files'][path].get('recurse', False)
            auto_add = _config['files'][path].get('auto_add', False)
        else:
            mask = DEFAULT_MASK
            rec = False
            auto_add = False
        if path in current:
            # Path already watched: refresh mask/auto_add when they changed.
            for wd in wm.watches:
                if path == wm.watches[wd].path:
                    update = False
                    if wm.watches[wd].mask != mask:
                        update = True
                    if wm.watches[wd].auto_add != auto_add:
                        update = True
                    if update:
                        wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
        elif os.path.exists(path):
            # New path: build the pyinotify exclude filter and register it.
            excludes = _config['files'][path].get('exclude', '')
            excl = None
            if isinstance(excludes, list):
                excl = []
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        excl.append(list(exclude)[0])
                    else:
                        excl.append(exclude)
                excl = pyinotify.ExcludeFilter(excl)
            wm.add_watch(path, mask, rec=rec, auto_add=auto_add, exclude_filter=excl)
    # Return event data
    return ret
def update_data_display(self, data):
    """Triggered when the selected device/IMU is changed.

    Updates the background colours of the text widgets in the UI to
    indicate which sensors have calibration data available (green) and
    which do not (red).
    """
    uncal = 'QLineEdit {background-color: #cc3333;};'
    cal = 'QLineEdit {background-color: #33cc33;};'
    # (widget name prefix, calibration timestamp present?) per sensor,
    # in the same order the original updated them.
    sensors = (
        ('Acc', self.ACC_TIMESTAMP in data),
        ('Gyro', self.GYRO_TIMESTAMP in data),
        ('Mag', self.MAG_TIMESTAMP in data),
    )
    for prefix, calibrated in sensors:
        style = cal if calibrated else uncal
        for axis in 'XYZ':
            getattr(self, 'data' + prefix + axis).setStyleSheet(style)
def purchase_ip(self, debug=False):
    """Return an Ip object representing a newly purchased IP.

    @param debug [Boolean] if true, request and response will be printed
    @return (Ip): Ip object
    @raise Exception: when the API response payload is malformed
    """
    json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
    json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
    try:
        ip = Ip()
        ip.ip_addr = json_obj['Value']['Value']
        ip.resid = json_obj['Value']['ResourceId']
        return ip
    except (KeyError, TypeError) as error:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid unrelated bugs; a
        # missing/malformed response payload is the realistic failure here.
        raise Exception('Unknown error retrieving IP.') from error
def to_json(df, columns, confidence=None):
    """Transforms dataframe to properly formatted json response.

    :param df: DataFrame holding the rows to serialize.
    :param columns: mapping of column name -> column config; a "label"
        entry renames the column in the output.
    :param confidence: optional mapping of column name -> dict with
        "lower"/"upper" column names used for confidence bounds.
    :return: dict with "data" (list of row records) and "columns"
        (DataTables-style column descriptors).
    """
    # `confidence` previously defaulted to a shared mutable `{}`; use a
    # None sentinel to avoid the mutable-default-argument pitfall.
    if confidence is None:
        confidence = {}
    records = []
    display_cols = list(columns.keys())
    if not display_cols:
        # No explicit column config: show every dataframe column.
        display_cols = list(df.columns)
    bounds = {}
    for c in confidence:
        bounds[c] = {
            "min": df[confidence[c]["lower"]].min(),
            "max": df[confidence[c]["upper"]].max(),
            "lower": confidence[c]["lower"],
            "upper": confidence[c]["upper"],
        }
    # Map each displayed column to its label (falling back to its name).
    labels = {}
    for c in display_cols:
        if "label" in columns[c]:
            labels[c] = columns[c]["label"]
        else:
            labels[c] = c
    for i, row in df.iterrows():
        row_ = DataTable.format_row(row, bounds, columns)
        records.append({labels[c]: row_[c] for c in display_cols})
    return {"data": records, "columns": [{"data": labels[c]} for c in display_cols]}
def addAnalysis(self, analysis, position=None):
    """- add the analysis to self.Analyses().
    - position is overruled if a slot for this analysis' parent exists
    - if position is None, next available pos is used.
    """
    # Cannot add an analysis if not open, unless a retest
    if api.get_review_status(self) not in ["open", "to_be_verified"]:
        retracted = analysis.getRetestOf()
        if retracted not in self.getAnalyses():
            return
    # Cannot add an analysis that is assigned already
    if analysis.getWorksheet():
        return
    # Just in case: drop a pre-existing entry for this analysis so it is
    # not listed twice after being re-appended below.
    analyses = self.getAnalyses()
    if analysis in analyses:
        # NOTE(review): under Python 3 `filter` returns an iterator, not a
        # list — confirm setAnalyses accepts that (codebase looks Python 2).
        analyses = filter(lambda an: an != analysis, analyses)
        self.setAnalyses(analyses)
        self.updateLayout()
    # Cannot add an analysis if the assign transition is not possible
    # We need to bypass the guard's check for current context!
    api.get_request().set("ws_uid", api.get_uid(self))
    if not isTransitionAllowed(analysis, "assign"):
        return
    # Assign the instrument from the worksheet to the analysis, if possible
    instrument = self.getInstrument()
    if instrument and analysis.isInstrumentAllowed(instrument):
        # TODO Analysis Instrument + Method assignment
        methods = instrument.getMethods()
        if methods:
            # Set the first method assigned to the selected instrument
            analysis.setMethod(methods[0])
        analysis.setInstrument(instrument)
    elif not instrument:
        # If the ws doesn't have an instrument try to set the method
        method = self.getMethod()
        if method and analysis.isMethodAllowed(method):
            analysis.setMethod(method)
    # Transition analysis to "assigned"
    actions_pool = ActionHandlerPool.get_instance()
    actions_pool.queue_pool()
    doActionFor(analysis, "assign")
    self.setAnalyses(analyses + [analysis])
    self.addToLayout(analysis, position)
    # Try to rollback the worksheet to prevent inconsistencies
    doActionFor(self, "rollback_to_open")
    # Reindex Worksheet
    idxs = ["getAnalysesUIDs"]
    push_reindex_to_actions_pool(self, idxs=idxs)
    # Reindex Analysis Request, if any
    if IRequestAnalysis.providedBy(analysis):
        idxs = ['assigned_state', 'getDueDate']
        push_reindex_to_actions_pool(analysis.getRequest(), idxs=idxs)
    # Resume the actions pool
    actions_pool.resume()
def calculate_nf(sample_frame, ref_targets, ref_sample):
    """Calculates a normalization factor from the geometric mean of the
    expression of all ref_targets, normalized to a reference sample.

    :param DataFrame sample_frame: A sample data frame.
    :param iterable ref_targets: A list or Series of target names.
    :param string ref_sample: The name of the sample to normalize against.
    :return: a Series indexed by sample name containing normalization factors
        for each sample.
    """
    # Mean Cq per (Target, Sample) pair.
    grouped = sample_frame.groupby(['Target', 'Sample'])['Cq'].aggregate(average_cq)
    samples = sample_frame['Sample'].unique()
    # Per reference gene: relative quantity of each sample vs the reference
    # sample (2^-dCq); the normalization factor is the geometric mean of
    # those quantities across all reference genes.
    # NOTE(review): `.ix` has been removed from modern pandas, and under
    # Python 3 `zip` returns an iterator — this looks written for
    # Python 2 + old pandas; confirm before porting.
    nfs = gmean([pow(2, -grouped.ix[zip(repeat(ref_gene), samples)] + grouped.ix[ref_gene, ref_sample]) for ref_gene in ref_targets])
    return pd.Series(nfs, index=samples)
def print_experiments(experiments):
    """Prints job details in a table. Includes urls and mode parameters"""
    headers = ["JOB NAME", "CREATED", "STATUS", "DURATION(s)", "INSTANCE", "DESCRIPTION", "METRICS"]
    rows = [
        [
            normalize_job_name(job.name),
            job.created_pretty,
            job.state,
            job.duration_rounded,
            job.instance_type_trimmed,
            job.description,
            format_metrics(job.latest_metrics),
        ]
        for job in experiments
    ]
    floyd_logger.info(tabulate(rows, headers=headers))
def init_app(self, app):
    '''Configures an application. This registers an `after_request` call, and
    attaches this `LoginManager` to it as `app.login_manager`.
    '''
    self._config = app.config.get('LDAP', {})
    app.ldap_login_manager = self
    # NOTE(review): defaults below are applied through `self.config` while
    # the raw dict was stored on `self._config` — presumably `config` is a
    # property over `_config`; confirm against the class definition.
    self.config.setdefault('BIND_DN', '')
    self.config.setdefault('BIND_AUTH', '')
    self.config.setdefault('URI', 'ldap://127.0.0.1')
    self.config.setdefault('OPTIONS', {})
    # Referrals are disabled by default
    self.config['OPTIONS'].setdefault(ldap.OPT_REFERRALS, ldap.OPT_OFF)
    # Normalize USER_SEARCH so downstream code can assume a list of
    # search descriptions.
    if self.config.get('USER_SEARCH') and not isinstance(self.config['USER_SEARCH'], list):
        self.config['USER_SEARCH'] = [self.config['USER_SEARCH']]
def assemble_cx():
    """Assemble INDRA Statements and return CX network json."""
    # CORS preflight request: reply with an empty body.
    if request.method == 'OPTIONS':
        return {}
    # Request body carries a JSON object with a 'statements' list.
    response = request.body.read().decode('utf-8')
    body = json.loads(response)
    stmts_json = body.get('statements')
    stmts = stmts_from_json(stmts_json)
    ca = CxAssembler(stmts)
    model_str = ca.make_model()
    res = {'model': model_str}
    return res
def resolve(self, definitions):
    """Resolve named references to other WSDL objects.

    This includes cross-linking information (from) the portType (to) the
    I{soap} protocol information on the binding for each operation.
    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    """
    self.resolveport(definitions)
    for operation in self.operations.values():
        self.resolvesoapbody(definitions, operation)
        self.resolveheaders(definitions, operation)
        self.resolvefaults(definitions, operation)
def create_layer_2_socket():
    """Create a raw AF_PACKET socket for recording layer 2, 3 and 4 frames.

    Returns the socket, or None when creation fails.
    """
    log.info("Creating l234 socket")
    try:
        # ETH_P_ALL (0x0003) in network byte order captures every protocol.
        return socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
    except socket.error as msg:
        log.error(("Socket could not be created ex={}").format(msg))
    return None
def logs_handle_build_job(job_uuid: str, job_name: str, log_lines: Optional[Union[str, Iterable[str]]], temp: bool = True) -> None:
    """Task handling for sidecar logs.

    Thin wrapper delegating directly to ``handle_build_job_logs``.
    """
    forwarded = dict(job_uuid=job_uuid, job_name=job_name, log_lines=log_lines, temp=temp)
    handle_build_job_logs(**forwarded)
def process_request(request):
    """Adds a "mobile" attribute to the request which is True or False
    depending on whether the request should be considered to come from a
    small-screen device such as a phone or a PDA"""
    if 'HTTP_X_OPERAMINI_FEATURES' in request.META:
        # Then it's running opera mini. 'Nuff said.
        # Reference from:
        # http://dev.opera.com/articles/view/opera-mini-request-headers/
        request.mobile = True
        return None
    if 'HTTP_ACCEPT' in request.META:
        s = request.META['HTTP_ACCEPT'].lower()
        if 'application/vnd.wap.xhtml+xml' in s:
            # Then it's a wap browser
            request.mobile = True
            return None
    if 'HTTP_USER_AGENT' in request.META:
        # This takes the most processing. Surprisingly enough, when I
        # Experimented on my own machine, this was the most efficient
        # algorithm. Certainly more so than regexes.
        # Also, Caching didn't help much, with real-world caches.
        s = request.META['HTTP_USER_AGENT'].lower()
        for ua in search_strings:
            if ua in s:
                # check if we are ignoring this user agent: (IPad)
                if not ignore_user_agent(s):
                    request.mobile = True
                    if MOBI_DETECT_TABLET:
                        request.tablet = _is_tablet(s)
                    return None
    # Otherwise it's not a mobile
    request.mobile = False
    request.tablet = False
    return None
def is_direct_subclass(class_, of):
    """Check whether given class is a direct subclass of the other.

    :param class_: Class to check
    :param of: Superclass to check against
    :return: Boolean result of the test

    .. versionadded:: 0.0.4
    """
    ensure_class(class_)
    ensure_class(of)
    # TODO(xion): support predicates in addition to classes
    direct_bases = class_.__bases__
    return of in direct_bases
def set_rate(self, param):
    """Models "Rate Command" functionality of device.

    Sets the target rate of temperature change.

    :param param: Rate of temperature change in C/min, multiplied by 100,
        as a string. Must be positive.
    :return: Empty string on success; None when the value is out of range.
    """
    # TODO: Is not having leading zeroes / 4 digits an error?
    requested = int(param)
    if not 1 <= requested <= 15000:
        return
    self.device.temperature_rate = requested / 100.0
    return ""
def json(self):
    """Return a JSON-serializable representation of this result.

    The output of this function can be converted to a serialized string
    with :any:`json.dumps`.
    """
    # Start from the parent representation, then add the model-specific
    # frames serialized in pandas' "split" orientation.
    data = super(CalTRACKHourlyModel, self).json()
    data.update(
        {
            "occupancy_lookup": self.occupancy_lookup.to_json(orient="split"),
            "temperature_bins": self.temperature_bins.to_json(orient="split"),
        }
    )
    return data
def make_organisms(self, genome_list, genome_dir):
    '''Organism factory method.

    Appends organisms to the organisms list.

    Args
        genome_list (list)
        genome_dir (string)
    '''
    # NOTE: Python 2 syntax (print statement, `except X, e`).
    for genome in genome_list:
        genome_path = genome_dir + genome
        handle = open(genome_path, "rU")
        print 'Adding organism attributes for', genome
        try:
            seq_record = SeqIO.read(handle, "genbank")
            self.organisms.append(Organism(seq_record, genome_path, self))
            # Free the parsed record before moving on to the next genome.
            del(seq_record)
        except ValueError, e:
            print genome, str(e)
        except AssertionError, e:
            print genome, str(e)
        except UnboundLocalError, e:
            print genome, str(e)
        handle.close()
def scroll_to_horizontally(self, obj, *args, **selectors):
    """Scroll (horizontally) on the object *obj* until the specific UI
    object with the given *selectors* attributes appears.

    Return true if the UI object appears, else return false.
    See `Scroll To Vertically` for more details.
    """
    scroller = obj.scroll.horiz
    return scroller.to(**selectors)
def get_bounding_box(self, lon, lat, trt=None, mag=None):
    """Build a bounding box around the given lon, lat by computing the
    maximum_distance at the given tectonic region type and magnitude.

    :param lon: longitude
    :param lat: latitude
    :param trt: tectonic region type, possibly None
    :param mag: magnitude, possibly None
    :returns: min_lon, min_lat, max_lon, max_lat
    """
    if trt is None:
        # No TRT given: be conservative and take the greatest integration
        # distance over all known tectonic region types.
        maxdist = max(self(t, mag) for t in self.dic)
    else:
        # get the integration distance for the given TRT
        maxdist = self(trt, mag)
    delta_lat = min(maxdist * KM_TO_DEGREES, 90)
    delta_lon = min(angular_distance(maxdist, lat), 180)
    return lon - delta_lon, lat - delta_lat, lon + delta_lon, lat + delta_lat
def execute_notebook(input_path, output_path, parameters=None, engine_name=None, prepare_only=False, kernel_name=None, progress_bar=True, log_output=False, start_timeout=60, report_mode=False, cwd=None, ):
    """Executes a single notebook locally.

    Parameters
    ----------
    input_path : str
        Path to input notebook
    output_path : str
        Path to save executed notebook
    parameters : dict, optional
        Arbitrary keyword arguments to pass to the notebook parameters
    engine_name : str, optional
        Name of execution engine to use
    prepare_only : bool, optional
        Flag to determine if execution should occur or not
    kernel_name : str, optional
        Name of kernel to execute the notebook against
    progress_bar : bool, optional
        Flag for whether or not to show the progress bar.
    log_output : bool, optional
        Flag for whether or not to write notebook output_path to `stderr`
    start_timeout : int, optional
        Duration in seconds to wait for kernel start-up
    report_mode : bool, optional
        Flag for whether or not to hide input.
    cwd : str, optional
        Working directory to use when executing the notebook

    Returns
    -------
    nb : NotebookNode
        Executed notebook object
    """
    # Resolve built-in placeholders (e.g. timestamps) in both paths.
    path_parameters = add_builtin_parameters(parameters)
    input_path = parameterize_path(input_path, path_parameters)
    output_path = parameterize_path(output_path, path_parameters)
    logger.info("Input Notebook: %s" % get_pretty_path(input_path))
    logger.info("Output Notebook: %s" % get_pretty_path(output_path))
    with local_file_io_cwd():
        if cwd is not None:
            logger.info("Working directory: {}".format(get_pretty_path(cwd)))
        nb = load_notebook_node(input_path)
        # Parameterize the Notebook.
        if parameters:
            nb = parameterize_notebook(nb, parameters, report_mode)
        nb = prepare_notebook_metadata(nb, input_path, output_path, report_mode)
        if not prepare_only:
            # Fetch the kernel name if it's not supplied
            kernel_name = kernel_name or nb.metadata.kernelspec.name
            # Execute the Notebook in `cwd` if it is set
            with chdir(cwd):
                nb = papermill_engines.execute_notebook_with_engine(engine_name, nb, input_path=input_path, output_path=output_path, kernel_name=kernel_name, progress_bar=progress_bar, log_output=log_output, start_timeout=start_timeout, )
            # Check for errors first (it saves on error before raising)
            raise_for_execution_errors(nb, output_path)
        # Write final output in case the engine didn't write it on cell completion.
        write_ipynb(nb, output_path)
        return nb
def call_hpp(self, message, action, hmac_key="", **kwargs):
    """Call the Adyen Hosted Payment Pages (HPP) endpoint.

    Fills configuration defaults into ``message``, signs it with an HMAC
    key and posts it to the HPP URL built from the configured platform
    and ``action``.

    Args:
        message (dict): Request fields to send to the HPP. Missing
            'skinCode'/'merchantAccount' entries are filled from this
            object's configuration.
        action (str): The specific HPP action used to build the URL.
        hmac_key (str, optional): HMAC key used to sign the request.
        **kwargs: Extra arguments forwarded to the HTTP client request.

    Returns:
        AdyenResult: Returned on a 200 response; otherwise an exception
        is raised by the response handler.

    Raises:
        AdyenInvalidRequestError: If no HMAC key is available.
        TypeError / ValueError: If ``platform`` is not 'live' or 'test'.
    """
    # Lazily create the HTTP client on first use.
    if not self.http_init:
        self.http_client = HTTPClient(self.app_name, self.USER_AGENT_SUFFIX, self.LIB_VERSION, self.http_force)
        self.http_init = True
    # NOTE(review): the original comment claimed the hmac provided in the
    # function call has highest priority with fallback to self, but the
    # code below actually prefers self.hmac whenever it is set -- confirm
    # which behavior is intended before changing either.
    hmac = hmac_key
    if self.hmac:
        hmac = self.hmac
    elif not hmac:
        errorstring = """Please set an hmac with your Adyen.Adyen
class instance.
'Adyen.hmac = \"!WR#F@...\"' or as an additional
parameter in the function call ie.
'Adyen.hpp.directory_lookup(hmac=\"!WR#F@...\"'. Please reach
out to support@Adyen.com if the issue persists."""
        raise AdyenInvalidRequestError(errorstring)
    # platform provided in self has highest priority, and must be one of
    # the two known Adyen environments.
    platform = self.platform
    if not isinstance(platform, str):
        errorstring = "'platform' must be type string"
        raise TypeError(errorstring)
    elif platform.lower() not in ['live', 'test']:
        errorstring = " 'platform' must be the value of 'live' or 'test' "
        raise ValueError(errorstring)
    # Fill configuration defaults for fields the caller omitted (or left
    # empty, for merchantAccount).
    if 'skinCode' not in message:
        message['skinCode'] = self.skin_code
    if 'merchantAccount' not in message:
        message['merchantAccount'] = self.merchant_account
    if message['merchantAccount'] == "":
        message['merchantAccount'] = self.merchant_account
    # Sign the request so the HPP can verify its integrity.
    message["merchantSig"] = util.generate_hpp_sig(message, hmac)
    url = self._determine_hpp_url(platform, action)
    raw_response, raw_request, status_code, headers = self.http_client.request(url, data=message, username="", password="", **kwargs)
    # Creates AdyenResponse if request was successful, raises error if not.
    adyen_result = self._handle_response(url, raw_response, raw_request, status_code, headers, message)
    return adyen_result
def build_trees_from_text(text, layer, **kwargs):
    '''Given a text object and the name of the layer where dependency syntactic
    relations are stored, builds trees (estnltk.syntax.utils.Tree objects)
    from all the sentences of the text and returns them as a list of Trees.

    Uses the method build_trees_from_sentence() for acquiring the trees of
    each sentence.

    Note that there is a one-to-many correspondence between EstNLTK's
    sentences and dependency syntactic trees: one sentence can evoke
    multiple trees.
    '''
    # Local import to avoid a circular dependency at module load time.
    from estnltk.text import Text
    assert isinstance(text, Text), '(!) Unexpected text argument! Should be Estnltk\'s Text object.'
    assert layer in text, '(!) The layer ' + str(layer) + ' is missing from the input text.'
    text_sentences = list(text.divide(layer=WORDS, by=SENTENCES))
    all_sentence_trees = []  # Collected sentence trees
    prev_sent_id = -1
    # (!) Note: if the Text object has been split into smaller Texts with
    # split_by(), SENT_ID-s still refer to the old text, and thus are not
    # useful as indices anymore. Therefore we also use another variable --
    # norm_prev_sent_id -- that always counts sentences starting from 0,
    # and use SENT_ID / prev_sent_id only for deciding whether one sentence
    # ends and another begins.
    norm_prev_sent_id = -1
    current_sentence = []
    k = 0
    while k < len(text[layer]):
        node_desc = text[layer][k]
        if prev_sent_id != node_desc[SENT_ID] and current_sentence:
            norm_prev_sent_id += 1
            # If the index of the sentence has changed, and we have collected
            # a sentence, then build tree(s) from this sentence.
            assert norm_prev_sent_id < len(text_sentences), '(!) Sentence with the index ' + str(norm_prev_sent_id) + ' not found from the input text.'
            sentence = text_sentences[norm_prev_sent_id]
            trees_of_sentence = build_trees_from_sentence(sentence, current_sentence, layer, sentence_id=norm_prev_sent_id, **kwargs)
            # Record trees constructed from this sentence
            all_sentence_trees.extend(trees_of_sentence)
            # Reset the sentence collector
            current_sentence = []
        # Collect sentence
        current_sentence.append(node_desc)
        prev_sent_id = node_desc[SENT_ID]
        k += 1
    if current_sentence:
        # Flush the trailing sentence (the loop above only builds trees on
        # sentence-boundary changes, so the last one needs explicit handling).
        norm_prev_sent_id += 1
        assert norm_prev_sent_id < len(text_sentences), '(!) Sentence with the index ' + str(norm_prev_sent_id) + ' not found from the input text.'
        sentence = text_sentences[norm_prev_sent_id]
        # If we have collected a sentence, then build tree(s) from this sentence
        trees_of_sentence = build_trees_from_sentence(sentence, current_sentence, layer, sentence_id=norm_prev_sent_id, **kwargs)
        # Record trees constructed from this sentence
        all_sentence_trees.extend(trees_of_sentence)
    return all_sentence_trees
def _setup_apikey_policy(config, params):
    """Set up `nefertari.ApiKeyAuthenticationPolicy`.

    Notes:
        * Users may provide a model name in params['user_model'] to define
          the name of the user model.
        * `auth_model.get_groups_by_token` performs the username/token check.
        * `auth_model.get_token_credentials` extracts username and token
          from a userid.
        * Basic routes for authentication actions are also connected.

    Arguments:
        :config: Pyramid Configurator instance.
        :params: Nefertari dictset with the security scheme `settings`.
    """
    from nefertari.authentication.views import (
        TokenAuthRegisterView, TokenAuthClaimView, TokenAuthResetView)
    log.info('Configuring ApiKey Authn policy')
    auth_model = config.registry.auth_model
    # Wire the model's hooks into the policy parameters.
    params.update({
        'check': auth_model.get_groups_by_token,
        'credentials_callback': auth_model.get_token_credentials,
        'user_model': auth_model,
    })
    config.add_request_method(auth_model.get_authuser_by_name, 'user', reify=True)
    policy = ApiKeyAuthenticationPolicy(**params)
    # With database ACLs enabled, the register view also assigns ACLs.
    if config.registry.database_acls:
        class register_base(ACLAssignRegisterMixin, TokenAuthRegisterView):
            pass
    else:
        register_base = TokenAuthRegisterView

    class RamsesTokenAuthRegisterView(register_base):
        Model = auth_model

    class RamsesTokenAuthClaimView(TokenAuthClaimView):
        Model = auth_model

    class RamsesTokenAuthResetView(TokenAuthResetView):
        Model = auth_model

    common_kw = {
        'prefix': 'auth',
        'factory': 'nefertari.acl.AuthenticationACL',
    }
    root = config.get_root_resource()
    for route_name, view_cls in (
            ('register', RamsesTokenAuthRegisterView),
            ('token', RamsesTokenAuthClaimView),
            ('reset_token', RamsesTokenAuthResetView)):
        root.add(route_name, view=view_cls, **common_kw)
    return policy
def _model(self, beta):
    """Creates the structure of the model (model matrices etc).

    Parameters
    ----------
    beta : np.ndarray
        Contains untransformed starting values for the latent variables

    Returns
    -------
    mu : np.ndarray
        Contains the predicted values (location) for the time series
    Y : np.ndarray
        Contains the length-adjusted time series (accounting for lags)
    """
    # Drop the first max_lag observations so Y lines up with the lagged design.
    Y = np.array(self.data[self.max_lag:])
    # Transform latent variables from their unconstrained space into the
    # parameter space via each variable's prior transform.
    z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    # Slice each parameter group out of z by its recorded index range
    # (ranges are inclusive, hence the +1 on 'end').
    bias = z[self.latent_variables.z_indices['Bias']['start']:self.latent_variables.z_indices['Bias']['end']+1]
    bias = np.reshape(bias, (-1, self.units))
    output_bias = z[self.latent_variables.z_indices['Output bias']['start']]
    input_weights = z[self.latent_variables.z_indices['Input weight']['start']:self.latent_variables.z_indices['Input weight']['end']+1]
    input_weights = np.reshape(input_weights, (-1, self.units))
    output_weights = z[self.latent_variables.z_indices['Output weight']['start']:self.latent_variables.z_indices['Output weight']['end']+1]
    # Construct neural network: first hidden layer from the input design X.
    # NOTE(review): assumes self.X is laid out (features, observations) so
    # that X.T.dot(input_weights) yields (observations, units) -- confirm.
    h = self.activation(self.X.T.dot(input_weights) + bias[0])
    if self.layers > 1:
        hidden_weights = z[self.latent_variables.z_indices['Hidden weight']['start']:self.latent_variables.z_indices['Hidden weight']['end']+1]
        hidden_weights = np.reshape(hidden_weights, (self.layers-1, self.units, -1))
        for k in range(0, self.layers-1):
            # Each subsequent hidden layer uses its own weight slice and
            # its own bias row (offset by 1 because row 0 fed the first layer).
            h = self.activation(h.dot(hidden_weights[k]) + bias[1+k])
    # Linear output layer produces the location (mu) series.
    return h.dot(output_weights) + output_bias, Y
def _parse_version(self, value):
    """Parse the ``version`` option value.

    :param value: raw option value; may be a ``file:`` or ``attr:`` directive
    :rtype: str
    """
    version = self._parse_file(value)
    if version != value:
        # Value was loaded from a file: be strict, because stray newlines
        # or other unintended content are easy to include accidentally.
        version = version.strip()
        if isinstance(parse(version), LegacyVersion):
            raise DistutilsOptionError(
                'Version loaded from {value} does not '
                'comply with PEP 440: {version}'.format(
                    value=value, version=version))
        return version
    version = self._parse_attr(value, self.package_dir)
    if callable(version):
        version = version()
    if isinstance(version, string_types):
        return version
    # Non-string attr values: join iterables with dots, stringify the rest.
    if hasattr(version, '__iter__'):
        return '.'.join(map(str, version))
    return '%s' % version
def delegated_login(self, login, admin_zc, duration=0):
    """Log in via the DelegateAuth mechanism using an already logged-in admin.

    :param login: the user login (or email) you want to log in as
    :param admin_zc: an already logged-in admin client
    :type admin_zc: ZimbraAdminClient
    :param duration: requested token lifetime in seconds; 0 means "use the
        server default" (a literal zero would be interpreted literally by
        the API, so the key is only sent when non-zero)
    """
    selector = zobjects.Account(name=login).to_selector()
    delegate_args = {'account': selector}
    if duration:
        # BUG FIX: was ``delegate_args['duration': duration]`` -- a slice
        # expression used as a dict key, which raises TypeError at runtime.
        delegate_args['duration'] = duration
    resp = admin_zc.request('DelegateAuth', delegate_args)
    lifetime = resp['lifetime']
    authToken = resp['authToken']
    self.login_account = login
    self.login_with_authToken(authToken, lifetime)
def flux_matrix(T, pi, qminus, qplus, netflux=True):
    r"""Compute the TPT flux network for the reaction A --> B.

    Parameters
    ----------
    T : (M, M) ndarray
        Transition matrix (dense or sparse).
    pi : (M,) ndarray
        Stationary distribution corresponding to T.
    qminus : (M,) ndarray
        Backward committor.
    qplus : (M,) ndarray
        Forward committor.
    netflux : boolean
        True: compute the net flux matrix; False: compute the gross flux
        matrix.

    Returns
    -------
    flux : (M, M) ndarray
        Matrix of flux values between pairs of states.

    Notes
    -----
    Computation of the flux network relies on transition path theory (TPT)
    [1]_ in its discrete [2]_, transition-matrix formulation [3]_. The
    gross flux is

    .. math:: f_{ij} = \left\{ \begin{array}{rl}
              \pi_i q_i^{(-)} p_{ij} q_j^{(+)} & i \neq j \\
              0 & i = j \end{array} \right.

    and the net flux is

    .. math:: f_{ij} = \max\{f_{ij} - f_{ji}, 0\} \quad \forall i, j.

    See also
    --------
    committor.forward_committor, committor.backward_committor

    References
    ----------
    .. [1] W. E and E. Vanden-Eijnden. Towards a theory of transition
       paths. J. Stat. Phys. 123: 503-523 (2006)
    .. [2] P. Metzner, C. Schuette and E. Vanden-Eijnden. Transition Path
       Theory for Markov Jump Processes. Multiscale Model Simul
       7: 1192-1219 (2009)
    .. [3] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and T. Weikl:
       Constructing the Full Ensemble of Folding Pathways from Short
       Off-Equilibrium Simulations. Proc. Natl. Acad. Sci. USA,
       106, 19011-19016 (2009)
    """
    # Dispatch to the implementation matching the matrix storage format.
    if issparse(T):
        impl = sparse.tpt.flux_matrix
    elif isdense(T):
        impl = dense.tpt.flux_matrix
    else:
        raise _type_not_supported
    return impl(T, pi, qminus, qplus, netflux=netflux)
def get_license_metadata(self):
    """Gets the metadata for the license.

    return: (osid.Metadata) - metadata for the license
    *compliance: mandatory -- This method must be implemented.*
    """
    # Copy the template metadata and graft on the current license value.
    merged = dict(
        self._license_metadata,
        existing_string_values=self.my_osid_object_form._my_map['license'])
    return Metadata(**merged)
def validate_url(self, original_string):
    """Return the parsed URL if *original_string* is valid.

    Raises an ArgumentError if it is not. When ``self.path_only`` is set,
    only a bare path (no scheme/netloc) is accepted; otherwise a full
    http(s) URL is required.

    :param original_string: the candidate URL (or path) to validate
    :return: the urlparse result for the input
    :raises ArgumentError: if the input is not a valid URL
    """
    # nipped from stack overflow: http://stackoverflow.com/questions/827557/how-do-you-validate-a-url-with-a-regular-expression-in-python
    # preferred over the thorough regex approach for simplicity and
    # readability
    pieces = urlparse.urlparse(original_string)
    try:
        if self.path_only:
            assert not any([pieces.scheme, pieces.netloc])
            assert pieces.path
        else:
            assert all([pieces.scheme, pieces.netloc])
            # BUG FIX: string.letters does not exist on Python 3;
            # string.ascii_letters is available on both Python 2 and 3.
            valid_chars = set(string.ascii_letters + string.digits + ":-_.")
            assert set(pieces.netloc) <= valid_chars
            assert pieces.scheme in ['http', 'https']
    except AssertionError:
        raise ArgumentError(self.item_name, "The input you've provided is not a valid URL.")
    return pieces
def is_auth_alive(self):
    """Return True if the auth is not expired, else False."""
    model = self.model('ir.model')
    try:
        # Cheapest possible request: a 403 means the session has expired.
        model.search([], None, 1, None)
    except ClientError as err:
        if err.message['code'] == 403:
            return False
        # Any other client error is unexpected: propagate it.
        raise
    # BUG FIX (cleanup): removed the no-op ``except Exception: raise``
    # handler and the redundant ``if err and`` truthiness test.
    return True
def stop(self, api=None):
    """Stop automation run.

    :param api: sevenbridges Api instance.
    :return: AutomationRun object
    """
    client = api or self._API
    action_url = self._URL['actions'].format(
        id=self.id, action=AutomationRunActions.STOP)
    return client.post(url=action_url).content
def ls(self, path, offset=None, amount=None):
    """Return a list of files/directories under ``path`` via WebDAV PROPFIND.

    Each item is a dict with keys: 'path', 'creationdate', 'displayname',
    'lastmodified', 'isDir'; files additionally carry 'length', 'etag'
    and 'type'.

    :param path: remote path to list
    :param offset: optional pagination offset (applied only when both
        offset and amount are given)
    :param amount: optional number of entries to return
    :raises YaDiskException: if the server does not answer 207 Multi-Status
    """
    def parseContent(content):
        # Parse the WebDAV multi-status XML body into a list of node dicts.
        result = []
        root = ET.fromstring(content)
        for response in root.findall('.//d:response', namespaces=self.namespaces):
            node = {
                'path': response.find("d:href", namespaces=self.namespaces).text,
                'creationdate': response.find("d:propstat/d:prop/d:creationdate", namespaces=self.namespaces).text,
                'displayname': response.find("d:propstat/d:prop/d:displayname", namespaces=self.namespaces).text,
                'lastmodified': response.find("d:propstat/d:prop/d:getlastmodified", namespaces=self.namespaces).text,
                # A <d:collection/> element marks directories.
                'isDir': response.find("d:propstat/d:prop/d:resourcetype/d:collection", namespaces=self.namespaces) != None,
            }
            if not node['isDir']:
                # File-only properties.
                node['length'] = response.find("d:propstat/d:prop/d:getcontentlength", namespaces=self.namespaces).text
                node['etag'] = response.find("d:propstat/d:prop/d:getetag", namespaces=self.namespaces).text
                node['type'] = response.find("d:propstat/d:prop/d:getcontenttype", namespaces=self.namespaces).text
            result.append(node)
        return result
    url = path
    if (offset is not None) and (amount is not None):
        url += "?offset={offset}&amount={amount}".format(offset=offset, amount=amount)
    # Depth: 1 limits the listing to the direct children of `path`.
    resp = self._sendRequest("PROPFIND", url, {'Depth': '1'})
    if resp.status_code == 207:
        return parseContent(resp.content)
    else:
        raise YaDiskException(resp.status_code, resp.content)
def _load_schema(name, path=__file__):
    """Load a schema from disk.

    The schema ``<name>.yaml`` is looked up next to *path* and compiled
    into a rapidjson validator.
    """
    schema_file = os.path.join(os.path.dirname(path), name + '.yaml')
    with open(schema_file) as handle:
        schema = yaml.safe_load(handle)
    # Pre-compile the schema once so validation is fast later.
    fast_schema = rapidjson.Validator(rapidjson.dumps(schema))
    return schema_file, (schema, fast_schema)
def brpoplpush(self, source, destination, timeout=0):
    """Emulate brpoplpush"""
    popped = self.brpop(source, timeout)
    if popped is None:
        return None
    # brpop returns a (key, value) pair; only the value is transferred.
    _, value = popped
    self.lpush(destination, value)
    return value
def resolve_annotations(raw_annotations: Dict[str, AnyType], module_name: Optional[str]) -> Dict[str, AnyType]:
    """Resolve string or ForwardRef annotations into type objects if possible.

    Partially taken from typing.get_type_hints.
    """
    # Names are resolved against the defining module's globals, when known.
    base_globals: Optional[Dict[str, Any]] = None
    if module_name:
        base_globals = sys.modules[module_name].__dict__
    resolved = {}
    for name, annotation in raw_annotations.items():
        if isinstance(annotation, str):
            annotation = ForwardRef(annotation, is_argument=False)
        try:
            annotation = _eval_type(annotation, base_globals, None)
        except NameError:
            # This is OK -- it can be fixed later with update_forward_refs.
            pass
        resolved[name] = annotation
    return resolved
def get_randomness_stream(self, decision_point: str, for_initialization: bool = False) -> RandomnessStream:
    """Provide a new source of random numbers for the given decision point.

    Parameters
    ----------
    decision_point :
        A unique identifier for a stream of random numbers. Typically
        represents a decision that needs to be made each time step, like
        'moves_left' or 'gets_disease'.
    for_initialization :
        Whether this stream generates key initialization information used
        to identify simulants in the Common Random Number framework. Such
        streams cannot be copied and should only generate the state table
        columns in ``builder.configuration.randomness.key_columns``.

    Raises
    ------
    RandomnessError :
        If another location in the simulation has already created a
        randomness stream with the same identifier.
    """
    # Each decision point owns exactly one stream; duplicates are a bug.
    if decision_point in self._decision_points:
        raise RandomnessError(f"Two separate places are attempting to create "
                              f"the same randomness stream for {decision_point}")
    new_stream = RandomnessStream(
        key=decision_point,
        clock=self._clock,
        seed=self._seed,
        index_map=self._key_mapping,
        manager=self,
        for_initialization=for_initialization,
    )
    self._decision_points[decision_point] = new_stream
    return new_stream
def set_database_path(dbfolder):
    """Write the database path into the config.

    Parameters
    ----------
    dbfolder : str or pathlib.Path
        Path to where pyciss will store the ISS images it downloads and
        receives.
    """
    configpath = get_configpath()
    try:
        d = get_config()
    except IOError:
        # No existing config yet: start a fresh one.
        d = configparser.ConfigParser()
    d['pyciss_db'] = {}
    # BUG FIX: ConfigParser only accepts string option values, but the
    # docstring allows pathlib.Path -- coerce explicitly.
    d['pyciss_db']['path'] = str(dbfolder)
    with configpath.open('w') as f:
        d.write(f)
    print("Saved database path into {}.".format(configpath))
def _compute_intra_event_std(self, C, C_PGA, pga1100, mag, vs30, vs30measured):
    """Compute intra event standard deviation (equation 24) as described
    in the errata and not in the original paper.

    :param C: coefficient table entry for the spectral period of interest
    :param C_PGA: coefficient table entry for PGA
    :param pga1100: median PGA on reference rock (vs30 = 1100 m/s)
    :param mag: magnitude
    :param vs30: site shear-wave velocity
    :param vs30measured: flag for measured (vs. inferred) vs30
    :returns: intra-event standard deviation
    """
    # sigma_b for the spectral period of interest and for PGA.
    sigma_b = self._compute_sigma_b(C, mag, vs30measured)
    sigma_b_pga = self._compute_sigma_b(C_PGA, mag, vs30measured)
    # Partial derivative of the site amplification term w.r.t. rock PGA.
    delta_amp = self._compute_partial_derivative_site_amp(C, pga1100, vs30)
    # Combine the terms; C['rho'] is the period/PGA correlation coefficient
    # coupling the two sigma_b contributions.
    std_intra = np.sqrt(sigma_b ** 2 + self.CONSTS['sigma_amp'] ** 2 + (delta_amp ** 2) * (sigma_b_pga ** 2) + 2 * delta_amp * sigma_b * sigma_b_pga * C['rho'])
    return std_intra
def pages(self):
    """A generator of all pages in the stream.

    Returns:
        types.GeneratorType[google.cloud.bigquery_storage_v1beta1.ReadRowsPage]:
            A generator of pages.
    """
    # Each page is an iterator of rows, and also exposes num_items,
    # remaining, and to_dataframe.
    schema, names = _avro_schema(self._read_session)
    for message in self._reader:
        # Keep the most recent stream status around for introspection.
        self._status = message.status
        yield ReadRowsPage(schema, names, message)
def connectionMade(self):
    """Register with the stomp server."""
    # Send the CONNECT frame produced by the state machine.
    self.transport.write(self.sm.connect())
def pass_missingremoterelease_v1(self):
    """Update the outlet link sequence |dam_senders.D|."""
    fluxes = self.sequences.fluxes.fastaccess
    senders = self.sequences.senders.fastaccess
    # Accumulate the missing remote release into the outlet node value.
    senders.d[0] = senders.d[0] + fluxes.missingremoterelease
def get_instrument_variables(ds):
    '''Returns a list of instrument variables

    :param netCDF4.Dataset ds: An open netCDF4 Dataset
    '''
    found = []

    def _collect(name):
        # Only record names that actually exist as variables, once each.
        if name and name in ds.variables and name not in found:
            found.append(name)

    # Per-variable instrument attributes first, then the global attribute.
    for var_name in ds.variables:
        _collect(getattr(ds.variables[var_name], 'instrument', ''))
    _collect(getattr(ds, 'instrument', ''))
    return found
def _init_taskqueue_stub ( self , ** stub_kwargs ) :
"""Initializes the taskqueue stub using nosegae config magic""" | task_args = { }
# root _ path is required so the stub can find ' queue . yaml ' or ' queue . yml '
if 'root_path' not in stub_kwargs :
for p in self . _app_path : # support - - gae - application values that may be a . yaml file
dir_ = os . path . dirname ( p ) if os . path . isfile ( p ) else p
if os . path . isfile ( os . path . join ( dir_ , 'queue.yaml' ) ) or os . path . isfile ( os . path . join ( dir_ , 'queue.yml' ) ) :
task_args [ 'root_path' ] = dir_
break
task_args . update ( stub_kwargs )
self . testbed . init_taskqueue_stub ( ** task_args ) |
def _get(self, url,
         param_dict=None,
         securityHandler=None,
         additional_headers=None,
         handlers=None,
         proxy_url=None,
         proxy_port=None,
         compress=True,
         custom_handlers=None,
         out_folder=None,
         file_name=None):
    """Performs a GET operation.

    Inputs:
        url - URL to request.
        param_dict - query parameters to send (dict).
        securityHandler - optional handler supplying auth parameters.
        additional_headers - extra (name, value) header tuples.
        handlers - extra urllib handlers to install in the opener.
        proxy_url/proxy_port - optional HTTP(S) proxy.
        compress - request gzip-encoded responses when True.
        custom_handlers - reserved; currently unused.
        out_folder - directory for downloaded attachments (defaults to the
            system temp directory).
        file_name - unused on entry; set when an attachment is saved.

    Output:
        returns a dictionary (parsed JSON), a string (raw body), or the
        path of a downloaded file for binary attachments.
    """
    # BUG FIX: the original signature used mutable default arguments
    # ({} and []); the default ``handlers`` list was mutated by the
    # appends below and accumulated handlers across calls.
    if param_dict is None:
        param_dict = {}
    if additional_headers is None:
        additional_headers = []
    if handlers is None:
        handlers = []
    if custom_handlers is None:
        custom_handlers = []
    self._last_method = "GET"
    CHUNK = 4056
    param_dict, handler, cj = self._processHandler(securityHandler, param_dict)
    headers = [] + additional_headers
    if compress:
        headers.append(('Accept-encoding', 'gzip'))
    else:
        headers.append(('Accept-encoding', ''))
    headers.append(('User-Agent', self.useragent))
    if len(param_dict.keys()) == 0:
        param_dict = None
    if handler is not None:
        handlers.append(handler)
    handlers.append(RedirectHandler())
    if cj is not None:
        handlers.append(request.HTTPCookieProcessor(cj))
    if proxy_url is not None:
        if proxy_port is None:
            proxy_port = 80
        proxies = {"http": "http://%s:%s" % (proxy_url, proxy_port),
                   "https": "https://%s:%s" % (proxy_url, proxy_port)}
        handlers.append(request.ProxyHandler(proxies))
    opener = request.build_opener(*handlers)
    opener.addheaders = headers
    # Long query strings must go in the body; short ones in the URL.
    if param_dict is None:
        resp = opener.open(url, data=param_dict)
    elif len(str(urlencode(param_dict))) + len(url) >= 1999:
        resp = opener.open(url, data=urlencode(param_dict))
    else:
        format_url = url + "?%s" % urlencode(param_dict)
        resp = opener.open(fullurl=format_url)
    self._last_code = resp.getcode()
    self._last_url = resp.geturl()
    # Get some headers from the response; they drive payload handling.
    maintype = self._mainType(resp)
    contentDisposition = resp.headers.get('content-disposition')
    contentEncoding = resp.headers.get('content-encoding')
    contentType = resp.headers.get('content-Type').split(';')[0].lower()
    contentLength = resp.headers.get('content-length')
    if (maintype.lower() in ('image', 'application/x-zip-compressed') or
            contentType == 'application/x-zip-compressed' or
            (contentDisposition is not None and
             contentDisposition.lower().find('attachment;') > -1)):
        # Binary attachment: stream it to disk and return the file path.
        fname = self._get_file_name(contentDisposition=contentDisposition, url=url)
        if out_folder is None:
            out_folder = tempfile.gettempdir()
        if contentLength is not None:
            max_length = int(contentLength)
            if max_length < CHUNK:
                CHUNK = max_length
        file_name = os.path.join(out_folder, fname)
        with open(file_name, 'wb') as writer:
            for data in self._chunk(response=resp, size=CHUNK):
                writer.write(data)
                writer.flush()
        return file_name
    # Text payload: collect chunks and join once (avoids quadratic +=).
    chunks = []
    for data in self._chunk(response=resp, size=CHUNK):
        chunks.append(data.decode('utf-8') if self.PY3 else data)
    read = "".join(chunks)
    try:
        results = json.loads(read)
        if 'error' in results:
            if 'message' in results['error']:
                if results['error']['message'] == 'Request not made over ssl':
                    if url.startswith('http://'):
                        # Retry once over https.
                        url = url.replace('http://', 'https://')
                        return self._get(url, param_dict, securityHandler,
                                         additional_headers, handlers,
                                         proxy_url, proxy_port, compress,
                                         custom_handlers, out_folder,
                                         file_name)
        return results
    except Exception:
        # Payload was not JSON (or the retry failed): hand back raw text.
        return read
def _next_rId ( self ) :
"""Next available rId in collection , starting from ' rId1 ' and making use
of any gaps in numbering , e . g . ' rId2 ' for rIds [ ' rId1 ' , ' rId3 ' ] .""" | for n in range ( 1 , len ( self ) + 2 ) :
rId_candidate = 'rId%d' % n
# like ' rId19'
if rId_candidate not in self :
return rId_candidate |
def get_eco_map(url):
    """Fetch a three-column ECO mapping file and convert it to a dict.

    Primary and secondary keys are joined, for example::

        IEA<TAB>GO_REF:000002<TAB>ECO:0000256
        IEA<TAB>GO_REF:000003<TAB>ECO:0000501
        IEA<TAB>Default<TAB>ECO:0000501

    becomes::

        {'IEA-GO_REF:000002': 'ECO:0000256',
         'IEA-GO_REF:000003': 'ECO:0000501',
         'IEA': 'ECO:0000501'}

    :return: dict
    """
    # This would go in a translation table, but it is generated dynamically;
    # maybe revisit when we move to a make-driven system.
    eco_map = {}
    response = urllib.request.urlopen(urllib.request.Request(url))
    for raw_line in response:
        line = raw_line.decode('utf-8').rstrip()
        if line.startswith('#'):
            continue
        code, go_ref, eco_curie = line.split('\t')
        if go_ref == 'Default':
            # Default rows map the bare evidence code.
            eco_map[code] = eco_curie
        else:
            eco_map["{}-{}".format(code, go_ref)] = eco_curie
    return eco_map
def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
    """Initial state for this cell.

    Parameters
    ----------
    func : callable, default symbol.zeros
        Function for creating initial state.
        For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
        `symbol.var` etc. Use `symbol.var` if you want to directly
        feed input as states.
        For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
    batch_size : int, default 0
        Only required for NDArray API. Size of the batch ('N' in layout)
        dimension of input.
    **kwargs :
        Additional keyword arguments passed to func. For example
        `mean`, `std`, `dtype`, etc.

    Returns
    -------
    states : nested list of Symbol
        Starting states for the first RNN step.
    """
    # Modifier cells (e.g. ZoneoutCell) wrap the base cell and must create
    # the states themselves, hence this guard.
    assert not self._modified, "After applying modifier cells (e.g. ZoneoutCell) the base " "cell cannot be called directly. Call the modifier cell instead."
    states = []
    for info in self.state_info(batch_size):
        # _init_counter gives each state a unique, reproducible name.
        self._init_counter += 1
        if info is not None:
            # Merge caller kwargs into the per-state info dict.
            info.update(kwargs)
        else:
            info = kwargs
        state = func(name='%sbegin_state_%d' % (self._prefix, self._init_counter), **info)
        states.append(state)
    return states
def start_date(request):
    """Add the start date to the context for eighth admin views."""
    user = request.user
    if user and user.is_authenticated and user.is_eighth_admin:
        return {"admin_start_date": get_start_date(request)}
    # Non-admin (or anonymous) requests contribute nothing to the context.
    return {}
def read_int16(self, little_endian=True):
    """Read 2 bytes as a signed integer value from the stream.

    Args:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int:
    """
    # struct format prefix selects byte order; 'h' is a signed short.
    prefix = "<" if little_endian else ">"
    return self.unpack('%sh' % prefix, 2)
def blend_palette(colors, n_colors=6, as_cmap=False):
    """Make a palette that blends between a list of colors.

    Parameters
    ----------
    colors : sequence of matplotlib colors
        hex, rgb-tuple, or html color name
    n_colors : int, optional
        number of colors in the palette
    as_cmap : bool, optional
        if True, return as a matplotlib colormap instead of list

    Returns
    -------
    palette : list or colormap
    """
    cmap_name = "-".join(map(str, colors))
    cmap = mpl.colors.LinearSegmentedColormap.from_list(cmap_name, colors)
    if as_cmap:
        return cmap
    # Sample the colormap at evenly spaced points to build a discrete palette.
    return cmap(np.linspace(0, 1, n_colors))
def _init_itemid2name ( self ) :
"""Print gene symbols instead of gene IDs , if provided .""" | if not hasattr ( self . args , 'id2sym' ) :
return None
fin_id2sym = self . args . id2sym
if fin_id2sym is not None and os . path . exists ( fin_id2sym ) :
id2sym = { }
cmpl = re . compile ( r'^\s*(\S+)[\s,;]+(\S+)' )
with open ( fin_id2sym ) as ifstrm :
for line in ifstrm :
mtch = cmpl . search ( line )
if mtch :
id2sym [ mtch . group ( 1 ) ] = mtch . group ( 2 )
return id2sym |
def logical_chassis_fwdl_sanity_input_rbridge_id(self, **kwargs):
    """Build the XML payload for logical-chassis-fwdl-sanity rbridge-id input.

    kwargs:
        rbridge_id: text for the <rbridge-id> element (required).
        callback: callable applied to the built XML element; defaults to
            ``self._callback``.

    Returns the callback's result.
    """
    # BUG FIX: removed a throwaway ET.Element("config") that was created
    # and immediately overwritten; also renamed the local that shadowed
    # the builtin ``input``.
    sanity = ET.Element("logical_chassis_fwdl_sanity")
    input_el = ET.SubElement(sanity, "input")
    ET.SubElement(input_el, "rbridge-id").text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(sanity)
def load_json(self, path):
    """Load a JSON file from the user profile."""
    # must_exist makes profile_path raise early for missing files.
    resolved = self.profile_path(path, must_exist=True)
    with open(resolved, encoding='utf-8') as handle:
        return json.load(handle)
def get_colours(color_group, color_name, reverse=False):
    """Return the hex colors of a named colormap.

    :param color_group: colormap group name (matched case-insensitively)
    :param color_name: name of the map within the group
    :param reverse: if True, build the underlying map reversed
    :return: list of hex color strings
    """
    # BUG FIX: removed an unreachable triple-quoted string (dead,
    # commented-out code) that followed the return statement.
    color_group = color_group.lower()
    cmap = get_map(color_group, color_name, reverse=reverse)
    return cmap.hex_colors
def reccyl(rectan):
    """Convert from rectangular to cylindrical coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reccyl_c.html

    :param rectan: Rectangular coordinates of a point.
    :type rectan: 3-Element Array of floats
    :return:
        Distance from z axis,
        Angle (radians) from xZ plane,
        Height above xY plane.
    :rtype: tuple
    """
    vec = stypes.toDoubleVector(rectan)
    # Output cells for the C routine, written via byref.
    out_radius = ctypes.c_double(0)
    out_lon = ctypes.c_double(0)
    out_z = ctypes.c_double(0)
    libspice.reccyl_c(vec, ctypes.byref(out_radius),
                      ctypes.byref(out_lon), ctypes.byref(out_z))
    return out_radius.value, out_lon.value, out_z.value
def deleteMediaPreviews(self):
    """Delete the preview thumbnails for items in this library. This cannot
    be undone. Recreating media preview files can take hours or even days."""
    endpoint = '/library/sections/%s/indexes' % self.key
    # DELETE on the indexes endpoint drops all preview thumbnails.
    self._server.query(endpoint, method=self._server._session.delete)
def query_one(self, *args, **kwargs):
    """Return first document from :meth:`query`, with same parameters."""
    # next() with a default covers the empty-result case.
    return next(iter(self.query(*args, **kwargs)), None)
def confirmation(self, apdu):
    """This function is called when the application has provided a response
    and needs it to be sent to the client.

    Dispatches on the APDU type: aborts, simple acks/errors/rejects and
    single-segment complex acks complete the transaction immediately; large
    complex acks start a segmented response, subject to both peers
    supporting segmentation and the client's advertised limits.
    """
    if _debug:
        ServerSSM._debug("confirmation %r", apdu)

    # check to see we are in the correct state
    if self.state != AWAIT_RESPONSE:
        if _debug:
            ServerSSM._debug(" - warning: not expecting a response")

    # abort response: mark the transaction aborted and forward the abort
    if (apdu.apduType == AbortPDU.pduType):
        if _debug:
            ServerSSM._debug(" - abort")
        self.set_state(ABORTED)
        # send the response to the device
        self.response(apdu)
        return

    # simple response: never segmented, transaction is done
    if (apdu.apduType == SimpleAckPDU.pduType) or (apdu.apduType == ErrorPDU.pduType) or (apdu.apduType == RejectPDU.pduType):
        if _debug:
            ServerSSM._debug(" - simple ack, error, or reject")
        # transaction completed
        self.set_state(COMPLETED)
        # send the response to the device
        self.response(apdu)
        return

    # complex ack: may need to be split into segments
    if (apdu.apduType == ComplexAckPDU.pduType):
        if _debug:
            ServerSSM._debug(" - complex ack")

        # save the response and set the segmentation context
        self.set_segmentation_context(apdu)

        # the segment size is the minimum of the size of the largest packet
        # that can be delivered to the client and the largest it can accept
        if (not self.device_info) or (self.device_info.maxNpduLength is None):
            self.segmentSize = self.maxApduLengthAccepted
        else:
            self.segmentSize = min(self.device_info.maxNpduLength, self.maxApduLengthAccepted)
        if _debug:
            ServerSSM._debug(" - segment size: %r", self.segmentSize)

        # compute the segment count
        if not apdu.pduData:
            # always at least one segment
            self.segmentCount = 1
        else:
            # split into chunks, maybe need one more for the remainder
            self.segmentCount, more = divmod(len(apdu.pduData), self.segmentSize)
            if more:
                self.segmentCount += 1
        if _debug:
            ServerSSM._debug(" - segment count: %r", self.segmentCount)

        # make sure we support segmented transmit if we need to
        if self.segmentCount > 1:
            if _debug:
                ServerSSM._debug(" - segmentation required, %d segments", self.segmentCount)

            # make sure we support segmented transmit
            if self.segmentationSupported not in ('segmentedTransmit', 'segmentedBoth'):
                if _debug:
                    ServerSSM._debug(" - server can't send segmented responses")
                abort = self.abort(AbortReason.segmentationNotSupported)
                self.response(abort)
                return

            # make sure client supports segmented receive
            if not self.segmented_response_accepted:
                if _debug:
                    ServerSSM._debug(" - client can't receive segmented responses")
                abort = self.abort(AbortReason.segmentationNotSupported)
                self.response(abort)
                return

            # make sure we dont exceed the number of segments in our response
            # that the client said it was willing to accept in the request
            if (self.maxSegmentsAccepted is not None) and (self.segmentCount > self.maxSegmentsAccepted):
                if _debug:
                    ServerSSM._debug(" - client can't receive enough segments")
                abort = self.abort(AbortReason.apduTooLong)
                self.response(abort)
                return

        # initialize the segmentation state
        self.segmentRetryCount = 0
        self.initialSequenceNumber = 0
        self.actualWindowSize = None

        # send out the first segment (or the whole thing)
        if self.segmentCount == 1:
            self.response(apdu)
            self.set_state(COMPLETED)
        else:
            self.response(self.get_segment(0))
            self.set_state(SEGMENTED_RESPONSE, self.segmentTimeout)
    else:
        raise RuntimeError("invalid APDU (4)")
def check_exists(name, path):
    '''Check if the given path is an alternative for a name.

    .. versionadded:: 2015.8.4

    CLI Example:

    .. code-block:: bash

        salt '*' alternatives.check_exists name path
    '''
    cmd = [_get_cmd(), '--display', name]
    query = __salt__['cmd.run_all'](cmd, python_shell=False)
    if query['retcode'] > 0 and query['stderr'] != '':
        return False
    lines = query['stdout'].splitlines()
    return any(line.startswith(path) for line in lines)
def is_dead(self, proc, name):
    """Checks to see if the specified process is dead.

    :param psutil.Process proc: The process to check
    :param str name: The name of consumer
    :rtype: bool
    """
    LOGGER.debug('Checking %s (%r)', name, proc)
    try:
        status = proc.status()
    except psutil.NoSuchProcess:
        # process already gone entirely
        LOGGER.debug('NoSuchProcess: %s (%r)', name, proc)
        return True
    LOGGER.debug('Process %s (%s) status: %r (Unresponsive Count: %s)', name, proc.pid, status, self.unresponsive[name])
    if status in _PROCESS_RUNNING:
        return False
    elif status == psutil.STATUS_ZOMBIE:
        # give a zombie a brief chance to be reaped before terminating
        try:
            proc.wait(0.1)
        except psutil.TimeoutExpired:
            pass
    # not running: terminate and re-check the final status
    try:
        proc.terminate()
        status = proc.status()
    except psutil.NoSuchProcess:
        LOGGER.debug('NoSuchProcess: %s (%r)', name, proc)
        return True
    return status in _PROCESS_STOPPED_OR_DEAD
def load_template(self, name):
    """Attempts to load the relevant template from our templating
    system/environment.

    Args:
        name: The name of the template to load.

    Return:
        On success, a StatikTemplate object that can be used to render
        content.
    """
    # Serve from cache when possible - speeds up repeated loads.
    if name in self.cached_templates:
        logger.debug("Using cached template: %s", name)
        return self.cached_templates[name]

    logger.debug("Attempting to find template by name: %s", name)
    name_with_ext, provider_name, base_path = self.find_template_details(name)
    full_path = os.path.join(base_path, name_with_ext) if base_path is not None else None

    # Load it with the relevant provider, routing errors through the
    # shared exception handler.
    provider = self.get_provider(provider_name)
    template = template_exception_handler(
        lambda: provider.load_template(name_with_ext, full_path=full_path),
        self.error_context,
        filename=full_path,
    )

    # Cache it for potential later use.
    self.cached_templates[name] = template
    return template
def result(self):
    """Get the result(s) for this hook call (DEPRECATED in favor of
    ``get_result()``)."""
    warnings.warn(
        DeprecationWarning("Use get_result() which forces correct exception handling"),
        stacklevel=2,
    )
    return self._result
def split_camel(word: str) -> str:
    """Separate any words joined in Camel case fashion using single spaces.

    Every lower/upper junction that is followed by a lowercase letter is
    split (the previous implementation only split the first junction,
    despite the docstring promising "any words").

    >>> split_camel('esseCarthaginienses')
    'esse Carthaginienses'
    >>> split_camel('urbemCertimam')
    'urbem Certimam'
    """
    # Insert a space before each uppercase letter that follows a lowercase
    # letter and is itself followed by a lowercase letter - the same
    # junction shape the original single-split regex matched.
    return re.sub(r'(?<=[a-z])(?=[A-Z][a-z])', ' ', word)
def acl_middleware(callback):
    """Returns a aiohttp_auth.acl middleware factory for use by the aiohttp
    application object.

    Args:
        callback: A callable taking a user_id (as returned from the
            auth.get_auth function) and returning a sequence of permitted
            ACL groups. An empty tuple means no explicit permissions;
            None explicitly forbids that user_id. The user_id may be None
            when no authenticated user exists.

    Returns:
        A aiohttp middleware factory.
    """
    async def factory(app, handler):
        async def handle(request):
            # Stash the ACL-groups callback on the request for later lookup.
            request[GROUPS_KEY] = callback
            # Continue down the middleware chain.
            return await handler(request)
        return handle
    return factory
def _handleDecodeHextileRAW ( self , block , bg , color , x , y , width , height , tx , ty , tw , th ) :
"""the tile is in raw encoding""" | self . updateRectangle ( tx , ty , tw , th , block )
self . _doNextHextileSubrect ( bg , color , x , y , width , height , tx , ty ) |
def QA_data_tick_resample(tick, type_='1min'):
    """Resample tick (transaction) data into bars of an arbitrary frequency.

    Arguments:
        tick {[type]} -- transaction data indexed by datetime, with
            ``price``, ``vol`` and ``code`` columns
    Returns:
        [type] -- OHLC bars indexed by (datetime, code)

    NOTE(review): uses ``DataFrame.append`` and the ``base``/``loffset``
    resample parameters, which were removed in recent pandas versions -
    this code presumably targets an older pandas; verify before upgrading.
    """
    # turnover per tick
    tick = tick.assign(amount=tick.price * tick.vol)
    resx = pd.DataFrame()
    # iterate over each calendar day present in the index
    _temp = set(tick.index.date)
    for item in _temp:
        _data = tick.loc[str(item)]
        # morning session 09:31-11:30; base=30 aligns bins to the half hour
        _data1 = _data[time(9, 31):time(11, 30)].resample(type_, closed='right', base=30, loffset=type_).apply({'price': 'ohlc', 'vol': 'sum', 'code': 'last', 'amount': 'sum'})
        # afternoon session 13:01-15:00
        _data2 = _data[time(13, 1):time(15, 0)].resample(type_, closed='right', loffset=type_).apply({'price': 'ohlc', 'vol': 'sum', 'code': 'last', 'amount': 'sum'})
        resx = resx.append(_data1).append(_data2)
    # drop the 'price' level introduced by the ohlc aggregation
    resx.columns = resx.columns.droplevel(0)
    return resx.reset_index().drop_duplicates().set_index(['datetime', 'code'])
async def get_shade(self, shade_id, from_cache=True) -> BaseShade:
    """Get a shade instance based on shade id.

    When ``from_cache`` is False the shade list is refreshed first.
    Raises ResourceNotFoundException when no shade matches.
    """
    if not from_cache:
        await self.get_shades()
    match = next((shade for shade in self.shades if shade.id == shade_id), None)
    if match is not None:
        return match
    raise ResourceNotFoundException("Shade not found. Id: {}".format(shade_id))
def sun_utc(self, date, latitude, longitude, observer_elevation=0):
    """Calculate all the info for the sun at once.

    All times are returned in the UTC timezone.

    :param date: Date to calculate for.
    :type date: :class:`datetime.date`
    :param latitude: Latitude - Northern latitudes should be positive
    :type latitude: float
    :param longitude: Longitude - Eastern longitudes should be positive
    :type longitude: float
    :param observer_elevation: Elevation in metres to calculate sun for
    :type observer_elevation: int
    :returns: Dictionary with keys ``dawn``, ``sunrise``, ``noon``,
        ``sunset`` and ``dusk`` whose values are the results of the
        corresponding `_utc` methods.
    :rtype: dict
    """
    return {
        "dawn": self.dawn_utc(date, latitude, longitude, observer_elevation=observer_elevation),
        "sunrise": self.sunrise_utc(date, latitude, longitude, observer_elevation=observer_elevation),
        "noon": self.solar_noon_utc(date, longitude),
        "sunset": self.sunset_utc(date, latitude, longitude, observer_elevation=observer_elevation),
        "dusk": self.dusk_utc(date, latitude, longitude, observer_elevation=observer_elevation),
    }
def GET_save_getitemvalues(self) -> None:
    """Save the values of all current |GetItem| objects."""
    target = state.getitemvalues[self._id]
    for item in state.getitems:
        for name, value in item.yield_name2value(state.idx1, state.idx2):
            target[name] = value
def sort(line):
    """Reorder the endpoints of a line segment into a canonical direction.

    For mostly-horizontal segments the smaller x comes first; for
    mostly-vertical (or diagonal) segments the smaller y comes first.
    """
    x0, y0, x1, y1 = line
    if abs(x1 - x0) > abs(y1 - y0):
        flip = x1 < x0
    else:
        flip = y1 < y0
    return (x1, y1, x0, y0) if flip else line
def _to_dict ( self ) :
"""Return a json dictionary representing this model .""" | _dict = { }
if hasattr ( self , 'sentence_id' ) and self . sentence_id is not None :
_dict [ 'sentence_id' ] = self . sentence_id
if hasattr ( self , 'text' ) and self . text is not None :
_dict [ 'text' ] = self . text
if hasattr ( self , 'tones' ) and self . tones is not None :
_dict [ 'tones' ] = [ x . _to_dict ( ) for x in self . tones ]
if hasattr ( self , 'tone_categories' ) and self . tone_categories is not None :
_dict [ 'tone_categories' ] = [ x . _to_dict ( ) for x in self . tone_categories ]
if hasattr ( self , 'input_from' ) and self . input_from is not None :
_dict [ 'input_from' ] = self . input_from
if hasattr ( self , 'input_to' ) and self . input_to is not None :
_dict [ 'input_to' ] = self . input_to
return _dict |
def draw_text(data, obj):
    """Paints text on the graph.

    :param data: translation state dict (font size, float format, current
        axis title, colour registry, ...)
    :param obj: a matplotlib ``Text`` (possibly ``Annotation``) instance
    :returns: the (possibly updated) ``data`` dict and a list of TikZ code
        fragments for this text node
    """
    content = []
    properties = []
    style = []
    if isinstance(obj, mpl.text.Annotation):
        _annotation(obj, data, content)
    # The emitted node has four parts:
    # 1: coordinates
    # 2: properties (shapes, rotation, etc)
    # 3: text style
    # 4: the text
    pos = obj.get_position()
    # from .util import transform_to_data_coordinates
    # pos = transform_to_data_coordinates(obj, *pos)
    text = obj.get_text()
    if text in ["", data["current axis title"]]:
        # Text nodes which are direct children of Axes are typically titles.  They are
        # already captured by the `title` property of pgfplots axes, so skip them here.
        return data, content
    size = obj.get_size()
    bbox = obj.get_bbox_patch()
    converter = mpl.colors.ColorConverter()
    # without the factor 0.5, the fonts are too big most of the time.
    # TODO fix this
    scaling = 0.5 * size / data["font size"]
    ff = data["float format"]
    if scaling != 1.0:
        properties.append(("scale=" + ff).format(scaling))
    if bbox is not None:
        _bbox(bbox, data, properties, scaling)
    ha = obj.get_ha()
    va = obj.get_va()
    anchor = _transform_positioning(ha, va)
    if anchor is not None:
        properties.append(anchor)
    data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color()))
    properties.append("text={}".format(col))
    properties.append("rotate={:.1f}".format(obj.get_rotation()))
    if obj.get_style() == "italic":
        style.append("\\itshape")
    else:
        assert obj.get_style() == "normal"
    # From matplotlib/font_manager.py:
    #    weight_dict = {'ultralight': 100, 'light': 200, 'normal': 400,
    #                   'regular': 400, 'book': 400, 'medium': 500,
    #                   'roman': 500, 'semibold': 600, 'demibold': 600,
    #                   'demi': 600, 'bold': 700, 'heavy': 800,
    #                   'extra bold': 800, 'black': 900}
    # get_weight returns a numeric value in the range 0-1000 or one of the
    # names above.
    weight = obj.get_weight()
    if weight in ["semibold", "demibold", "demi", "bold", "heavy", "extra bold", "black", ] or (isinstance(weight, int) and weight > 550):
        style.append("\\bfseries")
    # \lfseries isn't that common yet
    # elif weight == 'light' or (isinstance(weight, int) and weight < 300):
    #     style.append('\\lfseries')
    if obj.axes:
        # If the coordinates are relative to an axis, use `axis cs`.
        tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos)
    else:
        # relative to the entire figure, it's a getting a littler harder.  See
        # <http://tex.stackexchange.com/a/274902/13262> for a solution to the
        # problem:
        tikz_pos = ("({{$(current bounding box.south west)!" + ff + "!" "(current bounding box.south east)$}}" "|-" "{{$(current bounding box.south west)!" + ff + "!" "(current bounding box.north west)$}})").format(*pos)
    if "\n" in text:
        # http://tex.stackexchange.com/a/124114/13262
        properties.append("align={}".format(ha))
        # Manipulating the text here is actually against mpl2tikz's policy not
        # to do that.  On the other hand, newlines should translate into
        # newlines.
        # We might want to remove this here in the future.
        text = text.replace("\n ", "\\\\")
    content.append("\\node at {}[\n {}\n]{{{}}};\n".format(tikz_pos, ",\n ".join(properties), " ".join(style + [text])))
    return data, content
def autocorr_noise_id(x, af, data_type="phase", dmin=0, dmax=2):
    """Lag-1 autocorrelation based noise identification.

    Parameters
    ----------
    x : numpy.array
        Phase or fractional frequency time-series data.  Minimum
        recommended length is len(x) > 30, roughly.
    af : int
        Averaging factor.
    data_type : string {'phase', 'freq'}
        "phase" for phase data in seconds,
        "freq" for fractional frequency data.
    dmin : int
        Minimum required number of differentiations in the algorithm.
    dmax : int
        Maximum number of differentiations; defaults to 2 for ADEV,
        set to 3 for HDEV.

    Returns
    -------
    alpha_int : int
        Noise-slope as integer.
    alpha : float
        Noise-slope as float.
    d : int
        Number of differentiations of the time-series performed.
    rho : float
        Lag-1 autocorrelation ratio r1 / (1 + r1).

    Raises
    ------
    NotImplementedError
        If the (decimated/averaged) time-series is shorter than 30 points.

    Notes
    -----
    Power law noise identification using the lag 1 autocorrelation,
    Riley, W.J. et al., 18th European Frequency and Time Forum (EFTF 2004).
    http://www.stable32.com/Auto.pdf
    https://ieeexplore.ieee.org/document/5075021
    """
    # BUGFIX: the original compared strings with `is`, which only works by
    # accident of CPython string interning; use `==` instead.
    d = 0  # number of differentiations performed so far
    if data_type == "phase":
        if af > 1:
            # decimate by the averaging factor
            x = x[0:len(x):af]
        # remove quadratic trend (frequency offset and drift)
        x = detrend(x, deg=2)
    elif data_type == "freq":
        # average in non-overlapping groups of af samples
        y_cut = np.array(x[:len(x) - (len(x) % af)])  # cut to a multiple of af
        assert len(y_cut) % af == 0
        y_shaped = y_cut.reshape((int(len(y_cut) / af), af))
        x = np.average(y_shaped, axis=1)
        # remove linear frequency drift
        x = detrend(x, deg=1)
    # require minimum length for time-series
    if len(x) < 30:
        print("autocorr_noise_id() Don't know how to do noise-ID for time-series length= %d" % len(x))
        raise NotImplementedError
    while True:
        r1 = lag1_acf(x)
        rho = r1 / (1.0 + r1)
        if d >= dmin and (rho < 0.25 or d >= dmax):
            p = -2 * (rho + d)
            # phase data has a slope offset of +2 relative to frequency data
            phase_add2 = 2 if data_type == "phase" else 0
            alpha = p + phase_add2
            alpha_int = int(-1.0 * np.round(2 * rho) - 2.0 * d) + phase_add2
            return alpha_int, alpha, d, rho
        else:
            # differentiate and try again
            x = np.diff(x)
            d = d + 1
def sanity_check(vcs):
    """Do sanity check before making changes.

    Check that we are not on a tag and/or do not have local changes.
    Exits with status 1 if the user declines to continue.
    """
    if vcs.is_clean_checkout():
        return
    q = ("This is NOT a clean checkout. You are on a tag or you have "
         "local changes.\n"
         "Are you sure you want to continue?")
    if not ask(q, default=False):
        sys.exit(1)
def _get_subplot_extents(self, overlay, ranges, range_type):
    """Iterates over all subplots and collects the extents of each.

    :param overlay: the overlay whose layers are matched against subplots
    :param ranges: optional mapping of per-layer ranges
    :param range_type: extent category to collect, or 'combined' to
        collect 'extents', 'soft', 'hard' and 'data' at once
    :returns: dict mapping range-type name to a list of per-subplot extents
    """
    if range_type == 'combined':
        extents = {'extents': [], 'soft': [], 'hard': [], 'data': []}
    else:
        extents = {range_type: []}
    items = overlay.items()
    if self.batched and self.subplots:
        # batched rendering shares a single subplot across all overlay keys
        subplot = list(self.subplots.values())[0]
        subplots = [(k, subplot) for k in overlay.data.keys()]
    else:
        subplots = self.subplots.items()
    for key, subplot in subplots:
        found = False
        if subplot is None:
            continue
        layer = overlay.data.get(key, None)
        if isinstance(self.hmap, DynamicMap) and layer is None:
            # DynamicMap overlays may not contain the key directly; fall
            # back to the first layer whose type matches the subplot's hmap
            for _, layer in items:
                if isinstance(layer, subplot.hmap.type):
                    found = True
                    break
            if not found:
                layer = None
        if layer is None or not subplot.apply_ranges:
            continue
        if isinstance(layer, CompositeOverlay):
            sp_ranges = ranges
        else:
            sp_ranges = util.match_spec(layer, ranges) if ranges else {}
        for rt in extents:
            extent = subplot.get_extents(layer, sp_ranges, range_type=rt)
            extents[rt].append(extent)
    return extents
def connection_lost(self, exception):
    """Called when the serial port is closed or the reader loop terminated
    otherwise.

    ``exception`` is an Exception on abnormal loss, otherwise the close
    was orderly.
    """
    if not isinstance(exception, Exception):
        logger.debug('Connection to port `%s` closed', self.port)
    else:
        logger.debug('Connection to port `%s` lost: %s', self.port, exception)
    # flip the connection-state events for any waiters
    self.connected.clear()
    self.disconnected.set()
def from_string(cls, key, key_id=None):
    """Construct an Signer instance from a private key in PEM format.

    Args:
        key (str): Private key in PEM format.
        key_id (str): An optional key id used to identify the private key.

    Returns:
        google.auth.crypt.Signer: The constructed signer.

    Raises:
        ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
            PEM format.
    """
    key = _helpers.from_bytes(key)
    # PEM expects str in Python 3; readPemBlocksFromFile returns the index
    # of the marker set that matched (0 -> PKCS#1, 1 -> PKCS#8) plus the
    # DER-encoded key body.
    marker_id, key_bytes = pem.readPemBlocksFromFile(six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
    # Key is in pkcs1 format.
    if marker_id == 0:
        private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format='DER')
    # Key is in pkcs8.
    elif marker_id == 1:
        key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
        if remaining != b'':
            raise ValueError('Unused bytes', remaining)
        # PKCS#8 wraps a PKCS#1 key; unwrap and parse the inner key.
        private_key_info = key_info.getComponentByName('privateKey')
        private_key = rsa.key.PrivateKey.load_pkcs1(private_key_info.asOctets(), format='DER')
    else:
        raise ValueError('No key could be detected.')
    return cls(private_key, key_id=key_id)
def pathFromIndex(self, index):
    """Returns the joined path from the given model index.  This will
    join together the full path with periods.

    :param      index | <QModelIndex>
    :return     <str>
    """
    segments = []
    node = self._model.itemFromIndex(index)
    while node:
        # walk up the tree, building the path root-first
        segments.insert(0, nativestring(node.text()))
        node = node.parent()
    return '.'.join(segments)
def layer_covariance(layer1, layer2=None):
    """Computes the covariance matrix between the neurons of two layers.

    If only one layer is passed, computes the symmetric covariance matrix
    of that layer.

    Args:
        layer1: layer object exposing an ``activations`` array of shape
            (num_datapoints, num_neurons).
        layer2: optional second layer; defaults to ``layer1``.

    Returns:
        numpy.ndarray: (neurons1, neurons2) covariance matrix.
    """
    # Explicit None test: `layer2 or layer1` would silently fall back to
    # layer1 for any falsy-but-valid layer object.
    if layer2 is None:
        layer2 = layer1
    act1, act2 = layer1.activations, layer2.activations
    num_datapoints = act1.shape[0]
    # cast to avoid numpy type promotion during division
    return np.matmul(act1.T, act2) / float(num_datapoints)
def _GetParentModificationTime(self, gzip_file_entry):
    """Retrieves the modification time of the file entry's parent file.

    Note that this retrieves the time from the file entry of the parent of
    the gzip file entry's path spec, which is different from trying to
    retrieve it from the gzip file entry's parent file entry.

    It would be preferable to retrieve the modification time from the
    metadata in the gzip file itself, but it appears to not be set when the
    file is written by fseventsd.

    Args:
        gzip_file_entry (dfvfs.FileEntry): file entry of the gzip file
            containing the fseventsd data.

    Returns:
        dfdatetime.DateTimeValues: parent modification time, or None if not
            available.
    """
    parent_spec = gzip_file_entry.path_spec.parent
    parent_entry = path_spec_resolver.Resolver.OpenFileEntry(parent_spec)
    if parent_entry:
        return parent_entry.modification_time
    return None
def _threshold_batch(self, vectors, batch_size, threshold, show_progressbar, return_names):
    """Batched cosine distance: yield, per query vector, the reference
    items whose similarity is >= threshold, best first."""
    normalized = self.normalize(vectors)
    # Single transpose, makes things faster.
    reference_t = self.norm_vectors.T
    for start in tqdm(range(0, len(normalized), batch_size), disable=not show_progressbar):
        sims = normalized[start:start + batch_size].dot(reference_t)
        # For safety we clip
        sims = np.clip(sims, a_min=.0, a_max=1.0)
        for row in sims:
            hits = np.flatnonzero(row >= threshold)
            ordered = hits[np.argsort(-row[hits])]
            if return_names:
                yield [(self.indices[j], row[j]) for j in ordered]
            else:
                yield list(row[ordered])
def reduce_by(self, package_request):
    """Reduce this scope wrt a package request.

    Returns:
        A (_PackageScope, [Reduction]) tuple, where the scope is a new
        scope copy with reductions applied, or self if there were no
        reductions, or None if the scope was completely reduced.
    """
    # solver statistics: every attempted reduction is counted
    self.solver.reduction_broad_tests_count += 1
    if self.package_request.conflict:
        # conflict scopes don't reduce. Instead, other scopes will be
        # reduced against a conflict scope.
        return (self, [])
    # perform the reduction
    new_slice, reductions = self.variant_slice.reduce_by(package_request)
    # there was total reduction
    if new_slice is None:
        self.solver.reductions_count += 1
        if self.pr:
            reqstr = _short_req_str(package_request)
            self.pr("%s was reduced to nothing by %s", self, reqstr)
            self.pr.br()
        return (None, reductions)
    # there was some reduction: wrap the new slice in a copied scope
    if new_slice is not self.variant_slice:
        self.solver.reductions_count += 1
        scope = self._copy(new_slice)
        if self.pr:
            reqstr = _short_req_str(package_request)
            self.pr("%s was reduced to %s by %s", self, scope, reqstr)
            self.pr.br()
        return (scope, reductions)
    # there was no reduction
    return (self, [])
def calc_offset(cube):
    """Calculate an offset.

    Calculate offset from the side of the data so that at least 200 image
    pixels are in the MAD stats.

    Parameters
    ----------
    cube : pyciss.ringcube.RingCube
        Cubefile with ring image
    """
    col = 0
    # advance until a column has at least 200 valid (non-NaN) pixels
    while pd.Series(cube.img[:, col]).count() < 200:
        col += 1
    # never return less than 20 columns of offset
    return max(col, 20)
def set_linetrace_on_frame(f, localtrace=None):
    """Non-portable function to modify linetracing.

    Remember to enable global tracing with :py:func:`sys.settrace`,
    otherwise no effect!

    :param f: frame object whose trace-function slot is patched
    :param localtrace: trace callable to install, or None to clear it
    """
    # get_frame_pointers returns a ctypes pointer to the frame's trace slot
    # (CPython implementation detail - not portable across interpreters)
    traceptr, _, _ = get_frame_pointers(f)
    if localtrace is not None:
        # Need to incref to avoid the frame causing a double-delete
        ctypes.pythonapi.Py_IncRef(localtrace)
        # Not sure if this is the best way to do this, but it works.
        addr = id(localtrace)
    else:
        # address 0 acts as a NULL pointer, clearing the trace function
        addr = 0
    traceptr.contents = ctypes.py_object.from_address(addr)
def normalize_bins(self, inplace: bool = False) -> "HistogramCollection":
    """Normalize each bin in the collection so that the sum is 1.0 for
    each bin.

    Note: If a bin is zero in all collections, the result will be inf.
    """
    target = self if inplace else self.copy()
    totals = self.sum().frequencies
    for hist in target.histograms:
        hist.set_dtype(float)
        hist._frequencies /= totals
        # errors scale quadratically with the normalization factor
        hist._errors2 /= totals ** 2
    # TODO: Does this make sense?
    return target
async def enqueue_job(
    self,
    function: str,
    *args: Any,
    _job_id: Optional[str] = None,
    _defer_until: Optional[datetime] = None,
    _defer_by: Union[None, int, float, timedelta] = None,
    _expires: Union[None, int, float, timedelta] = None,
    _job_try: Optional[int] = None,
    **kwargs: Any,
) -> Optional[Job]:
    """Enqueue a job.

    :param function: Name of the function to call
    :param args: args to pass to the function
    :param _job_id: ID of the job, can be used to enforce job uniqueness
    :param _defer_until: datetime at which to run the job
    :param _defer_by: duration to wait before running the job
    :param _expires: if the job still hasn't started after this duration, do not run it
    :param _job_try: useful when re-enqueueing jobs within a job
    :param kwargs: any keyword arguments to pass to the function
    :return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
    """
    job_id = _job_id or uuid4().hex
    job_key = job_key_prefix + job_id
    assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
    defer_by_ms = to_ms(_defer_by)
    expires_ms = to_ms(_expires)
    with await self as conn:
        # WATCH the job key so the MULTI/EXEC below aborts if another
        # producer enqueues the same job id concurrently.
        pipe = conn.pipeline()
        pipe.unwatch()
        pipe.watch(job_key)
        job_exists = pipe.exists(job_key)
        await pipe.execute()
        if await job_exists:
            # a job with this ID is already queued - enforce uniqueness
            return
        enqueue_time_ms = timestamp_ms()
        # the sorted-set score determines when the job becomes runnable
        if _defer_until is not None:
            score = to_unix_ms(_defer_until)
        elif defer_by_ms:
            score = enqueue_time_ms + defer_by_ms
        else:
            score = enqueue_time_ms
        # default expiry: the deferral interval plus a safety margin
        expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
        job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
        tr = conn.multi_exec()
        tr.psetex(job_key, expires_ms, job)
        tr.zadd(queue_name, score, job_id)
        try:
            await tr.execute()
        except MultiExecError:
            # job got enqueued since we checked 'job_exists'
            return
        return Job(job_id, self)
def from_string(cls, cl_function, dependencies=(), nmr_constraints=None):
    """Parse the given CL function into a SimpleCLFunction object.

    Args:
        cl_function (str): the function we wish to turn into an object
        dependencies (list or tuple of CLLibrary): The list of CL libraries
            this function depends on

    Returns:
        SimpleCLFunction: the CL data type for this parameter declaration
    """
    ret_type, func_name, param_list, func_body = split_cl_function(cl_function)
    return SimpleConstraintFunction(
        ret_type, func_name, param_list, func_body,
        dependencies=dependencies, nmr_constraints=nmr_constraints)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.