signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get(self, key, value):
    """Fetch a single group record, looked up either by id or by name.

    Supports resource cache.

    Keyword Args:
        id (str): Full Group ID
        name (str): Group name

    Raises:
        TypeError: Unexpected or more than one keyword argument provided
        ValueError: No matching group found based on provided inputs

    Returns:
        Group: Group instance matching provided inputs
    """
    if key == 'id':
        raw = self._swimlane.request('get', 'groups/{}'.format(value))
        return Group(self._swimlane, raw.json())

    # Name lookup may return several candidates; keep only the exact match.
    raw = self._swimlane.request('get', 'groups/lookup?name={}'.format(value))
    for candidate in raw.json():
        if candidate.get('name') == value:
            return Group(self._swimlane, candidate)

    raise ValueError('Unable to find group with name "{}"'.format(value))
def submit(args=None):
    """Perform the submit according to *args* and return an object
    describing the result."""
    streamsx._streams._version._mismatch_check('streamsx.topology.context')
    parsed = _parse_args(args)
    if parsed.topology is not None:
        app = _get_topology_app(parsed)
    elif parsed.main_composite is not None:
        app = _get_spl_app(parsed)
    elif parsed.bundle is not None:
        app = _get_bundle(parsed)
    _job_config_args(parsed, app)
    result = _submit(parsed, app)
    # Guarantee callers always see a return code; default to failure (1).
    if 'return_code' not in result:
        result['return_code'] = 1
    print(result)
    return result
def generate_intersect_subparser(subparsers):
    """Register the 'intersect' sub-command on *subparsers* to make an
    intersection query."""
    parser = subparsers.add_parser(
        'intersect',
        description=constants.INTERSECT_DESCRIPTION,
        epilog=constants.INTERSECT_EPILOG,
        formatter_class=ParagraphFormatter,
        help=constants.INTERSECT_HELP)
    parser.set_defaults(func=ngram_intersection)
    # Attach the shared argument groups used by every sub-command.
    for attach in (utils.add_common_arguments, utils.add_db_arguments,
                   utils.add_corpus_arguments, utils.add_query_arguments):
        attach(parser)
def poll_parser(poll):
    """Parse a raw poll item dict into a Poll instance.

    :param poll: dict describing the item; its 'type' must be in poll_types
    :raises ValueError: if the item is not a poll type
    :return: Poll instance, or the result of deleted_parser for deleted items
    """
    if __is_deleted(poll):
        return deleted_parser(poll)
    if poll['type'] not in poll_types:
        # ValueError instead of a bare Exception: callers can catch
        # invalid-input errors specifically, and `except Exception` still works.
        raise ValueError('Not a poll type: {!r}'.format(poll['type']))
    return Poll(poll['id'], poll['by'],
                __check_key('kids', poll),   # poll and pollopt differ on this property
                __check_key('parts', poll),  # poll and pollopt differ on this property
                poll['score'], poll['text'], poll['time'],
                poll['title'], poll['type'], )
def get_biome_color_based_on_elevation(world, elev, x, y, rng):
    '''Business logic for the base biome color of one pixel in satellite view.

    Generates per-pixel rgb "noise", adjusts it by elevation, and combines it
    with the biome's base color.  The basic rules regarding noise generation:
      - oceans have no noise added;
      - land tiles start with noise inside (-NOISE_RANGE, NOISE_RANGE) for
        each rgb value;
      - land tiles with high elevations further modify the noise by set
        amounts (to drain some of the color and make the map look more like
        mountains).
    The biome's base color may be interpolated with a predefined mountain
    brown color if the elevation is high enough.  Finally, the noise plus the
    biome color are added and returned.

    rng is a random number generator instance (numpy-style: its randint()
    accepts a size= keyword) used to draw the samples needed here.
    '''
    v = world.biome_at((x, y)).name()
    biome_color = _biome_satellite_colors[v]
    # Default is no noise - will be overwritten if this tile is land
    noise = (0, 0, 0)
    if world.is_land((x, y)):
        # Generate some random noise to apply to this pixel: one value per
        # rgb channel, drawn in a single call.  The noise will be further
        # modified by the height of this tile below.
        noise = rng.randint(-NOISE_RANGE, NOISE_RANGE, size=3)
        # Case 1 - elevation is very high: brighten slightly to simulate
        # snow-topped mountains and tint toward MOUNTAIN_COLOR.
        if elev > HIGH_MOUNTAIN_ELEV:
            noise = add_colors(noise, HIGH_MOUNTAIN_NOISE_MODIFIER)
            biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
        # Case 2 - elevation is high: darken slightly (especially draining
        # the green) and tint toward MOUNTAIN_COLOR.
        elif elev > MOUNTAIN_ELEV:
            noise = add_colors(noise, MOUNTAIN_NOISE_MODIFIER)
            biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
        # Case 3 - elevation is somewhat high.
        elif elev > HIGH_HILL_ELEV:
            noise = add_colors(noise, HIGH_HILL_NOISE_MODIFIER)
        # Case 4 - elevation is a little bit high.
        elif elev > HILL_ELEV:
            noise = add_colors(noise, HILL_NOISE_MODIFIER)
    # There is also a minor base modifier to the pixel's rgb value based on
    # height, applied to every tile (ocean included).
    modification_amount = int(elev / BASE_ELEVATION_INTENSITY_MODIFIER)
    base_elevation_modifier = (modification_amount, modification_amount, modification_amount)
    this_tile_color = add_colors(biome_color, noise, base_elevation_modifier)
    return this_tile_color
def load_builtin_plugins() -> int:
    """Load the built-in plugins distributed along with the "nonebot" package."""
    builtin_dir = os.path.join(os.path.dirname(__file__), 'plugins')
    return load_plugins(builtin_dir, 'nonebot.plugins')
def settings(**kwargs):
    """Temporarily apply the given PyEMMA config values within a context."""
    from pyemma import config
    previous = {}
    try:
        # Remember each old setting before applying the new one; setattr may
        # raise ValueError if an invalid setting is given.
        for option, new_value in kwargs.items():
            previous[option] = getattr(config, option)
            setattr(config, option, new_value)
        yield
    finally:
        # Restore whatever settings were captured before the failure/exit.
        for option, old_value in previous.items():
            setattr(config, option, old_value)
def update(self, id, name=None, parent_id=None, order=None):
    """Update a department.

    https://work.weixin.qq.com/api/doc#90000/90135/90206

    :param id: Department id
    :param name: Department name, 1-32 characters; must not contain \\:?"<>|
    :param parent_id: Id of the parent department
    :param order: Ordering value within the parent department; larger values
        sort first.  Valid range is [0, 2^32)
    :return: JSON data returned by the API
    """
    payload = optionaldict(id=id, name=name, parentid=parent_id, order=order)
    return self._post('department/update', data=payload)
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs):
    """Merge inputs for a sub-workflow, adding any not present inputs in out.

    Skips inputs that are internally generated or generated and ignored,
    keeping only as inputs those that we do not generate internally.

    NOTE(review): the `parallel` argument is accepted but never read here --
    presumably kept for signature parity with callers; confirm.
    """
    internal_generated_ids = []
    for vignore in to_ignore:
        vignore_id = _get_string_vid(vignore)
        # ignore anything we generate internally, but not those we need to
        # pull in from the external process
        if vignore_id not in [v["id"] for v in wf_outputs]:
            internal_generated_ids.append(vignore_id)
    ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs])
    cur_ids = set([v["id"] for v in out])
    remapped_new = []
    for v in new:
        remapped_v = copy.deepcopy(v)
        outv = copy.deepcopy(v)
        # Expose the variable under its base id, sourced from the full id.
        outv["id"] = get_base_id(v["id"])
        outv["source"] = v["id"]
        if outv["id"] not in cur_ids and outv["id"] not in ignore_ids:
            if nested_inputs and v["id"] in nested_inputs:
                outv = _flatten_nested_input(outv)
            out.append(outv)
        # NOTE(review): assumes every entry already in `out` carries a
        # "source" key -- confirm; a pre-existing entry without one would
        # raise KeyError here.
        if remapped_v["id"] in set([v["source"] for v in out]):
            remapped_v["source"] = get_base_id(remapped_v["id"])
        remapped_new.append(remapped_v)
    return out, remapped_new
def dfs_grid(grid, i, j, mark='X', free='.'):
    """Iterative DFS on a grid: flood the 4-connected component of (i, j).

    :param grid: matrix, 4-neighborhood
    :param i, j: cell in this matrix, start of the DFS exploration
    :param mark: symbol written over every visited cell
    :param free: symbol of walkable cells
    :complexity: linear in the size of the component
    """
    rows, cols = len(grid), len(grid[0])
    stack = [(i, j)]
    grid[i][j] = mark
    while stack:
        r, c = stack.pop()
        for nr, nc in ((r + 1, c), (r, c + 1), (r - 1, c), (r, c - 1)):
            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == free:
                grid[nr][nc] = mark  # mark before pushing to avoid duplicates
                stack.append((nr, nc))
def _local_browser_class(browser_name):
    """Returns class, kwargs, and args needed to instantiate the local browser.

    Raises BrowserConfigError if *browser_name* is not a key of BROWSERS.
    Headless mode is controlled by the BOKCHOY_HEADLESS environment variable.
    """
    # Log name of local browser
    LOGGER.info(u"Using local browser: %s [Default is firefox]", browser_name)
    # Get class of local browser based on name
    browser_class = BROWSERS.get(browser_name)
    headless = os.environ.get('BOKCHOY_HEADLESS', 'false').lower() == 'true'
    if browser_class is None:
        raise BrowserConfigError(
            u"Invalid browser name {name}. Options are: {options}".format(
                name=browser_name, options=", ".join(list(BROWSERS.keys()))))
    else:
        if browser_name == 'firefox':
            # Remove geckodriver log data from previous test cases
            log_path = os.path.join(os.getcwd(), 'geckodriver.log')
            if os.path.exists(log_path):
                os.remove(log_path)
            firefox_options = FirefoxOptions()
            firefox_options.log.level = 'trace'
            if headless:
                firefox_options.headless = True
            browser_args = []
            browser_kwargs = {
                'firefox_profile': _firefox_profile(),
                'options': firefox_options,
            }
            # Optional overrides for the firefox binary location and log file.
            firefox_path = os.environ.get('SELENIUM_FIREFOX_PATH')
            firefox_log = os.environ.get('SELENIUM_FIREFOX_LOG')
            if firefox_path and firefox_log:
                browser_kwargs.update({
                    'firefox_binary': FirefoxBinary(
                        firefox_path=firefox_path, log_file=firefox_log)})
            elif firefox_path:
                browser_kwargs.update({
                    'firefox_binary': FirefoxBinary(firefox_path=firefox_path)})
            elif firefox_log:
                browser_kwargs.update({
                    'firefox_binary': FirefoxBinary(log_file=firefox_log)})
        elif browser_name == 'chrome':
            chrome_options = ChromeOptions()
            if headless:
                chrome_options.headless = True
            # Emulate webcam and microphone for testing purposes
            chrome_options.add_argument('--use-fake-device-for-media-stream')
            # Bypasses the security prompt displayed by the browser when it
            # attempts to access a media device (e.g., a webcam)
            chrome_options.add_argument('--use-fake-ui-for-media-stream')
            browser_args = []
            browser_kwargs = {
                'options': chrome_options,
            }
        else:
            # Any other configured browser gets no special arguments.
            browser_args, browser_kwargs = [], {}
        return browser_class, browser_args, browser_kwargs
def bss_eval_sources(reference_sources, estimated_sources, compute_permutation=True):
    """Ordering and measurement of the separation quality for estimated source
    signals in terms of filtered true source, interference and artifacts.

    The decomposition allows a time-invariant filter distortion of length
    512, as described in Section III.B of [#vincent2006performance]_.

    Passing ``False`` for ``compute_permutation`` will improve the computation
    performance of the evaluation; however, it is not always appropriate and
    is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.

    Examples
    --------
    >>> # reference_sources[n] should be an ndarray of samples of the
    >>> # n'th reference source
    >>> # estimated_sources[n] should be the same for the n'th estimated
    >>> # source
    >>> (sdr, sir, sar,
    ...  perm) = mir_eval.separation.bss_eval_sources(reference_sources,
    ...                                               estimated_sources)

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing true sources (must have same shape as
        estimated_sources)
    estimated_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing estimated sources (must have same shape as
        reference_sources)
    compute_permutation : bool, optional
        compute permutation of estimate/source combinations (True by default)

    Returns
    -------
    sdr : np.ndarray, shape=(nsrc,)
        vector of Signal to Distortion Ratios (SDR)
    sir : np.ndarray, shape=(nsrc,)
        vector of Source to Interference Ratios (SIR)
    sar : np.ndarray, shape=(nsrc,)
        vector of Sources to Artifacts Ratios (SAR)
    perm : np.ndarray, shape=(nsrc,)
        vector containing the best ordering of estimated sources in the mean
        SIR sense (estimated source number ``perm[j]`` corresponds to true
        source number ``j``).  Note: ``perm`` will be
        ``[0, 1, ..., nsrc-1]`` if ``compute_permutation`` is ``False``.

    References
    ----------
    .. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
        Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker,
        Dominik Lutter and Ngoc Q. K. Duong, "The Signal Separation Evaluation
        Campaign (2007-2010): Achievements and remaining challenges", Signal
        Processing, 92, pp. 1928-1936, 2012.
    """
    # make sure the input is of shape (nsrc, nsampl)
    if estimated_sources.ndim == 1:
        estimated_sources = estimated_sources[np.newaxis, :]
    if reference_sources.ndim == 1:
        reference_sources = reference_sources[np.newaxis, :]
    validate(reference_sources, estimated_sources)
    # If empty matrices were supplied, return empty lists (special case)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    nsrc = estimated_sources.shape[0]
    # does user desire permutations?
    if compute_permutation:
        # compute criteria for all possible pair matches
        sdr = np.empty((nsrc, nsrc))
        sir = np.empty((nsrc, nsrc))
        sar = np.empty((nsrc, nsrc))
        for jest in range(nsrc):
            for jtrue in range(nsrc):
                s_true, e_spat, e_interf, e_artif = _bss_decomp_mtifilt(
                    reference_sources, estimated_sources[jest], jtrue, 512)
                sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
                    _bss_source_crit(s_true, e_spat, e_interf, e_artif)
        # select the best ordering: the permutation maximizing the mean SIR
        perms = list(itertools.permutations(list(range(nsrc))))
        mean_sir = np.empty(len(perms))
        dum = np.arange(nsrc)
        for (i, perm) in enumerate(perms):
            mean_sir[i] = np.mean(sir[perm, dum])
        popt = perms[np.argmax(mean_sir)]
        idx = (popt, dum)
        return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
    else:
        # compute criteria for only the simple correspondence
        # (estimate 1 is estimate corresponding to reference source 1, etc.)
        sdr = np.empty(nsrc)
        sir = np.empty(nsrc)
        sar = np.empty(nsrc)
        for j in range(nsrc):
            s_true, e_spat, e_interf, e_artif = _bss_decomp_mtifilt(
                reference_sources, estimated_sources[j], j, 512)
            sdr[j], sir[j], sar[j] = _bss_source_crit(
                s_true, e_spat, e_interf, e_artif)
        # return the default permutation for compatibility
        popt = np.arange(nsrc)
        return (sdr, sir, sar, popt)
def get_ZXY_data_IFFT(Data, zf, xf, yf, zwidth=10000, xwidth=5000, ywidth=5000,
                      timeStart=None, timeEnd=None, show_fig=True):
    """Extract the individual z, x and y signals (in volts) from *Data* by
    IFFT-filtering a pass-band around each peak frequency.

    Parameters
    ----------
    Data : DataObject
        DataObject containing the data from which to extract the z, x and y
        signals.
    zf : float
        The frequency of the z peak in the PSD.
    xf : float
        The frequency of the x peak in the PSD.
    yf : float
        The frequency of the y peak in the PSD.
    zwidth : float, optional
        The width of the pass-band of the filter generated to filter Z.
    xwidth : float, optional
        The width of the pass-band of the filter generated to filter X.
    ywidth : float, optional
        The width of the pass-band of the filter generated to filter Y.
    timeStart : float, optional
        Starting time for filtering.  Defaults to Data.timeStart.
    timeEnd : float, optional
        Ending time for filtering.  Defaults to Data.timeEnd.
    show_fig : bool, optional
        If True, plot the unfiltered and filtered PSDs for z, x and y.
        If False, don't plot anything.

    Returns
    -------
    zdata : ndarray
        Array containing the z signal in volts with time.
    xdata : ndarray
        Array containing the x signal in volts with time.
    ydata : ndarray
        Array containing the y signal in volts with time.
    timedata : ndarray
        Array containing the time data to go with the z, x and y signals.
    """
    # `is None` (not `== None`) -- identity test is the correct idiom and
    # avoids surprises with array-valued arguments.
    if timeStart is None:
        timeStart = Data.timeStart
    if timeEnd is None:
        timeEnd = Data.timeEnd
    time = Data.time.get_array()
    StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
    EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
    SAMPLEFREQ = Data.SampleFreq
    input_signal = Data.voltage[StartIndex:EndIndex]
    # Pass-bands are centred on each peak frequency.
    zdata = IFFT_filter(input_signal, SAMPLEFREQ, zf - zwidth / 2, zf + zwidth / 2)
    xdata = IFFT_filter(input_signal, SAMPLEFREQ, xf - xwidth / 2, xf + xwidth / 2)
    ydata = IFFT_filter(input_signal, SAMPLEFREQ, yf - ywidth / 2, yf + ywidth / 2)
    if show_fig:
        # Cap the welch segment length to keep memory bounded.
        NPerSegment = len(Data.time)
        if NPerSegment > 1e7:
            NPerSegment = int(1e7)
        f, PSD = scipy.signal.welch(input_signal, SAMPLEFREQ, nperseg=NPerSegment)
        f_z, PSD_z = scipy.signal.welch(zdata, SAMPLEFREQ, nperseg=NPerSegment)
        f_y, PSD_y = scipy.signal.welch(ydata, SAMPLEFREQ, nperseg=NPerSegment)
        f_x, PSD_x = scipy.signal.welch(xdata, SAMPLEFREQ, nperseg=NPerSegment)
        _plt.plot(f, PSD)
        _plt.plot(f_z, PSD_z, label="z")
        _plt.plot(f_x, PSD_x, label="x")
        _plt.plot(f_y, PSD_y, label="y")
        _plt.legend(loc="best")
        _plt.xlim([zf - zwidth, yf + ywidth])
        _plt.xlabel('Frequency (Hz)')
        _plt.ylabel(r'$S_{xx}$ ($V^2/Hz$)')
        _plt.semilogy()
        _plt.title("filepath = %s" % (Data.filepath))
        _plt.show()
    timedata = time[StartIndex:EndIndex]
    return zdata, xdata, ydata, timedata
def sync(self, api_token, sync_token, resource_types='["all"]', **kwargs):
    """Update and retrieve Todoist data.

    :param api_token: The user's login api_token.
    :type api_token: str
    :param sync_token: The sync token.  On the initial request pass ``0``;
        on all other requests pass the last sync token you received.
    :param resource_types: Specifies which subset of data you want to
        receive e.g. only projects.  Defaults to all data.
    :type resource_types: str
    :param commands: A list of JSON commands to perform (keyword argument).
    :type commands: list (str)
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`

    >>> from pytodoist.api import TodoistAPI
    >>> api = TodoistAPI()
    >>> response = api.register('john.doe@gmail.com', 'John Doe',
    ...                         'password')
    >>> user_info = response.json()
    >>> api_token = user_info['api_token']
    >>> response = api.sync(api_token, 0, '["projects"]')
    >>> print(response.json())
    {'sync_token': ..., 'projects': ...}
    """
    params = {
        'token': api_token,
        'sync_token': sync_token,
    }
    req_func = self._post
    if 'commands' not in kwargs:
        # Use GET if we're not changing data.
        req_func = self._get
    params['resource_types'] = resource_types
    return req_func('sync', params, **kwargs)
def create_directory(directory):
    """Create *directory*, first deleting any existing tree at that path.

    :param directory: path of the directory to (re)create
    :return: the directory path, for convenient chaining
    """
    already_exists = os.path.exists(directory)
    if already_exists:
        rmtree(directory)  # start from a clean slate
    os.makedirs(directory)
    return directory
def get_user_client(self, user, password, populate=True):
    """Return a new client authenticated as *user*.

    The returned client is lightweight: it only uses different credentials
    and shares the transport with the underlying client.
    """
    shared_transport_owner = weakproxy(self)
    return XCLIClientForUser(shared_transport_owner, user, password,
                             populate=populate)
def default_inasafe_html_resources(feature, parent):
    """Retrieve default InaSAFE HTML resources (style and script)."""
    _ = feature, parent  # NOQA  (QGIS expression-function signature)
    scope = QgsExpressionContextUtils.projectScope()
    key = provenance_layer_analysis_impacted['provenance_key']
    if not scope.hasVariable(key):
        return None
    analysis_dir = dirname(scope.variable(key))
    complete_report = get_impact_report_as_string(analysis_dir)
    return get_report_section(
        complete_report,
        component_id='inasafe-html-resources',
        container_wrapper_format=u'{section_content}')
def inc(self, exception=None):  # type: (Optional[ParseError.__class__]) -> bool
    """Advance the parser one character unless the end of input is reached.

    Returns whether or not it was able to advance.  When the input is
    exhausted and *exception* is given, raises it via self.parse_error.
    """
    try:
        self._idx, self._current = next(self._chars)
    except StopIteration:
        # Input exhausted: pin the index past the end and mark EOF.
        self._idx = len(self)
        self._current = self.EOF
        if exception:
            raise self.parse_error(exception)
        return False
    else:
        return True
def getClassAllSubs(self, aURI):
    """Return all (transitive) subclasses of the class *aURI*.

    note: requires SPARQL 1.1
    2015-06-04: currently not used, inferred from above

    :return: list of query result rows (empty list if the query fails)
    """
    try:
        qres = self.rdfgraph.query(
            """SELECT DISTINCT ?x
               WHERE {
                   { ?x rdfs:subClassOf+ <%s> }
                   FILTER (!isBlank(?x))
               }
            """ % (aURI))
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are not swallowed.
    except Exception:
        printDebug("... warning: the 'getClassAllSubs' query failed (maybe missing SPARQL 1.1 support?)")
        qres = []
    return list(qres)
def _load_instance ( self , instance_id ) :
"""Return instance with the given id .
For performance reasons , the instance ID is first searched for in the
collection of VM instances started by ElastiCluster
( ` self . _ instances ` ) , then in the list of all instances known to the
cloud provider at the time of the last update
( ` self . _ cached _ instances ` ) , and finally the cloud provider is directly
queried .
: param str instance _ id : instance identifier
: return : py : class : ` boto . ec2 . instance . Reservation ` - instance
: raises : ` InstanceError ` is returned if the instance can ' t
be found in the local cache or in the cloud .""" | # if instance is known , return it
if instance_id in self . _instances :
return self . _instances [ instance_id ]
# else , check ( cached ) list from provider
if instance_id not in self . _cached_instances :
self . _cached_instances = self . _build_cached_instances ( )
if instance_id in self . _cached_instances :
inst = self . _cached_instances [ instance_id ]
self . _instances [ instance_id ] = inst
return inst
# If we reached this point , the instance was not found neither
# in the caches nor on the website .
raise InstanceNotFoundError ( "Instance `{instance_id}` not found" . format ( instance_id = instance_id ) ) |
def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is a Mac Wifi log file.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      line (str): line from a text file.

    Returns:
      bool: True if the line is in the expected format, False if not.
    """
    self._last_month = 0
    self._year_use = parser_mediator.GetEstimatedYear()
    key = 'header'
    try:
        structure = self._MAC_WIFI_HEADER.parseString(line)
    except pyparsing.ParseException:
        structure = None
    if not structure:
        # Fall back to the alternate "turned over" header format.
        key = 'turned_over_header'
        try:
            structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)
        except pyparsing.ParseException:
            structure = None
    if not structure:
        logger.debug('Not a Mac Wifi log file')
        return False
    time_elements_tuple = self._GetTimeElementsTuple(key, structure)
    try:
        # Validate the extracted date/time by attempting to build a
        # dfdatetime object from it; the object itself is discarded.
        dfdatetime_time_elements.TimeElementsInMilliseconds(
            time_elements_tuple=time_elements_tuple)
    except ValueError:
        logger.debug(
            'Not a Mac Wifi log file, invalid date and time: {0!s}'.format(
                structure.date_time))
        return False
    # Record the month (element 1 of the tuple) -- presumably used by later
    # parsing to detect year rollover; confirm against ParseRecord.
    self._last_month = time_elements_tuple[1]
    return True
def from_jd(jd):
    '''Calculate the Indian Civil (Saka era) date from Julian day *jd*.

    Returns a (year, month, day) tuple.
    '''
    start = 80  # day offset between the Saka and Gregorian new year
    jd = trunc(jd) + 0.5
    greg = gregorian.from_jd(jd)            # Gregorian date for this Julian day
    leap = isleap(greg[0])                  # is the Gregorian year a leap year?
    year = greg[0] - SAKA_EPOCH             # tentative year in the Saka era
    greg0 = gregorian.to_jd(greg[0], 1, 1)  # JD at start of the Gregorian year
    yday = jd - greg0                       # 0-based day number in Gregorian year
    Caitra = 31 if leap else 30             # days in Caitra this year
    if yday < start:
        # Day falls at the end of the preceding Saka year.
        year -= 1
        yday += Caitra + (31 * 5) + (30 * 3) + 10 + start
    yday -= start
    if yday < Caitra:
        month = 1
        day = yday + 1
    else:
        mday = yday - Caitra
        if mday < (31 * 5):
            # Months 2-6 have 31 days each.
            month = trunc(mday / 31) + 2
            day = (mday % 31) + 1
        else:
            # Months 7-12 have 30 days each.
            mday -= 31 * 5
            month = trunc(mday / 30) + 7
            day = (mday % 30) + 1
    return (year, month, int(day))
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    *  1 => Installed revno is greater than supplied arg
    *  0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    Builds a package cache via YumBase when the pkgcache argument is
    None (or empty).
    """
    if not pkgcache:
        y = yum.YumBase()
        packages = y.doPackageLists()
        # NOTE(review): the attribute casing looks inconsistent (`i.Name`
        # vs `i.version`) -- confirm against the yum package object API.
        pkgcache = { i . Name : i . version for i in packages [ 'installed' ] }
    pkg = pkgcache[package]
    # NOTE(review): these are plain string comparisons, not RPM
    # version-aware comparisons -- e.g. '10' < '9' lexicographically.
    if pkg > revno:
        return 1
    if pkg < revno:
        return - 1
    return 0
def checked(response):
    """Check a response status before returning it.

    :param response: a response from a XMLRPC call to OpenSubtitles.
    :return: the response.
    :raise: :class:`OpenSubtitlesError`
    """
    # Map each known non-success status code to its dedicated exception.
    exception_by_status = {
        401: Unauthorized,
        406: NoSession,
        407: DownloadLimitReached,
        413: InvalidImdbid,
        414: UnknownUserAgent,
        415: DisabledUserAgent,
        503: ServiceUnavailable,
    }
    status_code = int(response['status'][:3])
    specific = exception_by_status.get(status_code)
    if specific is not None:
        raise specific
    if status_code != 200:
        raise OpenSubtitlesError(response['status'])
    return response
def _strip_comments ( line ) :
"""Processes line stripping any comments from it
: param line : line to be processed
: type line : str
: return : line with removed comments
: rtype : str""" | if line == '' :
return line
r = re . search ( '(?P<line>[^#]*)(#(?P<comment>.*))?' , line )
if r :
line = r . group ( 'line' )
if not line . endswith ( '\n' ) :
line += '\n'
return line
return '\n' |
def metta_config(quarter, num_dimensions):
    """Return metta metadata for a quarter's SOC code classifier matrix.

    Args:
        quarter (str): quarter, in format '2015Q1'
        num_dimensions (int): number of features in the matrix

    Returns:
        dict: metadata suitable for metta.archive_train_test
    """
    first_day, last_day = quarter_boundaries(quarter)
    feature_names = ['doc2vec_{}'.format(i) for i in range(num_dimensions)]
    return {
        'start_time': first_day,
        'end_time': last_day,
        'prediction_window': 3,
        'label_name': 'onet_soc_code',
        'label_type': 'categorical',
        'matrix_id': 'job_postings_{}'.format(quarter),
        'feature_names': feature_names,
    }
def doCommit(self, p: Prepare):
    """Create a commit message from the given Prepare message and trigger
    the commit phase.

    :param p: the prepare message
    """
    key_3pc = (p.viewNo, p.ppSeqNo)
    self.logger.debug("{} Sending COMMIT{} at {}".format(
        self, key_3pc, self.get_current_time()))
    params = [self.instId, p.viewNo, p.ppSeqNo]
    pre_prepare = self.getPrePrepare(*key_3pc)
    # BLS multi-sig:
    if p.stateRootHash is not None:
        # NOTE(review): this second getPrePrepare call duplicates the one
        # just above -- looks redundant; confirm before removing.
        pre_prepare = self.getPrePrepare(*key_3pc)
        params = self._bls_bft_replica.update_commit(params, pre_prepare)
    commit = Commit(*params)
    if self.isMaster:
        # Allow a hook to replace the commit message on the master replica.
        rv = self.execute_hook(ReplicaHooks.CREATE_CM, commit)
        commit = rv if rv is not None else commit
    self.send(commit, TPCStat.CommitSent)
    self.addToCommits(commit, self.name)
def reset_password(app, appbuilder, username, password):
    """Reset a user's password.

    :param app: application module path
    :param appbuilder: AppBuilder location within the app
    :param username: user whose password will be reset
    :param password: the new password
    """
    _appbuilder = import_application(app, appbuilder)
    user = _appbuilder.sm.find_user(username=username)
    if not user:
        click.echo("User {0} not found.".format(username))
    else:
        _appbuilder.sm.reset_password(user.id, password)
        # Grammar fix in the user-facing message ("reseted" -> "reset").
        click.echo(click.style("User {0} reset.".format(username), fg="green"))
def name(self):
    """Get the module name.

    :return: Module name: the class name, suffixed with ``.<id>`` when an
        id is set
    :rtype: str | unicode
    """
    base = type(self).__name__
    return "{}.{}".format(base, self._id) if self._id else base
def build_ap_info_pkt(self, layer_cls, dest):
    """Build a packet with info describing the current AP.

    For beacon / proberesp use: *layer_cls* is the Dot11 payload layer class
    and *dest* the destination MAC address.
    """
    # Stack: radiotap / 802.11 header / beacon-or-proberesp / info elements
    # (SSID, supported rates, current channel, RSN with CCMP + PSK).
    return RadioTap() \
        / Dot11(addr1=dest, addr2=self.mac, addr3=self.mac) \
        / layer_cls(timestamp=0, beacon_interval=100, cap='ESS+privacy') \
        / Dot11Elt(ID="SSID", info=self.ssid) \
        / Dot11EltRates(rates=[130, 132, 139, 150, 12, 18, 24, 36]) \
        / Dot11Elt(ID="DSset", info=chb(self.channel)) \
        / Dot11EltRSN(group_cipher_suite=RSNCipherSuite(cipher=0x2),
                      pairwise_cipher_suites=[RSNCipherSuite(cipher=0x2)],
                      akm_suites=[AKMSuite(suite=0x2)])
def add_row(self, label, row_data, columns=""):
    """Add a row with data.

    If any new keys are present in the row_data dictionary, that column
    will be added to the dataframe.  This is done inplace.

    :param label: index label for the new row (coerced to str)
    :param row_data: dict mapping column name -> value
    :param columns: optional column ordering to impose
    :return: the updated dataframe (self.df)
    """
    # use provided column order, making sure you don't lose any values
    # from self.df.columns
    if len(columns):
        if sorted(self.df.columns) == sorted(columns):
            self.df.columns = columns
        else:
            new_columns = []
            new_columns.extend(columns)
            for col in self.df.columns:
                if col not in new_columns:
                    new_columns.append(col)
            # NOTE(review): `new_columns` is built but never applied --
            # confirm whether self.df should be reindexed with it.
    # makes sure all columns have data or None
    if sorted(row_data.keys()) != sorted(self.df.columns):
        # add any new column names
        for key in row_data:
            if key not in self.df.columns:
                self.df[key] = None
        # add missing column names into row_data
        for col_label in self.df.columns:
            if col_label not in list(row_data.keys()):
                row_data[col_label] = None
    # (make sure you are working with strings)
    self.df.index = self.df.index.astype(str)
    label = str(label)
    # create a new row with suffix "new"
    # (this ensures that you get a unique, new row,
    # instead of adding on to an existing row with the same label)
    self.df.loc[label + "new"] = pd.Series(row_data)
    # rename it to be correct
    self.df.rename(index={label + "new": label}, inplace=True)
    # use next line to sort index inplace
    # self.df.sort_index(inplace=True)
    return self.df
def call_and_catch_errors(self, f, *args, **kwargs):
    """Call *f* with the given arguments and return its return value.

    If it raises a :class:`scss.errors.SassError` and `live_errors` is
    turned on, return CSS containing a traceback and error message instead.
    """
    try:
        return f(*args, **kwargs)
    except SassError as exc:
        if not self.live_errors:
            raise
        # TODO should this setting also capture and display warnings?
        return exc.to_css()
def batch_norm_relu(inputs, is_training, relu=True):
    """Apply batch normalization, optionally followed by a ReLU.

    When ``relu`` is False the batch-norm scale is zero-initialized
    (``init_zero``) and no activation is applied.
    """
    normalized = mtf.layers.batch_norm(
        inputs,
        is_training,
        BATCH_NORM_DECAY,
        epsilon=BATCH_NORM_EPSILON,
        init_zero=(not relu))
    if not relu:
        return normalized
    return mtf.relu(normalized)
def open_gif(self, filename):
    """Open a gif file for writing animation frames.

    Parameters
    ----------
    filename : str
        Filename of the gif to open.  Must end in ``.gif``
        (case-insensitive).

    Raises
    ------
    ValueError
        If ``filename`` does not end in ``.gif``.
    """
    # Previously only a literal lowercase "gif" suffix passed (so
    # "FOO.GIF" was rejected and "foogif" accepted); match the stated
    # contract instead, and raise a specific exception type.
    if not filename.lower().endswith('.gif'):
        raise ValueError('Unsupported filetype. Must end in .gif')
    # Resolve relative paths against the configured figure directory.
    if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
        filename = os.path.join(vtki.FIGURE_PATH, filename)
    self._gif_filename = os.path.abspath(filename)
    self.mwriter = imageio.get_writer(filename, mode='I')
def find_tags(self):
    """Find information about the tags in the repository.

    .. note:: The ``bzr tags`` command reports tags pointing to
       non-existing revisions as ``?`` but doesn't provide revision
       ids. We can get the revision ids using the ``bzr tags
       --show-ids`` command but this command doesn't mark tags
       pointing to non-existing revisions. We combine the output of
       both because we want all the information.

    :returns: A generator of :class:`Revision` objects, one per tag
        that points at an existing revision.
    """
    # First pass: names of tags that point at real revisions.
    valid_tags = []
    listing = self.context.capture('bzr', 'tags')
    for line in listing.splitlines():
        tokens = line.split()
        if len(tokens) == 2 and tokens[1] != '?':
            valid_tags.append(tokens[0])
    # Second pass: revision ids; emit only the tags validated above.
    listing = self.context.capture('bzr', 'tags', '--show-ids')
    for line in listing.splitlines():
        tokens = line.split()
        if len(tokens) == 2 and tokens[0] in valid_tags:
            # Fix: the unpacked names were previously assigned but the
            # yield re-indexed `tokens` instead of using them.
            tag, revision_id = tokens
            yield Revision(repository=self, revision_id=revision_id, tag=tag)
def experiments_fmri_create(self, experiment_id, filename):
    """Create a functional data object from the given file and associate
    the object with the specified experiment.

    Parameters
    ----------
    experiment_id : string
        Unique experiment identifier
    filename : File-type object
        Functional data file

    Returns
    -------
    FMRIDataHandle
        Handle for created fMRI object, or None if the identified
        experiment is unknown (or was deleted concurrently)
    """
    # Get the experiment to ensure that it exists before we even create
    # the functional data object
    experiment = self.experiments_get(experiment_id)
    if experiment is None:
        return None
    # Create functional data object from given file
    fmri = self.funcdata.create_object(filename)
    # Update experiment to associate it with created fMRI object.  Assign
    # result to experiment.  Should the experiment have been deleted in
    # parallel the result will be None
    experiment = self.experiments.update_fmri_data(experiment_id, fmri.identifier)
    if experiment is None:
        # Roll back: delete the fMRI object's data directory ...
        shutil.rmtree(fmri.directory)
        # ... and delete the functional data object from the database
        self.funcdata.delete_object(fmri.identifier, erase=True)
        return None
    else:
        return funcdata.FMRIDataHandle(fmri, experiment_id)
def import_keybase(useropt):
    """Import a public GPG key from Keybase.

    ``useropt`` is either ``"username"`` or ``"username:fingerprint"``.
    Exits with status 2 when the key is already present locally, or 0
    after a successful import.
    """
    parts = useropt.split(':')
    username = parts[0]
    if len(parts) > 1:
        public_key = cryptorito.key_from_keybase(username, parts[1])
    else:
        public_key = cryptorito.key_from_keybase(username)
    if cryptorito.has_gpg_key(public_key['fingerprint']):
        sys.exit(2)
    cryptorito.import_gpg_key(public_key['bundle'].encode('ascii'))
    sys.exit(0)
def flipVertical(self):
    """Toggle the vertical flip state of the image object and reapply
    the current transformation."""
    flipped = not self.flipV
    self.flipV = flipped
    self._transmogrophy(self.angle, self.percent, self.scaleFromCenter,
                        self.flipH, flipped)
def pressure_integral(T1, P1, dH):
    r'''Method to compute an integral of the pressure differential of an
    elevation difference with a base elevation defined by temperature `T1`
    and pressure `P1`. This is similar to subtracting the pressures at two
    different elevations, except it allows for local conditions
    (temperature and pressure) to be taken into account. This is useful
    for e.g. evaluating the pressure difference between the top and bottom
    of a natural draft cooling tower.

    Parameters
    ----------
    T1 : float
        Temperature at the lower elevation condition, [K]
    P1 : float
        Pressure at the lower elevation condition, [Pa]
    dH : float
        Elevation difference for which to evaluate the pressure
        difference, [m]

    Returns
    -------
    delta_P : float
        Pressure difference between the elevations, [Pa]
    '''
    # Find the elevation at which the standard atmosphere model gives the
    # specified base pressure P1 (the bracket covers the model's range,
    # -610 m to 86 km).
    def to_solve(H):
        return ATMOSPHERE_1976(H).P - P1
    H_ref = brenth(to_solve, -610.0, 86000)
    # Temperature offset between the actual base condition and the
    # standard atmosphere at that reference elevation.
    dT = T1 - ATMOSPHERE_1976(H_ref).T
    # Integrand g*rho, i.e. the local hydrostatic gradient dP/dz, with the
    # temperature offset applied.
    def to_int(Z):
        atm = ATMOSPHERE_1976(Z, dT=dT)
        return atm.g * atm.rho
    from scipy.integrate import quad
    return float(quad(to_int, H_ref, H_ref + dH)[0])
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''Merge bins with their right neighbour until each bin contains at
    least ``min_count`` data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.  ``None`` or
            ``False`` disables merging and returns ``bins`` unchanged.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted
            to equal-sized bins).

    :returns:
        The (possibly merged) bin-edges.
    '''
    # escape: merging disabled
    if min_count is None:
        return bins
    if min_count is False:
        return bins
    # check -- bool is excluded explicitly (True is a subclass of int but
    # is not a valid count; False already returned above)
    if isinstance(min_count, bool) or not isinstance(min_count, int):
        raise IOError('"min_count" must be an integer number')
    # keep merging the first under-populated bin with its right neighbour
    # (or left neighbour for the last bin) until all bins are populated
    while True:
        P, _ = np.histogram(data, bins=bins, density=False)
        idx = np.where(P < min_count)[0]
        if len(idx) == 0:
            return bins
        idx = idx[0]
        if idx + 1 == len(P):
            # last bin: drop its left edge, merging it leftwards
            bins = np.hstack((bins[:(idx)], bins[-1]))
        else:
            # drop the shared edge with the right neighbour
            bins = np.hstack((bins[:(idx + 1)], bins[(idx + 2):]))
def add_location_timezone_to_device(self, device_obj, location, timezone):
    """Return the 'device object' with its meta location and timezone
    updated via the Portals API.

    http://docs.exosite.com/portals/#update-device
    http://docs.exosite.com/portals/#device-object
    """
    dictify_device_meta(device_obj)
    meta = device_obj['info']['description']['meta']
    # Both capitalisations are written, mirroring what the API may return.
    for key in ('location', 'Location'):
        meta[key] = location
    for key in ('timezone', 'Timezone'):
        meta[key] = timezone
    return self.update_device(device_obj)
def Pop(self):
    """Remove the tag from the front of the list and return it.

    Returns None when the list is empty.
    """
    # list.pop(0) replaces the manual index-0 read followed by `del`.
    if self.tagList:
        return self.tagList.pop(0)
    return None
def _authenticate_client(self, client, secret):
    """Authenticate against the OAuth token endpoint with the given
    client id and secret.

    :param client: OAuth client id.
    :param secret: OAuth client secret.
    :returns: The decoded JSON token response on HTTP 200.
    :raises requests.HTTPError: if the server rejects the request.
    """
    # HTTP Basic credentials: base64("client:secret").
    # (Idiom fix: ':'.join(...) instead of str.join(':', ...).)
    client_s = ':'.join([client, secret])
    credentials = base64.b64encode(client_s.encode('utf-8')).decode('utf-8')
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Cache-Control': 'no-cache',
        'Authorization': 'Basic ' + credentials,
    }
    params = {'client_id': client, 'grant_type': 'client_credentials'}
    uri = self.uri + '/oauth/token'
    # Lazy %-style arguments avoid building the strings when DEBUG is off.
    logging.debug("URI=%s", uri)
    logging.debug("HEADERS=%s", headers)
    logging.debug("BODY=%s", params)
    response = requests.post(uri, headers=headers, params=params)
    if response.status_code == 200:
        logging.debug("RESPONSE=%s", response.json())
        return response.json()
    else:
        logging.warning("Failed to authenticate as %s", client)
        response.raise_for_status()
def vrrp_priority(self, **kwargs):
    """Set VRRP priority.

    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc).
        name (str): Name of interface. (1/0/5, 1/0/10, etc).
        vrid (str): VRRPv3 ID.
        priority (str): VRRP Priority.
        ip_version (str): Version of IP (4, 6).
        rbridge_id (str): rbridge id, used for VE interfaces.
            Defaults to '1'.
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, `vrid`, `priority`, or
            `ip_version` is not passed.
        ValueError: if `int_type`, `name`, `vrid`, `priority`, or
            `ip_version` is invalid.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.anycast_mac(rbridge_id='225',
        ...         mac='aabb.ccdd.eeff', delete=True)
        ...         output = dev.services.vrrp(ip_version='6',
        ...         enabled=True, rbridge_id='225')
        ...         output = dev.services.vrrp(enabled=True,
        ...         rbridge_id='225')
        ...         output = dev.interface.set_ip('tengigabitethernet',
        ...         '225/0/18', '10.1.1.2/24')
        ...         output = dev.interface.ip_address(name='225/0/18',
        ...         int_type='tengigabitethernet',
        ...         ip_addr='2001:4818:f000:1ab:cafe:beef:1000:2/64')
        ...         dev.interface.vrrp_vip(int_type='tengigabitethernet',
        ...         name='225/0/18', vrid='1', vip='10.1.1.1/24')
        ...         dev.interface.vrrp_vip(int_type='tengigabitethernet',
        ...         name='225/0/18', vrid='1',
        ...         vip='fe80::cafe:beef:1000:1/64')
        ...         dev.interface.vrrp_vip(int_type='tengigabitethernet',
        ...         name='225/0/18', vrid='1',
        ...         vip='2001:4818:f000:1ab:cafe:beef:1000:1/64')
        ...         dev.interface.vrrp_priority(
        ...         int_type='tengigabitethernet',
        ...         name='225/0/18', vrid='1', ip_version='4',
        ...         priority='66')
        ...         dev.interface.vrrp_priority(
        ...         int_type='tengigabitethernet',
        ...         name='225/0/18', vrid='1', ip_version='6',
        ...         priority='77')
        ...         output = dev.interface.add_vlan_int('88')
        ...         output = dev.interface.ip_address(int_type='ve',
        ...         name='88', ip_addr='172.16.10.1/24', rbridge_id='225')
        ...         output = dev.interface.ip_address(int_type='ve',
        ...         name='88', rbridge_id='225',
        ...         ip_addr='2003:4818:f000:1ab:cafe:beef:1000:2/64')
        ...         dev.interface.vrrp_vip(int_type='ve', name='88',
        ...         vrid='1', vip='172.16.10.2/24', rbridge_id='225')
        ...         dev.interface.vrrp_vip(int_type='ve', name='88',
        ...         rbridge_id='225', vrid='1',
        ...         vip='fe80::dafe:beef:1000:1/64')
        ...         dev.interface.vrrp_vip(int_type='ve', rbridge_id='225',
        ...         name='88', vrid='1',
        ...         vip='2003:4818:f000:1ab:cafe:beef:1000:1/64')
        ...         dev.interface.vrrp_priority(int_type='ve', name='88',
        ...         rbridge_id='225', vrid='1', ip_version='4',
        ...         priority='66')
        ...         dev.interface.vrrp_priority(int_type='ve', name='88',
        ...         rbridge_id='225', vrid='1', ip_version='6',
        ...         priority='77')
        ...         output = dev.services.vrrp(ip_version='6',
        ...         enabled=False, rbridge_id='225')
        ...         output = dev.services.vrrp(enabled=False,
        ...         rbridge_id='225')
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    vrid = kwargs.pop('vrid')
    priority = kwargs.pop('priority')
    ip_version = int(kwargs.pop('ip_version'))
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'port_channel', 've']
    vrrp_args = dict(name=name, vrid=vrid, priority=priority)
    vrrp_priority = None
    method_name = None
    method_class = self._interface
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(valid_int_types))
    # IPv4 uses the versioned "vrrp" generated method; IPv6 uses the
    # dedicated ipv6 vrrpv3 group method.
    if ip_version == 4:
        vrrp_args['version'] = '3'
        method_name = 'interface_%s_vrrp_priority' % int_type
    elif ip_version == 6:
        method_name = 'interface_%s_ipv6_vrrpv3_group_priority' % int_type
    if int_type == 've':
        # VE interfaces live under the rbridge subtree and are named by
        # VLAN id rather than x/y/z notation.
        method_name = "rbridge_id_%s" % method_name
        if ip_version == 6:
            method_name = method_name.replace('group_', '')
        method_class = self._rbridge
        vrrp_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    # Look up the generated config builder and hand the result to the
    # callback (by default, the device's own _callback).
    vrrp_priority = getattr(method_class, method_name)
    config = vrrp_priority(**vrrp_args)
    return callback(config)
def cast_scalar(method):
    """Decorator: promote a scalar right-hand operand to a constant
    interpolating object (built via ``type(self)``) before calling
    *method*."""
    @wraps(method)
    def wrapper(self, other):
        operand = other
        if np.isscalar(operand):
            operand = type(self)([operand], self.domain())
        return method(self, operand)
    return wrapper
def single_run_max_confidence_recipe(sess, model, x, y, nb_classes, eps,
                                     clip_min, clip_max, eps_iter, nb_iter,
                                     report_path, batch_size=BATCH_SIZE,
                                     eps_iter_small=None):
    """A reasonable attack bundling recipe for a max norm threat model and
    a defender that uses confidence thresholding. This recipe uses both
    uniform noise and randomly-initialized PGD targeted attacks.

    References:
    https://openreview.net/forum?id=H1g0piA9tQ

    This version runs each attack (noise, targeted PGD for each class with
    nb_iter iterations, target PGD for each class with 25X more iterations)
    just once and then stops. See `basic_max_confidence_recipe` for a
    version that runs indefinitely.

    :param sess: tf.Session
    :param model: cleverhans.model.Model
    :param x: numpy array containing clean example inputs to attack
    :param y: numpy array containing true labels
    :param nb_classes: int, number of classes
    :param eps: float, maximum size of perturbation (measured by max norm)
    :param clip_min: float, minimum value of adversarial example features
    :param clip_max: float, maximum value of adversarial example features
    :param eps_iter: float, step size for one version of PGD attacks
        (will also run another version with eps_iter_small step size)
    :param nb_iter: int, number of iterations for the cheaper PGD attacks
        (will also run another version with 25X more iterations)
    :param report_path: str, the path that the report will be saved to.
    :param batch_size: int, the total number of examples to run
        simultaneously
    :param eps_iter_small: optional, float.
        The second version of the PGD attack is run with 25 * nb_iter
        iterations and eps_iter_small step size. If eps_iter_small is not
        specified it is set to eps_iter / 25.
    """
    noise_attack = Noise(model, sess)
    pgd_attack = ProjectedGradientDescent(model, sess)
    # Common threat-model parameters shared by every attack config.
    threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max}
    noise_attack_config = AttackConfig(noise_attack, threat_params, "noise")
    attack_configs = [noise_attack_config]
    pgd_attack_configs = []
    pgd_params = copy.copy(threat_params)
    pgd_params["eps_iter"] = eps_iter
    pgd_params["nb_iter"] = nb_iter
    # Work is sharded across devices, so the batch must divide evenly.
    assert batch_size % num_devices == 0
    dev_batch_size = batch_size // num_devices
    ones = tf.ones(dev_batch_size, tf.int32)
    expensive_pgd = []
    if eps_iter_small is None:
        eps_iter_small = eps_iter / 25.
    # One cheap and one expensive targeted PGD config per target class.
    for cls in range(nb_classes):
        cls_params = copy.copy(pgd_params)
        cls_params['y_target'] = tf.to_float(tf.one_hot(ones * cls, nb_classes))
        cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls))
        pgd_attack_configs.append(cls_attack_config)
        expensive_params = copy.copy(cls_params)
        expensive_params["eps_iter"] = eps_iter_small
        # NOTE(review): `*= 25.` makes nb_iter a float — presumably the
        # attack accepts that; confirm if an int is required.
        expensive_params["nb_iter"] *= 25.
        expensive_config = AttackConfig(pgd_attack, expensive_params, "expensive_pgd_" + str(cls))
        expensive_pgd.append(expensive_config)
    # Rebuild the full config list (the earlier assignment is superseded).
    attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd
    # Run each attack exactly once ("single run").
    new_work_goal = {config: 1 for config in attack_configs}
    goals = [MaxConfidence(t=1., new_work_goal=new_work_goal)]
    bundle_attacks(sess, model, x, y, attack_configs, goals, report_path,
                   attack_batch_size=batch_size, eval_batch_size=batch_size)
def onChangeSelectedBlocksIndent(self, increase, withSpace=False):
    """Indent or unindent the selected blocks.

    Called when Tab or Space is pressed with several blocks selected,
    or when Shift+Tab is pressed.  Inserts or removes text at the
    beginning of each selected block.

    :param increase: True to indent, False to unindent.
    :param withSpace: True when indenting/unindenting by a single space
        instead of a full indentation level.
    """
    def blockIndentation(block):
        # Leading whitespace of the block's text.
        text = block.text()
        return text[:len(text) - len(text.lstrip())]

    def cursorAtSpaceEnd(block):
        # Cursor positioned just after the leading whitespace.
        cursor = QTextCursor(block)
        cursor.setPosition(block.position() + len(blockIndentation(block)))
        return cursor

    def indentBlock(block):
        cursor = cursorAtSpaceEnd(block)
        cursor.insertText(' ' if withSpace else self.text())

    def spacesCount(text):
        # Number of trailing plain spaces in the indentation string.
        return len(text) - len(text.rstrip(' '))

    def unIndentBlock(block):
        currentIndent = blockIndentation(block)
        if currentIndent.endswith('\t'):
            charsToRemove = 1
        elif withSpace:
            charsToRemove = 1 if currentIndent else 0
        else:
            if self.useTabs:
                charsToRemove = min(spacesCount(currentIndent), self.width)
            else:  # spaces
                if currentIndent.endswith(self.text()):  # remove indent level
                    charsToRemove = self.width
                else:  # remove all spaces
                    charsToRemove = min(spacesCount(currentIndent), self.width)
        if charsToRemove:
            cursor = cursorAtSpaceEnd(block)
            cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
            cursor.removeSelectedText()

    cursor = self._qpart.textCursor()
    startBlock = self._qpart.document().findBlock(cursor.selectionStart())
    endBlock = self._qpart.document().findBlock(cursor.selectionEnd())
    # do not indent not selected line if indenting multiple lines
    # (selection ends exactly at the start of endBlock)
    if (cursor.selectionStart() != cursor.selectionEnd() and
            endBlock.position() == cursor.selectionEnd() and
            endBlock.previous().isValid()):
        endBlock = endBlock.previous()
    indentFunc = indentBlock if increase else unIndentBlock
    if startBlock != endBlock:  # indent multiply lines
        stopBlock = endBlock.next()
        block = startBlock
        # the `with self._qpart` groups the edits into one undo step
        with self._qpart:
            while block != stopBlock:
                indentFunc(block)
                block = block.next()
        # Re-select the whole indented region.
        newCursor = QTextCursor(startBlock)
        newCursor.setPosition(endBlock.position() + len(endBlock.text()),
                              QTextCursor.KeepAnchor)
        self._qpart.setTextCursor(newCursor)
    else:  # indent 1 line
        indentFunc(startBlock)
def fix(self, *args, **kwargs):
    """Turn fit parameters into constants.

    As positional arguments, parameters must be strings naming the
    parameters to fix.  As keyword arguments, they can be set to a
    value at the same time.

    Note this will NOT work when specifying a non-string fit function,
    because there is no flexibility in the number of arguments.  To get
    around this, suppose you've defined a function stuff(x, a, b).
    Instead of sending the stuff object to self.set_functions()
    directly, make it a string function, e.g.:

        self.set_functions('stuff(x,a,b)', 'a,b', stuff=stuff)

    Returns self, so calls can be chained.
    """
    # first set all the keyword argument values
    self.set(**kwargs)
    # get everything into one big list
    pnames = list(args) + list(kwargs.keys())
    # move each pname to the constants
    for pname in pnames:
        if not pname in self._pnames:
            self._error("Naughty. '" + pname + "' is not a valid fit parameter name.")
        else:
            n = self._pnames.index(pname)
            # use the fit result if it exists
            if self.results:
                value = self.results[0][n]
            # otherwise use the guess value
            else:
                value = self._pguess[n]
            # make the switcheroo: ensure the four parallel bookkeeping
            # sequences are mutable lists before moving the entry across
            if type(self._pnames) is not list:
                self._pnames = list(self._pnames)
            if type(self._pguess) is not list:
                self._pguess = list(self._pguess)
            if type(self._cnames) is not list:
                self._cnames = list(self._cnames)
            if type(self._constants) is not list:
                self._constants = list(self._constants)
            # remove from the parameters, append to the constants
            self._pnames.pop(n)
            self._pguess.pop(n)
            self._cnames.append(pname)
            self._constants.append(value)
    # regenerate the fit functions to reflect the new constants
    self._update_functions()
    return self
def is_mod_function(mod, fun):
    """Return True if *fun* is a plain function defined in module *mod*.

    http://stackoverflow.com/a/1107150/3004221

    Args:
        mod: the module
        fun: the function
    """
    if not inspect.isfunction(fun):
        return False
    return inspect.getmodule(fun) == mod
def get(self, thread_uuid, uuid):
    """Return the first thread member whose ``userUuid`` equals *uuid*,
    logging it at debug level, or None when no member matches."""
    matches = (member for member in self.list(thread_uuid)
               if member.get('userUuid') == uuid)
    found = next(matches, None)
    if found is not None:
        self.log.debug(found)
    return found
def ziptake(items_list, indexes_list):
    """Apply :func:`take` pairwise over two parallel lists.

    SeeAlso:
        vt.ziptake
    """
    return list(map(take, items_list, indexes_list))
def create_resource(self, resource_form):
    """Creates a new ``Resource``.

    arg:    resource_form (osid.resource.ResourceForm): the form for
            this ``Resource``
    return: (osid.resource.Resource) - the new ``Resource``
    raise:  IllegalState - ``resource_form`` already used in a
            create transaction
    raise:  InvalidArgument - one or more of the form elements is
            invalid
    raise:  NullArgument - ``resource_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``resource_form`` did not originate from
            ``get_resource_form_for_create()``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.create_resource_template
    collection = JSONClientValidated('resource',
                                     collection='Resource',
                                     runtime=self._runtime)
    if not isinstance(resource_form, ABCResourceForm):
        raise errors.InvalidArgument('argument type is not an ResourceForm')
    if resource_form.is_for_update():
        raise errors.InvalidArgument('the ResourceForm is for update only, not create')
    # A form may be used in at most one create transaction; forms handed
    # out by this session are tracked in self._forms (KeyError means the
    # form did not come from this session at all).
    try:
        if self._forms[resource_form.get_id().get_identifier()] == CREATED:
            raise errors.IllegalState('resource_form already used in a create transaction')
    except KeyError:
        raise errors.Unsupported('resource_form did not originate from this session')
    if not resource_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    insert_result = collection.insert_one(resource_form._my_map)
    # Mark the form as consumed so it cannot be reused.
    self._forms[resource_form.get_id().get_identifier()] = CREATED
    # Re-read the inserted document so the returned Resource reflects
    # anything the backend added on insert.
    result = objects.Resource(
        osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
        runtime=self._runtime,
        proxy=self._proxy)
    return result
def _inner_convert_old_schema(self, node, depth):
    """Internal recursion helper for L{_convert_old_schema}.

    @param node: A node in the associative list tree as described in
        _convert_old_schema.  A two tuple of (name, parameter).
    @param depth: The depth that the node is at.  This is important to
        know if we're currently processing a list or a structure.
        ("foo.N" is a list called "foo", "foo.N.fieldname" describes a
        field in a list of structs).
    """
    name, parameter_description = node
    if not isinstance(parameter_description, list):
        # This is a leaf, i.e., an actual L{Parameter} instance.
        return parameter_description
    if depth % 2 == 0:
        # we're processing a structure.
        # (Loop variable renamed: it previously shadowed the `node`
        # parameter.)
        fields = {}
        for child in parameter_description:
            fields[child[0]] = self._inner_convert_old_schema(child, depth + 1)
        return Structure(name, fields=fields)
    else:
        # we're processing a list.  (The isinstance re-check that used to
        # live here was unreachable: non-lists already returned above.)
        if not len(parameter_description) == 1:
            raise ValueError(
                "Multiple different index names specified: %r"
                % ([item[0] for item in parameter_description],))
        subnode = parameter_description[0]
        item = self._inner_convert_old_schema(subnode, depth + 1)
        return List(name=name, item=item, optional=item.optional)
def apply_operator_overloading():
    """Function to apply operator overloading to Publisher class."""
    # operator overloading is (unfortunately) not working for the following
    # cases:
    # int, float, str - should return appropriate type instead of a Publisher
    # len - should return an integer
    # "x in y" - is using __bool__ which is not working with Publisher
    #
    # Binary operators: if the right operand is a Publisher, combine the
    # two streams; otherwise map the stream with the constant operand.
    for method in ('__lt__', '__le__', '__eq__', '__ne__', '__ge__',
                   '__gt__', '__add__', '__and__', '__lshift__', '__mod__',
                   '__mul__', '__pow__', '__rshift__', '__sub__', '__xor__',
                   '__concat__', '__getitem__', '__floordiv__',
                   '__truediv__'):
        # `operation=method` binds the loop variable at definition time,
        # avoiding the late-binding closure pitfall.
        def _op(operand_left, operand_right, operation=method):
            if isinstance(operand_right, Publisher):
                return CombineLatest(operand_left, operand_right,
                                     map_=getattr(operator, operation))
            return _MapConstant(operand_left, operand_right,
                                getattr(operator, operation))
        setattr(Publisher, method, _op)
    # Reflected binary operators (e.g. 1 + publisher): always a constant
    # left operand, mapped with the underlying forward operator.
    for method, _method in (('__radd__', '__add__'), ('__rand__', '__and__'),
                            ('__rlshift__', '__lshift__'),
                            ('__rmod__', '__mod__'), ('__rmul__', '__mul__'),
                            ('__rpow__', '__pow__'),
                            ('__rrshift__', '__rshift__'),
                            ('__rsub__', '__sub__'), ('__rxor__', '__xor__'),
                            ('__rfloordiv__', '__floordiv__'),
                            ('__rtruediv__', '__truediv__')):
        def _op(operand_left, operand_right, operation=_method):
            return _MapConstantReverse(operand_left, operand_right,
                                       getattr(operator, operation))
        setattr(Publisher, method, _op)
    # Unary operators and numeric rounding protocols.
    for method, _method in (('__neg__', operator.neg),
                            ('__pos__', operator.pos),
                            ('__abs__', operator.abs),
                            ('__invert__', operator.invert),
                            ('__round__', round), ('__trunc__', math.trunc),
                            ('__floor__', math.floor),
                            ('__ceil__', math.ceil)):
        def _op_unary(operand, operation=_method):
            return _MapUnary(operand, operation)
        setattr(Publisher, method, _op_unary)

    # Attribute access is forwarded to the publisher's inherited type.
    def _getattr(publisher, attribute_name):
        if not publisher.inherited_type or \
                not hasattr(publisher.inherited_type, attribute_name):
            raise AttributeError('Attribute %r not found' % attribute_name)
        return _GetAttr(publisher, attribute_name)

    setattr(Publisher, '__getattr__', _getattr)
def _rmsprop(self, grads, cache=None, decay_rate=0.95):
    """Uses RMSProp to compute step from gradients.

    Args:
        grads: numpy array of gradients.
        cache: numpy array of same shape as `grads` as RMSProp cache
        decay_rate: How fast to decay cache

    Returns:
        A tuple of
            step: numpy array of the same shape as `grads` giving the
                step.  Note that this does not yet take the learning
                rate into account.
            cache: Updated RMSProp cache.
    """
    new_cache = np.zeros_like(grads) if cache is None else cache
    new_cache = decay_rate * new_cache + (1 - decay_rate) * np.square(grads)
    step = -grads / np.sqrt(new_cache + K.epsilon())
    return step, new_cache
def lookup(self, nick):
    """Looks for the most recent paste by a given nick.

    Returns the uid or None.
    """
    cursor = (self.db.pastes
              .find(dict(nick=nick))
              .sort([('time', pymongo.DESCENDING)])
              .limit(1))
    for record in cursor:
        return record['uid']
    return None
def require_sequence(self) -> None:
    """Require the node to be a sequence."""
    node = self.yaml_node
    if isinstance(node, yaml.SequenceNode):
        return
    raise RecognitionError('{}{}A sequence is required here'.format(
        node.start_mark, os.linesep))
def _add_modifiers(self, sql, blueprint, column):
    """Append the rendered column modifiers to the definition string
    and return it; modifiers without a `_modify_<name>` handler are
    skipped."""
    for modifier in self._modifiers:
        handler = getattr(self, '_modify_%s' % modifier, None)
        if handler is not None:
            sql += handler(blueprint, column)
    return sql
def Tethering_bind(self, port):
    """Function path: Tethering.bind
        Domain: Tethering
        Method name: bind

        Parameters:
            Required arguments:
                'port' (type: integer) -> Port number to bind.
        No return value.

        Description: Request browser port binding.
    """
    assert isinstance(port, (int,)), (
        "Argument 'port' must be of type '['int']'. Received type: '%s'"
        % type(port))
    return self.synchronous_command('Tethering.bind', port=port)
def make_token_post(server, data):
    """Try getting an access token from the server.

    If successful, returns the JSON response.  If unsuccessful (network
    failure or an error payload from the server), raises an
    OAuthException.
    """
    try:
        response = requests.post(server + TOKEN_ENDPOINT, data=data,
                                 timeout=TIMEOUT)
        body = response.json()
    except Exception as exc:
        log.warning('Other error when exchanging code', exc_info=True)
        raise OAuthException(error='Authentication Failed',
                             error_description=str(exc))
    if 'error' not in body:
        return body
    log.error(body)
    raise OAuthException(error=body.get('error', 'Unknown Error'),
                         error_description=body.get('error_description', ''))
def describe_stream(self, stream_arn, first_shard=None):
    """Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`,
    handling continuation tokens.

    :param str stream_arn: Stream arn, usually from the model's
        ``Meta.stream["arn"]``.
    :param str first_shard: *(Optional)* If provided, only shards after
        this shard id will be returned.
    :return: All shards in the stream, or a subset if ``first_shard``
        is provided.
    :rtype: dict
    :raises InvalidStream: if the stream arn does not exist.
    :raises BloopException: on any other service error.
    """
    description = {"Shards": []}
    request = {"StreamArn": stream_arn, "ExclusiveStartShardId": first_shard}
    # boto3 isn't down with literal Nones.
    if first_shard is None:
        request.pop("ExclusiveStartShardId")
    # Page through the shard listing until the service stops returning a
    # continuation token (the `missing` sentinel marks termination).
    while request.get("ExclusiveStartShardId") is not missing:
        try:
            response = self.stream_client.describe_stream(**request)["StreamDescription"]
        except botocore.exceptions.ClientError as error:
            if error.response["Error"]["Code"] == "ResourceNotFoundException":
                raise InvalidStream(f"The stream arn {stream_arn!r} does not exist.") from error
            raise BloopException("Unexpected error while describing stream.") from error
        # Docs aren't clear if the terminal value is null, or won't exist.
        # Since we don't terminate the loop on None, the "or missing" here
        # will ensure we stop on a falsey value.
        request["ExclusiveStartShardId"] = response.pop("LastEvaluatedShardId", None) or missing
        description["Shards"].extend(response.pop("Shards", []))
        # Keep the latest copy of the remaining stream metadata.
        description.update(response)
    return description
def read_objfile(fname):
    """Takes .obj filename and returns dict of object properties for each object in file."""
    # Vertex-style records ('v', 'vt', 'vn') collected file-wide, keyed by prefix.
    verts = defaultdict(list)
    obj_props = []
    with open(fname) as f:
        lines = f.read().splitlines()
    for line in lines:
        if line:
            split_line = line.strip().split(' ', 1)
            if len(split_line) < 2:
                # Prefix with no payload (or bare keyword); nothing to record.
                continue
            prefix, value = split_line[0], split_line[1]
            if prefix == 'o':
                # Start a new object record named by the 'o' statement.
                obj_props.append({})
                obj = obj_props[-1]
                obj['f'] = []
                obj[prefix] = value
            # For files without an 'o' statement
            elif prefix == 'v' and len(obj_props) < 1:
                obj_props.append({})
                obj = obj_props[-1]
                obj['f'] = []
                obj['o'] = fname
            if obj_props:
                if prefix[0] == 'v':
                    # Any vertex-like line: parse payload as floats.
                    verts[prefix].append([float(val) for val in value.split(' ')])
                elif prefix == 'f':
                    obj[prefix].append(parse_mixed_delim_str(value))
                else:
                    obj[prefix] = value
    # Reindex vertices to be in face index order, then remove face indices.
    verts = {key: np.array(value) for key, value in iteritems(verts)}
    for obj in obj_props:
        # NOTE(review): the generator variable below shadows the outer ``verts``
        # dict (harmless in py3 generator scope, but confusing) — consider renaming.
        obj['f'] = tuple(np.array(verts) if verts[0] else tuple() for verts in zip(*obj['f']))
        for idx, vertname in enumerate(['v', 'vt', 'vn']):
            if vertname in verts:
                # .obj face indices are 1-based, hence the ``- 1``.
                obj[vertname] = verts[vertname][obj['f'][idx].flatten() - 1, :]
            else:
                obj[vertname] = tuple()
        del obj['f']
    geoms = {obj['o']: obj for obj in obj_props}
    return geoms
def _get_bcpIn ( self ) :
"""Subclasses may override this method .""" | segment = self . _segment
offCurves = segment . offCurve
if offCurves :
bcp = offCurves [ - 1 ]
x , y = relativeBCPIn ( self . anchor , ( bcp . x , bcp . y ) )
else :
x = y = 0
return ( x , y ) |
def sinatra_path_to_regex(cls, path):
    """Converts a sinatra-style path to a regex with named parameters.

    :param path: Either an already-compiled regex (returned unchanged) or a
        path string such as ``/users/:id`` whose ``:name`` segments become
        named capture groups.
    :return: A compiled regular expression.
    """
    # Return the path if already a (compiled) regex
    if isinstance(path, cls.regex_type):
        return path
    # Build a regular expression string which is split on the '/' character.
    # Each sinatra-style ":param" segment becomes a named group matching word
    # characters; other segments pass through verbatim.
    # NOTE: the pattern must be a raw string — "\w" in a plain string literal
    # is an invalid escape sequence (DeprecationWarning today, SyntaxError in
    # future Python versions).
    segments = [
        r"(?P<{}>\w+)".format(segment[1:]) if cls.sinatra_param_regex.match(segment) else segment
        for segment in path.split('/')
    ]
    return re.compile('/'.join(segments))
def save_snapshot(self, context, snapshot_name, save_memory='No'):
    """Saves virtual machine to a snapshot

    :param context: resource context of the vCenterShell
    :type context: models.QualiDriverModels.ResourceCommandContext
    :param snapshot_name: snapshot name to save to
    :type snapshot_name: str
    :param save_memory: Snapshot the virtual machine's memory. Lookup, Yes/No
    :type save_memory: str
    :return:
    """
    remote_resource = self._parse_remote_model(context)
    snapshot_path = self.command_wrapper.execute_command_with_connection(
        context,
        self.snapshot_saver.save_snapshot,
        remote_resource.vm_uuid,
        snapshot_name,
        save_memory,
    )
    return set_command_result(snapshot_path)
def d_hkl(self, miller_index: Vector3Like) -> float:
    """Returns the distance between the hkl plane and the origin

    Args:
        miller_index ([h, k, l]): Miller index of plane

    Returns:
        d_hkl (float)
    """
    # d = 1 / sqrt(h G* h^T) with G* the reciprocal metric tensor.
    metric = self.reciprocal_lattice_crystallographic.metric_tensor
    index = np.array(miller_index)
    inv_d_squared = dot(dot(index, metric), index.T)
    return 1 / inv_d_squared ** 0.5
def _make_symlink(self, link_name: str, link_target: str):
    '''Make a symlink on the system.'''
    # Ask the writer session where extra resources live; the 'dummy' filename
    # is only used here to discover the directory, not created.
    path = self._file_writer_session.extra_resource_path('dummy')
    if path:
        dir_path = os.path.dirname(path)
        symlink_path = os.path.join(dir_path, link_name)
        # NOTE(review): brace-style placeholders and keyword args suggest a
        # custom logger wrapper rather than stdlib logging — confirm before
        # changing these calls.
        _logger.debug('symlink {} -> {}', symlink_path, link_target)
        os.symlink(link_target, symlink_path)
        _logger.info(_('Created symbolic link {symlink_path} to target {symlink_target}.'), symlink_path=symlink_path, symlink_target=link_target)
def classify_intersection5(s, curve1, curve2):
    """Image for :func:`._surface_helpers.classify_intersection` docstring.

    Plots the two example surfaces with the classification helper for both
    intersection indices, overlays the two "degenerate" intersection
    polygons, marks the intersection point, and saves the result to
    ``classify_intersection5.png``.  No-op when ``NO_IMAGES`` is set.
    """
    if NO_IMAGES:
        return
    surface1 = bezier.Surface.from_nodes(np.asfortranarray([[1.0, 1.5, 2.0, 1.25, 1.75, 1.5], [0.0, 1.0, 0.0, 0.9375, 0.9375, 1.875], ]))
    surface2 = bezier.Surface.from_nodes(np.asfortranarray([[3.0, 1.5, 0.0, 2.25, 0.75, 1.5], [0.0, 1.0, 0.0, -2.0, -2.0, -4.0], ]))
    figure, (ax1, ax2) = plt.subplots(2, 1)
    classify_help(s, curve1, surface1, curve2, surface2, 0, ax=ax1)
    classify_help(s, curve1, surface1, curve2, surface2, 1, ax=ax2)
    # Remove the alpha from the color
    color1 = ax1.patches[0].get_facecolor()[:3]
    color2 = ax1.patches[1].get_facecolor()[:3]
    # Now add the "degenerate" intersection polygons. The first
    # comes from specializing to
    # left1(0.5, 1.0)-left2(0.0, 0.25)-right1(0.375, 0.5)
    surface3 = bezier.Surface.from_nodes(np.asfortranarray([[1.5, 1.75, 2.0, 1.6875, 1.9375, 1.875], [0.5, 0.5, 0.0, 0.5, 0.234375, 0.46875], ]))
    # NOTE: We don't require the intersection polygon be valid.
    surface3.plot(256, ax=ax1)
    # The second comes from specializing to
    # left1(0.0, 0.5)-right1(0.5, 0.625)-left3(0.75, 1.0)
    surface4 = bezier.Surface.from_nodes(np.asfortranarray([[1.0, 1.25, 1.5, 1.0625, 1.3125, 1.125], [0.0, 0.5, 0.5, 0.234375, 0.5, 0.46875], ]))
    # NOTE: We don't require the intersection polygon be valid.
    surface4.plot(256, ax=ax2)
    # Mark the intersection point itself on both axes.
    (int_x,), (int_y,) = curve1.evaluate(s)
    ax1.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
    ax2.plot([int_x], [int_y], color=color2, linestyle="None", marker="o")
    for ax in (ax1, ax2):
        ax.axis("scaled")
        ax.set_xlim(-0.0625, 3.0625)
        ax.set_ylim(-0.0625, 0.5625)
    plt.setp(ax1.get_xticklabels(), visible=False)
    figure.tight_layout(h_pad=-7.0)
    save_image(figure, "classify_intersection5.png")
def getAnalysisRequestTemplatesInfo(self):
    """Returns a list of dicts with the analysis request templates information

    [{'uid': 'xxxx', 'id': 'xxxx', 'title': 'xxx', 'url': 'xxx'}, ...]
    """
    arts_list = []
    # The portal_catalog tool is loop-invariant: look it up once instead of
    # once per template UID (the original re-fetched it on every iteration).
    pc = getToolByName(self.context, 'portal_catalog')
    for art in self.context.ar_templates:
        contentFilter = {'portal_type': 'ARTemplate', 'UID': art}
        art_brain = pc(contentFilter)
        # Only record templates that resolve to exactly one catalog entry.
        if len(art_brain) == 1:
            art_obj = art_brain[0].getObject()
            arts_list.append({
                'uid': art_obj.UID(),
                'id': art_obj.id,
                'title': art_obj.title,
                'url': art_obj.absolute_url(),
            })
    return arts_list
def put(self, value, priority=100):
    """Put a task into the queue.

    Args:
        value (str): Task data.
        priority (int): An optional priority as an integer with at most 3 digits.
            Lower values signify higher priority.
    """
    # Zero-pad the priority so lexicographic key order matches priority order.
    name = '{prefix}{prio:03d}_{count}'.format(
        prefix=self.TASK_PREFIX, prio=priority, count=self._counter)
    self._client.kv[posixpath.join(self._queue_path, name)] = value
def delete_orderrun(backend, orderrun_id):
    """Delete the orderrun specified by the argument."""
    message = '%s - Deleting orderrun %s' % (get_datetime(), orderrun_id)
    click.secho(message, fg='green')
    result = DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip())
    check_and_print(result)
def setPgConfigOptions(**kwargs):
    """Sets the PyQtGraph config options and emits a log message"""
    # Log each option individually before applying them all at once.
    for option, setting in kwargs.items():
        logger.debug("Setting PyQtGraph config option: {} = {}".format(option, setting))
    pg.setConfigOptions(**kwargs)
def finalize_content(self):
    """Finalize the additions."""
    # No further writes are accepted once finalized.
    self.write_closed = True
    # Decode the accumulated raw bytes with the document's declared encoding,
    # then parse and render the output.
    body = self.raw_body.decode(self.encoding)
    self._init_xml(body)
    self._form_output()
def is_ancestor_of_book(self, id_, book_id):
    """Tests if an ``Id`` is an ancestor of a book.

    arg:    id (osid.id.Id): an ``Id``
    arg:    book_id (osid.id.Id): the ``Id`` of a book
    return: (boolean) - ``true`` if this ``id`` is an ancestor of
            ``book_id,`` ``false`` otherwise
    raise:  NotFound - ``book_id`` is not found
    raise:  NullArgument - ``id`` or ``book_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_ancestor_of_bin
    # Prefer a configured catalog session; otherwise delegate to the
    # hierarchy session.
    catalog_session = self._catalog_session
    if catalog_session is None:
        return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=book_id)
    return catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=book_id)
def OnGoToCell(self, event):
    """Shift a given cell into view"""
    row, col, tab = event.key
    try:
        self.grid.actions.cursor = row, col, tab
    except ValueError:
        # Cursor assignment rejects out-of-grid coordinates; report via the
        # status bar instead of propagating.
        msg = _("Cell {key} outside grid shape {shape}").format(key=event.key, shape=self.grid.code_array.shape)
        post_command_event(self.grid.main_window, self.grid.StatusBarMsg, text=msg)
        event.Skip()
        return
    self.grid.MakeCellVisible(row, col)
    # Let wx continue normal event processing.
    event.Skip()
def unmarshal(self, value, custom_formatters=None, strict=True):
    """Unmarshal parameter from the value."""
    if self.deprecated:
        warnings.warn("The schema is deprecated", DeprecationWarning)
    result = self.cast(value, custom_formatters=custom_formatters, strict=strict)
    # A missing optional value unmarshals to None.
    if result is None:
        if not self.required:
            return None
    # Enforce enum membership when an enum is declared.
    if self.enum:
        if result not in self.enum:
            raise InvalidSchemaValue("Value {value} not in enum choices: {type}", value, self.enum)
    return result
def get_documentation(self, element, namespace=None, schema_str=None):
    """**Helper method:** should return an schema specific documentation
    given an element parsing or getting the `Clark's Notation`_
    `{url:schema}Element` from the message error on validate method.

    :param str element: Element string following the Clark's Notation
    :param dict namespace: Element string following the Clark's Notation
    :returns: The documentation text if exists
    :rtype: unicode

    .. _`Clark's Notation`: http://effbot.org/zone/element-namespaces.htm
    """
    # Default to the XML Schema namespace when none is supplied.
    if namespace is None:
        namespace = {'xs': 'http://www.w3.org/2001/XMLSchema'}
    # NOTE(review): ``schema_str`` is accepted but never used; the parsed
    # schema always comes from ``self.schema`` — confirm intent.
    schema_root = etree.parse(StringIO(self.schema))
    document = schema_root.xpath(self.get_element_from_clark(element), namespaces=namespace)
    # Empty xpath result or a None .text both collapse to ''.
    return document and document[0].text or ''
def _format_str(format_int):
    """Return the string representation of a given numeric format.

    Searches the module-level ``_formats``, ``_subtypes`` and ``_endians``
    mappings for a value equal to ``format_int`` and returns the matching
    key, or ``'n/a'`` when no mapping contains it.
    """
    for dictionary in _formats, _subtypes, _endians:
        for name, code in dictionary.items():
            if code == format_int:
                return name
    # No dictionary contained the value. An unconditional fallback return is
    # clearer (and safer) than the original for/else, which had no matching
    # ``break`` — and would be outright wrong if ever attached to the inner
    # loop, returning 'n/a' after scanning only the first dictionary.
    return 'n/a'
def description(self, request, tag):
    """Render the description of the wrapped L{Parameter} instance."""
    # Only add the description as a child when one is present; the (possibly
    # mutated) tag is always returned.
    desc = self.parameter.description
    if desc is not None:
        tag[desc]
    return tag
def image_save_buffer_fix(maxblock=1048576):
    """Contextmanager that temporarily sets ``ImageFile.MAXBLOCK``.

    The previous value is restored when the context exits, even on error.
    """
    previous = ImageFile.MAXBLOCK
    ImageFile.MAXBLOCK = maxblock
    try:
        yield
    finally:
        ImageFile.MAXBLOCK = previous
def _user_to_rgba(color, expand=True, clip=False):
    """Convert color(s) from any set of fmts (str/hex/arr) to RGB(A) array"""
    # None means transparent black.
    if color is None:
        color = np.zeros(4, np.float32)
    if isinstance(color, string_types):
        color = _string_to_rgb(color)
    elif isinstance(color, ColorArray):
        color = color.rgba
    # We have to treat this specially
    elif isinstance(color, (list, tuple)):
        if any(isinstance(c, string_types) for c in color):
            # Convert each string entry recursively; each result must be a
            # single color (one row), otherwise the nesting is ambiguous.
            color = [_user_to_rgba(c, expand=expand, clip=clip) for c in color]
            if any(len(c) > 1 for c in color):
                raise RuntimeError('could not parse colors, are they nested?')
            color = [c[0] for c in color]
    # Normalize to a 2D float32 array of shape (n_colors, 3 or 4).
    color = np.atleast_2d(color).astype(np.float32)
    if color.shape[1] not in (3, 4):
        raise ValueError('color must have three or four elements')
    if expand and color.shape[1] == 3:  # only expand if requested
        # Append an opaque alpha channel.
        color = np.concatenate((color, np.ones((color.shape[0], 1))), axis=1)
    if color.min() < 0 or color.max() > 1:
        if clip:
            color = np.clip(color, 0, 1)
        else:
            raise ValueError("Color values must be between 0 and 1 (or use "
                             "clip=True to automatically clip the values).")
    return color
def query_geonames_country(self, placename, country):
    """Like query_geonames, but this time limited to a specified country.

    :param str placename: name to search for
    :param str country: 3-letter country code used to filter ``country_code3``
    :returns: structured results from ``utilities.structure_results``
    """
    # first, try for an exact phrase match
    q = {"multi_match": {"query": placename, "fields": ['name^5', 'asciiname^5', 'alternativenames'], "type": "phrase"}}
    res = self.conn.filter("term", country_code3=country).query(q)[0:50].execute()
    # if no results, use some fuzziness, but still require all terms to be present.
    # Fuzzy is not allowed in "phrase" searches.
    if res.hits.total == 0:
        # tried wrapping this in a {"constant_score": {"query": ... but made it worse
        q = {"multi_match": {"query": placename, "fields": ['name', 'asciiname', 'alternativenames'], "fuzziness": 1, "operator": "and"}}
        res = self.conn.filter("term", country_code3=country).query(q)[0:50].execute()
    out = utilities.structure_results(res)
    return out
def get_pvalue(self):
    """Returns pval for 1st method, if it exists. Else returns uncorrected pval."""
    # Pick the corrected attribute when a multipletest method was applied.
    attr = "p_{m}".format(m=self.get_method_name()) if self.method_flds else "p_uncorrected"
    return getattr(self, attr)
def p_program_def(t):
    """program_def : PROGRAM ID LBRACE version_def version_def_list RBRACE EQUALS constant SEMI"""
    # NOTE: the docstring above is the yacc grammar rule — its text is
    # functional and must not be edited or moved.
    print("Ignoring program {0:s} = {1:s}".format(t[2], t[8]))
    global name_dict
    id = t[2]  # NOTE(review): shadows the builtin ``id``
    value = t[8]  # the constant assigned to the program
    lineno = t.lineno(1)
    # Only record the program when its name has not been used before.
    if id_unique(id, 'program', lineno):
        name_dict[id] = const_info(id, value, lineno)
def format(self, altitude=None, deg_char='', min_char='m', sec_char='s'):
    """Format decimal degrees (DD) to degrees minutes seconds (DMS)"""
    symbols = {'deg': deg_char, 'arcmin': min_char, 'arcsec': sec_char}
    lat_hemisphere = 'N' if self.latitude >= 0 else 'S'
    lon_hemisphere = 'E' if self.longitude >= 0 else 'W'
    coordinates = [
        "%s %s" % (format_degrees(abs(self.latitude), symbols=symbols), lat_hemisphere),
        "%s %s" % (format_degrees(abs(self.longitude), symbols=symbols), lon_hemisphere),
    ]
    # altitude=None means "include it only if one is set on the point".
    if altitude is None:
        altitude = bool(self.altitude)
    if altitude:
        # A truthy non-string altitude selects the default unit.
        if not isinstance(altitude, string_compare):
            altitude = 'km'
        coordinates.append(self.format_altitude(altitude))
    return ", ".join(coordinates)
def calc_tkor_v1(self):
    """Adjust the given air temperature values.

    Required control parameters:
      |NHRU|
      |KT|

    Required input sequence:
      |TemL|

    Calculated flux sequence:
      |TKor|

    Basic equation:
      :math:`TKor = KT + TemL`

    Example:

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> nhru(3)
        >>> kt(-2.0, 0.0, 2.0)
        >>> inputs.teml(1.)
        >>> model.calc_tkor_v1()
        >>> fluxes.tkor
        tkor(-1.0, 1.0, 3.0)
    """
    con = self.parameters.control.fastaccess
    inp = self.sequences.inputs.fastaccess
    flu = self.sequences.fluxes.fastaccess
    # Apply the per-zone additive correction KT to the measured temperature.
    for k in range(con.nhru):
        flu.tkor[k] = con.kt[k] + inp.teml
def cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc, wDesc):
    """Return the dimensions of the output tensor given a convolution descriptor.

    Given the convolution descriptor, the input tensor descriptor and the
    filter descriptor, computes the shape of the resulting 4D tensor of a 2D
    convolution — useful for allocating the output before launching the
    actual convolution.

    Parameters
    ----------
    convDesc : cudnnConvolutionDescriptor
        Handle to a previously created convolution descriptor.
    inputTensorDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    wDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.

    Returns
    -------
    n, c, h, w : int
        Number of output images, feature maps per image, and the height and
        width of each output feature map.
    """
    # Output parameters are written through ctypes references.
    out_n, out_c, out_h, out_w = (ctypes.c_int() for _ in range(4))
    status = _libcudnn.cudnnGetConvolution2dForwardOutputDim(
        convDesc, inputTensorDesc, wDesc,
        ctypes.byref(out_n), ctypes.byref(out_c),
        ctypes.byref(out_h), ctypes.byref(out_w))
    cudnnCheckStatus(status)
    return out_n.value, out_c.value, out_h.value, out_w.value
def describe_features(self, traj):
    """Return a list of dictionaries describing the atom pair features.

    Parameters
    ----------
    traj : mdtraj.Trajectory
        The trajectory to describe

    Returns
    -------
    feature_descs : list of dict
        One dict per feature with the residue names, atom indices, residue
        sequence ids, residue ids (0-indexed), featurizer/featuregroup tags
        and the exponent value.
    """
    top = traj.topology
    # Residue indices (0-based) for both atoms of every pair.
    residue_indices = [
        [top.atom(pair[0]).residue.index, top.atom(pair[1]).residue.index]
        for pair in self.atom_indices
    ]
    atom_inds = [[i for i in pair] for pair in self.atom_indices]
    resseqs = [[top.residue(ri).resSeq for ri in rids] for rids in residue_indices]
    resnames = [[top.residue(ri).name for ri in rids] for rids in residue_indices]
    zippy = itertools.product(
        ["AtomPairs"], ["Distance"],
        ["Exponent {}".format(self.exponent)],
        zip(atom_inds, resseqs, residue_indices, resnames))
    return list(dict_maker(zippy))
def samples_to_records(samples, default_keys=None):
    """Convert samples into output CWL records."""
    from bcbio.pipeline import run_info
    # Keys whose values must always be serialized as lists for CWL.
    RECORD_CONVERT_TO_LIST = set(["config__algorithm__tools_on", "config__algorithm__tools_off", "reference__genome_context"])
    all_keys = _get_all_cwlkeys(samples, default_keys)
    out = []
    for data in samples:
        for raw_key in sorted(list(all_keys)):
            # "a__b__c" raw keys address nested dict paths via toolz.
            key = raw_key.split("__")
            if tz.get_in(key, data) is None:
                # Materialize missing keys as explicit None entries.
                data = tz.update_in(data, key, lambda x: None)
            if raw_key not in data["cwl_keys"]:
                data["cwl_keys"].append(raw_key)
            if raw_key in RECORD_CONVERT_TO_LIST:
                val = tz.get_in(key, data)
                if not val:
                    val = []
                elif not isinstance(val, (list, tuple)):
                    val = [val]
                data = tz.update_in(data, key, lambda x: val)
            # Booleans are problematic for CWL serialization, convert into string representation
            if isinstance(tz.get_in(key, data), bool):
                data = tz.update_in(data, key, lambda x: str(tz.get_in(key, data)))
        data["metadata"] = run_info.add_metadata_defaults(data.get("metadata", {}))
        out.append(data)
    return out
def build(documentPath, outputUFOFormatVersion=3, roundGeometry=True, verbose=True,  # not supported
          logPath=None,  # not supported
          progressFunc=None,  # not supported
          processRules=True, logger=None, useVarlib=False, ):
    """Simple builder for UFO designspaces.

    ``documentPath`` may be a single .designspace file or a directory, in
    which case every ``*.designspace`` file inside it is processed.

    Returns a list of ``generateUFO()`` results for each document that built
    successfully; failures are logged (when a logger is given) and skipped.
    """
    import os, glob
    if os.path.isdir(documentPath):
        # process all *.designspace documents in this folder
        todo = glob.glob(os.path.join(documentPath, "*.designspace"))
    else:
        todo = [documentPath]
    results = []
    for path in todo:
        document = DesignSpaceProcessor(ufoVersion=outputUFOFormatVersion)
        document.useVarlib = useVarlib
        document.roundGeometry = roundGeometry
        document.read(path)
        try:
            results.append(document.generateUFO(processRules=processRules))
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt. Keep the best-effort behaviour (continue with
            # the next document) but only for ordinary exceptions.
            if logger:
                logger.exception("ufoProcessor error")
    # (Dropped the unused ``reader = None`` local from the original.)
    return results
def read(self, timeout=20.0):
    """read data on the IN endpoint associated to the HID interface"""
    deadline = time() + timeout
    # Spin (yielding the CPU) until a packet arrives or the deadline passes.
    while not self.rcv_data:
        sleep(0)
        if time() > deadline:
            # Read operations should typically take ~1-2ms.
            # If this exception occurs, then it could indicate
            # a problem in one of the following areas:
            # 1. Bad usb driver causing either a dropped read or write
            # 2. CMSIS-DAP firmware problem cause a dropped read or write
            # 3. CMSIS-DAP is performing a long operation or is being
            #    halted in a debugger
            raise DAPAccessIntf.DeviceError("Read timed out")
    return self.rcv_data.popleft()
def _utc_year ( self ) :
"""Return a fractional UTC year , for convenience when plotting .
An experiment , probably superseded by the ` ` J ` ` attribute below .""" | d = self . _utc_float ( ) - 1721059.5
# d + = offset
C = 365 * 100 + 24
d -= 365
d += d // C - d // ( 4 * C )
d += 365
# Y = d / C * 100
# print ( Y )
K = 365 * 3 + 366
d -= ( d + K * 7 // 8 ) // K
# d - = d / / 1461.0
return d / 365.0 |
def migrate_database(adapter):
    """Migrate an old loqusdb instance to 1.0

    Args:
        adapter

    Returns:
        nr_updated(int): Number of variants that where updated
    """
    all_variants = adapter.get_variants()
    nr_variants = all_variants.count()
    nr_updated = 0
    with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar:
        for variant in bar:
            # Do not update if the variants have the correct format
            if 'chrom' in variant:
                continue
            nr_updated += 1
            # Old-format _id encodes "chrom_start_ref_alt"; split it apart.
            splitted_id = variant['_id'].split('_')
            chrom = splitted_id[0]
            start = int(splitted_id[1])
            ref = splitted_id[2]
            alt = splitted_id[3]
            # Calculate end
            end = start + (max(len(ref), len(alt)) - 1)
            adapter.db.variant.find_one_and_update(
                {'_id': variant['_id']},
                {'$set': {'chrom': chrom, 'start': start, 'end': end}})
    return nr_updated
def _stripe_object_to_refunds ( cls , target_cls , data , charge ) :
"""Retrieves Refunds for a charge
: param target _ cls : The target class to instantiate per invoice item .
: type target _ cls : ` ` Refund ` `
: param data : The data dictionary received from the Stripe API .
: type data : dict
: param charge : The charge object that refunds are for .
: type invoice : ` ` djstripe . models . Refund ` `
: return :""" | refunds = data . get ( "refunds" )
if not refunds :
return [ ]
refund_objs = [ ]
for refund_data in refunds . get ( "data" , [ ] ) :
item , _ = target_cls . _get_or_create_from_stripe_object ( refund_data , refetch = False )
refund_objs . append ( item )
return refund_objs |
async def emit_event(self, event):
    """Publish an event

    :param event: Event object
    """
    self.log.info("publishing event on %s", self.publish_topic)
    # When pub_options request retained events, persist first; if persistence
    # fails the event is not published at all.
    if self.config.extra['config']['pub_options']['retain']:
        try:
            await persist_event(self.publish_topic, event, self.pool)
        except SystemError as error:
            self.log.error(error)
            return
    loop = asyncio.get_event_loop()
    producer = AIOKafkaProducer(loop=loop, bootstrap_servers=self.transport_host)
    await producer.start()
    try:
        # Serialize the event's attributes to JSON bytes for Kafka.
        event = json.dumps(event.__dict__).encode()
        await producer.send_and_wait(self.publish_topic, event)
    finally:
        # Always release the producer's connections.
        await producer.stop()
def from_line(cls, line):
    """:return: New RefLogEntry instance from the given revlog line.
    :param line: line bytes without trailing newline
    :raise ValueError: If line could not be parsed"""
    line = line.decode(defenc)
    # A reflog line is "<info>\t<message>"; the message is optional.
    fields = line.split('\t', 1)
    if len(fields) == 1:
        info, msg = fields[0], None
    elif len(fields) == 2:
        info, msg = fields
    else:
        raise ValueError("Line must have up to two TAB-separated fields."
                         " Got %s" % repr(line))
    # END handle first split
    # Fixed-width layout: 40-char old sha, space, 40-char new sha, space,
    # then the actor and date starting at index 82.
    oldhexsha = info[:40]
    newhexsha = info[41:81]
    for hexsha in (oldhexsha, newhexsha):
        if not cls._re_hexsha_only.match(hexsha):
            raise ValueError("Invalid hexsha: %r" % (hexsha,))
    # END if hexsha re doesn't match
    # END for each hexsha
    # The actor's "Name <email>" field is terminated by '>'.
    email_end = info.find('>', 82)
    if email_end == -1:
        raise ValueError("Missing token: >")
    # END handle missing end brace
    actor = Actor._from_string(info[82:email_end + 1])
    # The remainder (after '> ') holds the timestamp and timezone offset.
    time, tz_offset = parse_date(info[email_end + 2:])
    return RefLogEntry((oldhexsha, newhexsha, actor, (time, tz_offset), msg))
def custom_function(func, input_files, output_file):
    """Calls a custom function which must create the output file.

    The custom function takes 3 parameters: ``input_files``,
    ``output_file`` and a boolean ``release``.
    """
    from .modules import utils
    # Build the compilation spec consumed by the pipeline.
    spec = {
        'dependencies_fn': utils.no_dependencies,
        'compiler_fn': func,
        'input': input_files,
        'output': output_file,
        'kwargs': {},
    }
    return spec
def register(self, entity):
    """Registers a new entity and returns the entity object with an ID"""
    response = self.api.post_entity(entity.serialize)
    # NOTE(review): these prints look like leftover debug output — confirm
    # before removing (kept here to preserve behaviour).
    print(response)
    print()
    status_code = response['status']['code']
    if status_code == 200:
        entity.id = response['id']
    if status_code == 409:
        # entity already exists
        existing_id = next(i.id for i in self.api.agent_entities if i.name == entity.name)
        entity.id = existing_id
        self.update(entity)
    return entity
def makeairplantloop ( data , commdct ) :
"""make the edges for the airloop and the plantloop""" | anode = "epnode"
endnode = "EndNode"
# in plantloop get :
# demand inlet , outlet , branchlist
# supply inlet , outlet , branchlist
plantloops = loops . plantloopfields ( data , commdct )
# splitters
# inlet
# outlet1
# outlet2
splitters = loops . splitterfields ( data , commdct )
# mixer
# outlet
# inlet1
# inlet2
mixers = loops . mixerfields ( data , commdct )
# supply barnchlist
# branch1 - > inlet , outlet
# branch2 - > inlet , outlet
# branch3 - > inlet , outlet
# CONNET INLET OUTLETS
edges = [ ]
# get all branches
branchkey = "branch" . upper ( )
branches = data . dt [ branchkey ]
branch_i_o = { }
for br in branches :
br_name = br [ 1 ]
in_out = loops . branch_inlet_outlet ( data , commdct , br_name )
branch_i_o [ br_name ] = dict ( list ( zip ( [ "inlet" , "outlet" ] , in_out ) ) )
# for br _ name , in _ out in branch _ i _ o . items ( ) :
# edges . append ( ( ( in _ out [ " inlet " ] , anode ) , br _ name ) )
# edges . append ( ( br _ name , ( in _ out [ " outlet " ] , anode ) ) )
# instead of doing the branch
# do the content of the branch
edges = makebranchcomponents ( data , commdct )
# connect splitter to nodes
for splitter in splitters : # splitter _ inlet = inletbranch . node
splittername = splitter [ 0 ]
inletbranchname = splitter [ 1 ]
splitter_inlet = branch_i_o [ inletbranchname ] [ "outlet" ]
# edges = splitter _ inlet - > splittername
edges . append ( ( ( splitter_inlet , anode ) , splittername ) )
# splitter _ outlets = ouletbranches . nodes
outletbranchnames = [ br for br in splitter [ 2 : ] ]
splitter_outlets = [ branch_i_o [ br ] [ "inlet" ] for br in outletbranchnames ]
# edges = [ splittername - > outlet for outlet in splitter _ outlets ]
moreedges = [ ( splittername , ( outlet , anode ) ) for outlet in splitter_outlets ]
edges = edges + moreedges
for mixer in mixers : # mixer _ outlet = outletbranch . node
mixername = mixer [ 0 ]
outletbranchname = mixer [ 1 ]
mixer_outlet = branch_i_o [ outletbranchname ] [ "inlet" ]
# edges = mixername - > mixer _ outlet
edges . append ( ( mixername , ( mixer_outlet , anode ) ) )
# mixer _ inlets = inletbranches . nodes
inletbranchnames = [ br for br in mixer [ 2 : ] ]
mixer_inlets = [ branch_i_o [ br ] [ "outlet" ] for br in inletbranchnames ]
# edges = [ mixername - > inlet for inlet in mixer _ inlets ]
moreedges = [ ( ( inlet , anode ) , mixername ) for inlet in mixer_inlets ]
edges = edges + moreedges
# connect demand and supply side
# for plantloop in plantloops :
# supplyinlet = plantloop [ 1]
# supplyoutlet = plantloop [ 2]
# demandinlet = plantloop [ 4]
# demandoutlet = plantloop [ 5]
# # edges = [ supplyoutlet - > demandinlet , demandoutlet - > supplyinlet ]
# moreedges = [ ( ( supplyoutlet , endnode ) , ( demandinlet , endnode ) ) ,
# ( ( demandoutlet , endnode ) , ( supplyinlet , endnode ) ) ]
# edges = edges + moreedges
# - - - - - air loop stuff - - - - -
# from s _ airloop2 . py
# Get the demand and supply nodes from ' airloophvac '
# in airloophvac get :
# get branch , supplyinlet , supplyoutlet , demandinlet , demandoutlet
objkey = "airloophvac" . upper ( )
fieldlists = [ [ "Branch List Name" , "Supply Side Inlet Node Name" , "Demand Side Outlet Node Name" , "Demand Side Inlet Node Names" , "Supply Side Outlet Node Names" ] ] * loops . objectcount ( data , objkey )
airloophvacs = loops . extractfields ( data , commdct , objkey , fieldlists )
# airloophvac = airloophvacs [ 0]
# in AirLoopHVAC : ZoneSplitter :
# get Name , inlet , all outlets
objkey = "AirLoopHVAC:ZoneSplitter" . upper ( )
singlefields = [ "Name" , "Inlet Node Name" ]
fld = "Outlet %s Node Name"
repeatfields = loops . repeatingfields ( data , commdct , objkey , fld )
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
zonesplitters = loops . extractfields ( data , commdct , objkey , fieldlists )
# in AirLoopHVAC : SupplyPlenum :
# get Name , Zone Name , Zone Node Name , inlet , all outlets
objkey = "AirLoopHVAC:SupplyPlenum" . upper ( )
singlefields = [ "Name" , "Zone Name" , "Zone Node Name" , "Inlet Node Name" ]
fld = "Outlet %s Node Name"
repeatfields = loops . repeatingfields ( data , commdct , objkey , fld )
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
supplyplenums = loops . extractfields ( data , commdct , objkey , fieldlists )
# in AirLoopHVAC : ZoneMixer :
# get Name , outlet , all inlets
objkey = "AirLoopHVAC:ZoneMixer" . upper ( )
singlefields = [ "Name" , "Outlet Node Name" ]
fld = "Inlet %s Node Name"
repeatfields = loops . repeatingfields ( data , commdct , objkey , fld )
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
zonemixers = loops . extractfields ( data , commdct , objkey , fieldlists )
# in AirLoopHVAC : ReturnPlenum :
# get Name , Zone Name , Zone Node Name , outlet , all inlets
objkey = "AirLoopHVAC:ReturnPlenum" . upper ( )
singlefields = [ "Name" , "Zone Name" , "Zone Node Name" , "Outlet Node Name" ]
fld = "Inlet %s Node Name"
repeatfields = loops . repeatingfields ( data , commdct , objkey , fld )
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
returnplenums = loops . extractfields ( data , commdct , objkey , fieldlists )
# connect room to each equip in equiplist
# in ZoneHVAC : EquipmentConnections :
# get Name , equiplist , zoneairnode , returnnode
objkey = "ZoneHVAC:EquipmentConnections" . upper ( )
singlefields = [ "Zone Name" , "Zone Conditioning Equipment List Name" , "Zone Air Node Name" , "Zone Return Air Node Name" ]
repeatfields = [ ]
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
equipconnections = loops . extractfields ( data , commdct , objkey , fieldlists )
# in ZoneHVAC : EquipmentList :
# get Name , all equiptype , all equipnames
objkey = "ZoneHVAC:EquipmentList" . upper ( )
singlefields = [ "Name" , ]
fieldlist = singlefields
flds = [ "Zone Equipment %s Object Type" , "Zone Equipment %s Name" ]
repeatfields = loops . repeatingfields ( data , commdct , objkey , flds )
fieldlist = fieldlist + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
equiplists = loops . extractfields ( data , commdct , objkey , fieldlists )
equiplistdct = dict ( [ ( ep [ 0 ] , ep [ 1 : ] ) for ep in equiplists ] )
for key , equips in list ( equiplistdct . items ( ) ) :
enames = [ equips [ i ] for i in range ( 1 , len ( equips ) , 2 ) ]
equiplistdct [ key ] = enames
# adistuunit - > room
# adistuunit < - VAVreheat
# airinlet - > VAVreheat
# in ZoneHVAC : AirDistributionUnit :
# get Name , equiplist , zoneairnode , returnnode
objkey = "ZoneHVAC:AirDistributionUnit" . upper ( )
singlefields = [ "Name" , "Air Terminal Object Type" , "Air Terminal Name" ]
repeatfields = [ ]
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
adistuunits = loops . extractfields ( data , commdct , objkey , fieldlists )
# code only for AirTerminal : SingleDuct : VAV : Reheat
# get airinletnodes for vavreheats
# in AirTerminal : SingleDuct : VAV : Reheat :
# get Name , airinletnode
adistuinlets = loops . makeadistu_inlets ( data , commdct )
alladistu_comps = [ ]
for key in list ( adistuinlets . keys ( ) ) :
objkey = key . upper ( )
singlefields = [ "Name" ] + adistuinlets [ key ]
repeatfields = [ ]
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
adistu_components = loops . extractfields ( data , commdct , objkey , fieldlists )
alladistu_comps . append ( adistu_components )
# in AirTerminal : SingleDuct : Uncontrolled :
# get Name , airinletnode
objkey = "AirTerminal:SingleDuct:Uncontrolled" . upper ( )
singlefields = [ "Name" , "Zone Supply Air Node Name" ]
repeatfields = [ ]
fieldlist = singlefields + repeatfields
fieldlists = [ fieldlist ] * loops . objectcount ( data , objkey )
uncontrolleds = loops . extractfields ( data , commdct , objkey , fieldlists )
anode = "epnode"
endnode = "EndNode"
# edges = [ ]
# connect demand and supply side
# for airloophvac in airloophvacs :
# supplyinlet = airloophvac [ 1]
# supplyoutlet = airloophvac [ 4]
# demandinlet = airloophvac [ 3]
# demandoutlet = airloophvac [ 2]
# # edges = [ supplyoutlet - > demandinlet , demandoutlet - > supplyinlet ]
# moreedges = [ ( ( supplyoutlet , endnode ) , ( demandinlet , endnode ) ) ,
# ( ( demandoutlet , endnode ) , ( supplyinlet , endnode ) ) ]
# edges = edges + moreedges
# connect zonesplitter to nodes
for zonesplitter in zonesplitters :
name = zonesplitter [ 0 ]
inlet = zonesplitter [ 1 ]
outlets = zonesplitter [ 2 : ]
edges . append ( ( ( inlet , anode ) , name ) )
for outlet in outlets :
edges . append ( ( name , ( outlet , anode ) ) )
# connect supplyplenum to nodes
for supplyplenum in supplyplenums :
name = supplyplenum [ 0 ]
inlet = supplyplenum [ 3 ]
outlets = supplyplenum [ 4 : ]
edges . append ( ( ( inlet , anode ) , name ) )
for outlet in outlets :
edges . append ( ( name , ( outlet , anode ) ) )
# connect zonemixer to nodes
for zonemixer in zonemixers :
name = zonemixer [ 0 ]
outlet = zonemixer [ 1 ]
inlets = zonemixer [ 2 : ]
edges . append ( ( name , ( outlet , anode ) ) )
for inlet in inlets :
edges . append ( ( ( inlet , anode ) , name ) )
# connect returnplenums to nodes
for returnplenum in returnplenums :
name = returnplenum [ 0 ]
outlet = returnplenum [ 3 ]
inlets = returnplenum [ 4 : ]
edges . append ( ( name , ( outlet , anode ) ) )
for inlet in inlets :
edges . append ( ( ( inlet , anode ) , name ) )
# connect room to return node
for equipconnection in equipconnections :
zonename = equipconnection [ 0 ]
returnnode = equipconnection [ - 1 ]
edges . append ( ( zonename , ( returnnode , anode ) ) )
# connect equips to room
for equipconnection in equipconnections :
zonename = equipconnection [ 0 ]
zequiplistname = equipconnection [ 1 ]
for zequip in equiplistdct [ zequiplistname ] :
edges . append ( ( zequip , zonename ) )
# adistuunit < - adistu _ component
for adistuunit in adistuunits :
unitname = adistuunit [ 0 ]
compname = adistuunit [ 2 ]
edges . append ( ( compname , unitname ) )
# airinlet - > adistu _ component
for adistu_comps in alladistu_comps :
for adistu_comp in adistu_comps :
name = adistu_comp [ 0 ]
for airnode in adistu_comp [ 1 : ] :
edges . append ( ( ( airnode , anode ) , name ) )
# supplyairnode - > uncontrolled
for uncontrolled in uncontrolleds :
name = uncontrolled [ 0 ]
airnode = uncontrolled [ 1 ]
edges . append ( ( ( airnode , anode ) , name ) )
# edges = edges + moreedges
return edges |
def _find_base_tds_url ( catalog_url ) :
"""Identify the base URL of the THREDDS server from the catalog URL .
Will retain URL scheme , host , port and username / password when present .""" | url_components = urlparse ( catalog_url )
if url_components . path :
return catalog_url . split ( url_components . path ) [ 0 ]
else :
return catalog_url |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.