signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_cql_models(app, connection=None, keyspace=None):
    """Return the cassandra.cqlengine ``Model`` classes in ``app`` that should
    be synced to ``keyspace``.

    :param app: django models module to scan
    :param connection: connection name
    :param keyspace: keyspace name
    :return: list of all non-abstract cassandra.cqlengine Model classes
        within ``app`` that should be synced to ``keyspace``
    """
    from .models import DjangoCassandraModel
    models = []
    # A lone configured cassandra connection is implicitly the default one.
    single_cassandra_connection = len(list(get_cassandra_connections())) == 1
    is_default_connection = connection == DEFAULT_DB_ALIAS or single_cassandra_connection
    for name, obj in inspect.getmembers(app):
        cql_model_types = (cqlengine.models.Model, DjangoCassandraModel)
        if (inspect.isclass(obj) and issubclass(obj, cql_model_types) and not obj.__abstract__):
            # A model is selected when it names this connection explicitly,
            # when it names no connection and this is the default connection,
            # or when it names no connection but pins this exact keyspace
            # (``and`` binds tighter than ``or`` in the last clause).
            if obj.__connection__ == connection or (obj.__connection__ is None and is_default_connection) or obj.__connection__ is None and obj.__keyspace__ is not None and obj.__keyspace__ == keyspace:
                models.append(obj)
    return models
|
def tasks():
    """Display registered tasks with their queue, one per line.

    Task names are left-justified to the width of the longest name so the
    queue column lines up. Does nothing when no tasks are registered
    (``max()`` would otherwise raise ``ValueError`` on an empty mapping).
    """
    tasks = get_tasks()
    if not tasks:
        return
    size = max(len(name) for name in tasks)
    for name, queue in sorted(tasks.items()):
        print('* {0}: {1}'.format(name.ljust(size), queue))
|
def deconstruct(self):
    """Deconstruct this operation into a (class name, args, kwargs) triple."""
    kwargs = {
        'process': self.process,
        'field': self._raw_field,
        'new_field': self.new_field,
    }
    return self.__class__.__name__, [], kwargs
|
def merge(self, session, checksums, title):
    '''Merges calcs into a new calc called DATASET
    NB: this is the PUBLIC method

    :param session: SQLAlchemy session used for the lookup query
    :param checksums: checksums of the calcs to merge
    :param title: stored as the merged calc's ``standard`` info field
    @returns DATASET, error  (exactly one of the two is None)'''
    calc = Output(calcset=checksums)
    cur_depth = 0
    # Pull depth, grid info and download size for every requested checksum
    # in a single three-table join.
    for nested_depth, grid_item, download_size in session.query(model.Calculation.nested_depth, model.Grid.info, model.Metadata.download_size).filter(model.Calculation.checksum == model.Grid.checksum, model.Grid.checksum == model.Metadata.checksum, model.Calculation.checksum.in_(checksums)).all():
        # Track the deepest nesting seen across the merged calcs.
        if nested_depth > cur_depth:
            cur_depth = nested_depth
        grid_item = json.loads(grid_item)
        for entity in self.hierarchy:
            topic = grid_item.get(entity['source'])
            if not topic:
                continue
            if not isinstance(topic, list):
                topic = [topic]
            # Union the new topics with those already collected (set dedups).
            calc.info[entity['source']] = list(set(calc.info.get(entity['source'], []) + topic))
        calc.download_size += download_size
    # No rows matched: nothing was accumulated, so the inputs were invalid.
    if not calc.download_size:
        return None, 'Wrong parameters provided!'
    # The merged dataset sits one level above its deepest member.
    calc._nested_depth = cur_depth + 1
    calc.info['standard'] = title
    # generate fake checksum
    calc._checksum = calc.get_collective_checksum()
    return calc, None
|
def post_map(self, url, map, auth_map=None):
    """Serialize the dictionary to XML and send it in an HTTP POST request.

    :param url: URL the HTTP request is sent to.
    :param map: dictionary with the HTTP request body data.
    :param auth_map: dictionary with networkAPI authentication data.
    :return: tuple ``(http response code, response body)``.
    :raise ConnectionError: on networkAPI connection failure.
    :raise RestError: on networkAPI access failure.
    """
    body = dumps_networkapi(map)
    return self.post(url, body, 'text/plain', auth_map)
|
def generate_scan_configuration_description(scan_parameters):
    '''Generate scan parameter dictionary. This is the only way to dynamically
    create the table from a dictionary; it cannot be done with
    tables.IsDescription.

    Parameters
    ----------
    scan_parameters : list, tuple
        List of scan parameter names (strings).

    Returns
    -------
    table_description : dict
        Table description.

    Usage
    -----
    pytables.createTable(self.raw_data_file_h5.root, name='scan_parameters',
        description=generate_scan_configuration_description(['PlsrDAC']),
        title='scan_parameters', filters=filter_tables)'''
    fields = []
    for position, name in enumerate(scan_parameters):
        fields.append((name, tb.StringCol(512, pos=position)))
    return np.dtype(fields)
|
def sparql_get(self, owner, id, query, **kwargs):
    """SPARQL query (via GET).

    Executes a SPARQL query against a dataset or data project. By default
    ``application/sparql-results+json`` is returned; set the ``Accept``
    header to choose another result format (XML, RDF, CSV, TSV, ...).

    This method makes a synchronous HTTP request by default. Pass a
    ``callback`` keyword argument to make an asynchronous request instead;
    the callback is invoked with the response and the request thread is
    returned.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str owner: User name and unique identifier of the creator of a
        dataset or project. (required)
    :param str id: Dataset unique identifier. (required)
    :param str query: (required)
    :return: None; if called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous (callback) paths delegate to the
    # same *_with_http_info implementation and return its result unchanged.
    return self.sparql_get_with_http_info(owner, id, query, **kwargs)
|
def _are_cmd_nodes_same ( node1 , node2 ) :
"""Checks to see if two cmddnodes are the same .
Two cmdnodes are defined to be the same if they have the same callbacks /
helptexts / summaries ."""
|
# Everything in node1 should be in node2
for propertytype in node1 :
if ( not propertytype in node2 or node1 [ propertytype ] != node2 [ propertytype ] ) :
return False
return True
|
def __get_stock_row(self, stock: Stock, depth: int) -> 'AssetAllocationViewModel':
    """Build the view model for a single stock row.

    The previous ``-> str`` annotation was wrong: this method returns a
    populated ``AssetAllocationViewModel``, not a formatted string.

    :param stock: the stock to format
    :param depth: indentation depth recorded on the row
    :return: the populated view model
    """
    assert isinstance(stock, Stock)
    view_model = AssetAllocationViewModel()
    view_model.depth = depth
    # Symbol
    view_model.name = stock.symbol
    # Current allocation
    view_model.curr_allocation = stock.curr_alloc
    # Value in base currency
    view_model.curr_value = stock.value_in_base_currency
    # Value in the security's own currency.
    view_model.curr_value_own_currency = stock.value
    view_model.own_currency = stock.currency
    return view_model
|
def get_members(self, retrieve=False):
    '''Get pcdm:hasMember URIs for this resource.

    Args:
        retrieve (bool): if True, issue .refresh() on resource thereby
            confirming existence and retrieving payload'''
    has_members = (
        self.exists
        and hasattr(self.rdf.triples, 'pcdm')
        and hasattr(self.rdf.triples.pcdm, 'hasMember')
    )
    if not has_members:
        return []
    # Parse each member URI through the repository before returning.
    return [self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasMember]
|
def insert(self, loc, column, value, allow_duplicates=False, inplace=False):
    """Insert a column into the molecule at the specified location.

    Wrapper around the :meth:`pandas.DataFrame.insert` method.
    """
    target = self.copy() if not inplace else self
    target._frame.insert(loc, column, value, allow_duplicates=allow_duplicates)
    # In-place mutation follows the stdlib convention of returning None.
    return None if inplace else target
|
async def create_subprocess_with_handle(command, display_handle, *, shell=False, cwd, **kwargs):
    '''Writes subprocess output to a display handle as it comes in, and also
    returns a copy of it as a string. Throws if the subprocess returns an
    error. Note that cwd is a required keyword-only argument, on theory that
    peru should never start child processes "wherever I happen to be running
    right now."'''
    # We're going to get chunks of bytes from the subprocess, and it's possible
    # that one of those chunks ends in the middle of a unicode character. An
    # incremental decoder keeps those dangling bytes around until the next
    # chunk arrives, so that split characters get decoded properly. Use
    # stdout's encoding, but provide a default for the case where stdout has
    # been redirected to a StringIO. (This happens in tests.)
    encoding = sys.stdout.encoding or 'utf8'
    decoder_factory = codecs.getincrementaldecoder(encoding)
    decoder = decoder_factory(errors='replace')
    output_copy = io.StringIO()
    # Display handles are context managers. Entering and exiting the display
    # handle lets the display know when the job starts and stops.
    with display_handle:
        stdin = asyncio.subprocess.DEVNULL
        stdout = asyncio.subprocess.PIPE
        # stderr is interleaved into stdout so the display shows everything.
        stderr = asyncio.subprocess.STDOUT
        if shell:
            proc = await asyncio.create_subprocess_shell(command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
        else:
            proc = await asyncio.create_subprocess_exec(*command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
        # Read all the output from the subprocess as it comes in.
        while True:
            outputbytes = await proc.stdout.read(4096)
            if not outputbytes:
                break
            outputstr = decoder.decode(outputbytes)
            outputstr_unified = _unify_newlines(outputstr)
            display_handle.write(outputstr_unified)
            output_copy.write(outputstr_unified)
        returncode = await proc.wait()
    if returncode != 0:
        raise subprocess.CalledProcessError(returncode, command, output_copy.getvalue())
    if hasattr(decoder, 'buffer'):
        # The utf8 decoder has this attribute, but some others don't.
        assert not decoder.buffer, 'decoder nonempty: ' + repr(decoder.buffer)
    return output_copy.getvalue()
|
def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):
    """Store dled file to definitive place, write INFO file, return path.

    :param resource: the downloaded resource (provides ``.url``)
    :param tmp_dir_path: temp dir expected to contain exactly one file
    :param sha256: checksum of the downloaded file
    :param dl_size: size in bytes of the downloaded file
    :raises NonMatchingChecksumError: when the size/checksum pair does not
        match the registered one and checksum registration is disabled
    """
    fnames = tf.io.gfile.listdir(tmp_dir_path)
    if len(fnames) > 1:
        raise AssertionError('More than one file in %s.' % tmp_dir_path)
    original_fname = fnames[0]
    tmp_path = os.path.join(tmp_dir_path, original_fname)
    self._recorded_sizes_checksums[resource.url] = (dl_size, sha256)
    if self._register_checksums:
        # Registration mode: record the new size/checksum instead of
        # validating against previously stored values.
        self._record_sizes_checksums()
    elif (dl_size, sha256) != self._sizes_checksums.get(resource.url, None):
        raise NonMatchingChecksumError(resource.url, tmp_path)
    download_path = self._get_final_dl_path(resource.url, sha256)
    resource_lib.write_info_file(resource, download_path, self._dataset_name, original_fname)
    # Unconditionally overwrite because either file doesn't exist or
    # FORCE_DOWNLOAD=true
    tf.io.gfile.rename(tmp_path, download_path, overwrite=True)
    tf.io.gfile.rmtree(tmp_dir_path)
    return download_path
|
def pretty_dumps(data):
    """Return json string in pretty format (sorted keys, 4-space indent).

    Tries to keep non-ASCII characters readable (``ensure_ascii=False``);
    if that fails, falls back to ASCII-escaped output.

    **中文文档**

    将字典转化成格式化后的字符串。
    """
    try:
        return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
    except Exception:
        # Narrowed from a bare ``except:``, which would also have swallowed
        # SystemExit / KeyboardInterrupt.
        return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True)
|
def complete(self, default_output=None):
    """Marks this asynchronous Pipeline as complete.

    Args:
        default_output: What value the 'default' output slot should be
            assigned.

    Raises:
        UnexpectedPipelineError: if the slot no longer exists or this method
            was called for a pipeline that is not async.
    """
    # TODO: Enforce that all outputs expected by this async pipeline were
    # filled before this complete() function was called. May required all
    # async functions to declare their outputs upfront.
    #
    # ``async`` became a reserved keyword in Python 3.7, so ``self.async``
    # is a syntax error there; the attribute must be read via getattr().
    if not getattr(self, 'async'):
        raise UnexpectedPipelineError('May only call complete() method for asynchronous pipelines.')
    self._context.fill_slot(self._pipeline_key, self.outputs.default, default_output)
|
def _add_ubridge_ethernet_connection(self, bridge_name, ethernet_interface, block_host_traffic=False):
    """Creates a connection with an Ethernet interface in uBridge.

    :param bridge_name: bridge name in uBridge
    :param ethernet_interface: Ethernet interface name
    :param block_host_traffic: block network traffic originating from the host OS (Windows only)
    """
    if sys.platform.startswith("linux") and block_host_traffic is False:
        # on Linux we use RAW sockets by default excepting if host traffic must be blocked
        yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface))
    elif sys.platform.startswith("win"):
        # on Windows we use Winpcap/Npcap
        windows_interfaces = interfaces()
        npf_id = None
        source_mac = None
        for interface in windows_interfaces:
            # Winpcap/Npcap uses a NPF ID to identify an interface on Windows
            if "netcard" in interface and ethernet_interface in interface["netcard"]:
                npf_id = interface["id"]
                source_mac = interface["mac_address"]
            elif ethernet_interface in interface["name"]:
                npf_id = interface["id"]
                source_mac = interface["mac_address"]
        if npf_id:
            yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=npf_id))
        else:
            raise NodeError("Could not find NPF id for interface {}".format(ethernet_interface))
        if block_host_traffic:
            if source_mac:
                # Filter out frames whose source MAC is the host's, so host
                # traffic never leaks onto the emulated network.
                yield from self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac))
                log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac))
            else:
                log.warning("Could not block host network traffic on {} (no MAC address found)".format(ethernet_interface))
    else:
        # on other platforms we just rely on the pcap library
        yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface))
        source_mac = None
        for interface in interfaces():
            if interface["name"] == ethernet_interface:
                source_mac = interface["mac_address"]
        if source_mac:
            yield from self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac))
            log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac))
|
def default(session):
    """Default unit test session.

    This is intended to be run **without** an interpreter set, so that the
    current ``python`` (on the ``PATH``) or the version of Python
    corresponding to the ``nox`` binary on the ``PATH`` can run the tests.
    """
    # Install all test dependencies, then install local packages in-place.
    session.install("mock", "pytest", "pytest-cov")
    for local_dep in LOCAL_DEPS:
        session.install("-e", local_dep)
    # Pyarrow does not support Python 3.7
    session.install("-e", ".[all]")
    # IPython does not support Python 2 after version 5.x
    ipython_spec = "ipython==5.5" if session.python == "2.7" else "ipython"
    session.install(ipython_spec)
    # Run py.test against the unit tests.
    session.run(
        "py.test",
        "--quiet",
        "--cov=google.cloud.bigquery",
        "--cov=tests.unit",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=97",
        os.path.join("tests", "unit"),
        *session.posargs
    )
|
def hide_arp_holder_arp_entry_interfacetype_FortyGigabitEthernet_FortyGigabitEthernet(self, **kwargs):
    """Auto Generated Code"""
    # Build the brocade-arp config subtree for a FortyGigabitEthernet entry.
    config = ET.Element("config")
    holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
    entry = ET.SubElement(holder, "arp-entry")
    ET.SubElement(entry, "arp-ip-address").text = kwargs.pop('arp_ip_address')
    iface_type = ET.SubElement(entry, "interfacetype")
    outer = ET.SubElement(iface_type, "FortyGigabitEthernet")
    inner = ET.SubElement(outer, "FortyGigabitEthernet")
    inner.text = kwargs.pop('FortyGigabitEthernet')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def spendables_for_address(self, address):
    """Return a list of Spendable objects for the
    given bitcoin address."""
    URL = self.api_domain + "/unspent?active=%s" % address
    payload = json.loads(urlopen(URL).read().decode("utf8"))
    return [
        Tx.Spendable(
            unspent["value"],
            h2b(unspent["script"]),
            h2b(unspent["tx_hash"]),
            unspent["tx_output_n"],
        )
        for unspent in payload["unspent_outputs"]
    ]
|
def refactor_froms_to_imports(self, offset):
    """Convert imports of the form "from ..." to "import ..."."""
    organizer = ImportOrganizer(self.project)
    return translate_changes(organizer.froms_to_imports(self.resource, offset))
|
def run_with_falcon(self):
    """Run the falcon/HTTP based test server (blocks forever)."""
    from wsgiref import simple_server
    from zengine.server import app
    args = self.manager.args
    server = simple_server.make_server(args.addr, int(args.port), app)
    server.serve_forever()
|
def copy(self):
    """Make a hard copy of `self`.

    :return: A new LiveDefinition instance.
    :rtype: angr.analyses.ddg.LiveDefinitions
    """
    duplicate = LiveDefinitions()
    # Shallow-copy each tracked mapping so the new instance is independent.
    for attr in ('_memory_map', '_register_map', '_defs'):
        setattr(duplicate, attr, getattr(self, attr).copy())
    return duplicate
|
def _dataslice ( self , data , indices ) :
"""Returns slice of data element if the item is deep
indexable . Warns if attempting to slice an object that has not
been declared deep indexable ."""
|
if self . _deep_indexable and isinstance ( data , Dimensioned ) and indices :
return data [ indices ]
elif len ( indices ) > 0 :
self . param . warning ( 'Cannot index into data element, extra data' ' indices ignored.' )
return data
|
def on_exit(self):
    """Prompt whether to save when the user chooses to exit the application."""
    answer = messagebox.askyesnocancel("Exit", "Do you want to save as you quit the application?")
    if answer is None:
        # Cancel: abort the exit entirely.
        return
    if answer:
        self.save()
    self.quit()
    self.destroy()
|
def build(config, archiver, operators):
    """Build the history given a archiver and collection of operators.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`
    :param archiver: The archiver to use
    :type archiver: :namedtuple:`wily.archivers.Archiver`
    :param operators: The list of operators to execute
    :type operators: `list` of :namedtuple:`wily.operators.Operator`
    """
    try:
        logger.debug(f"Using {archiver.name} archiver module")
        archiver = archiver.cls(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except InvalidGitRepositoryError:
        # TODO: This logic shouldn't really be here (SoC)
        logger.info(f"Defaulting back to the filesystem archiver, not a valid git repo")
        archiver = FilesystemArchiver(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except Exception as e:
        if hasattr(e, "message"):
            logger.error(f"Failed to setup archiver: '{e.message}'")
        else:
            logger.error(f"Failed to setup archiver: '{type(e)} - {e}'")
        exit(1)
    state = State(config, archiver=archiver)
    # Check for existence of cache, else provision
    state.ensure_exists()
    index = state.index[archiver.name]
    # remove existing revisions from the list
    revisions = [revision for revision in revisions if revision not in index]
    logger.info(f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'.")
    _op_desc = ",".join([operator.name for operator in operators])
    logger.info(f"Running operators - {_op_desc}")
    # One progress tick per (revision, operator) pair.
    bar = Bar("Processing", max=len(revisions) * len(operators))
    state.operators = operators
    try:
        with multiprocessing.Pool(processes=len(operators)) as pool:
            for revision in revisions:
                # Checkout target revision
                archiver.checkout(revision, config.checkout_options)
                stats = {"operator_data": {}}
                # Run each operator as a separate process
                data = pool.starmap(run_operator, [(operator, revision, config) for operator in operators],)
                # Map the data back into a dictionary
                for operator_name, result in data:
                    # aggregate values to directories
                    roots = []
                    # find all unique directories in the results
                    for entry in result.keys():
                        parent = pathlib.Path(entry).parents[0]
                        if parent not in roots:
                            roots.append(parent)
                    for root in roots:
                        # find all matching entries recursively
                        aggregates = [path for path in result.keys() if root in pathlib.Path(path).parents]
                        result[str(root)] = {}
                        # aggregate values
                        for metric in resolve_operator(operator_name).cls.metrics:
                            func = metric.aggregate
                            values = [result[aggregate][metric.name] for aggregate in aggregates if aggregate in result and metric.name in result[aggregate]]
                            if len(values) > 0:
                                result[str(root)][metric.name] = func(values)
                    stats["operator_data"][operator_name] = result
                    bar.next()
                # Persist this revision's stats into the index.
                ir = index.add(revision, operators=operators)
                ir.store(config, archiver, stats)
            index.save()
            bar.finish()
    except Exception as e:
        logger.error(f"Failed to build cache: '{e}'")
        raise e
    finally:
        # Reset the archive after every run back to the head of the branch
        archiver.finish()
|
def delete_app_info(app_id):
    """Delete app info (app row and its containers) from the local db.

    :param app_id: id of the app to remove.
    :raises RuntimeError: when the deletion fails for any reason.
    """
    try:
        conn = get_conn()
        c = conn.cursor()
        # Parameterized queries: prevents SQL injection via app_id (the
        # original interpolated it straight into the SQL string).
        c.execute("DELETE FROM container WHERE app_id=?", (app_id,))
        c.execute("DELETE FROM app WHERE id=?", (app_id,))
        conn.commit()
    except Exception as e:
        # ``except Exception, e`` was Python 2 syntax; ``as e`` works on both.
        raise RuntimeError('clear old app %s in db failed! %s' % (app_id, e))
|
def query(self, area=None, date=None, raw=None, area_relation='Intersects', order_by=None, limit=None, offset=0, **keywords):
    """Query the OpenSearch API with the coordinates of an area, a date
    interval and any other search keywords accepted by the API.

    Parameters
    ----------
    area : str, optional
        The area of interest formatted as a Well-Known Text string.
    date : tuple of (str or datetime) or str, optional
        A time interval filter based on the Sensing Start Time of the
        products, as a (start, end) tuple, e.g. ("NOW-1DAY", "NOW").
        Timestamps may be Python datetimes or strings such as yyyyMMdd,
        ISO-8601 (yyyy-MM-ddThh:mm:ss.SSSZ / yyyy-MM-ddThh:mm:ssZ), NOW,
        NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.), NOW+<n>DAY(S),
        yyyy-MM-ddThh:mm:ssZ-<n>DAY(S), or NOW/DAY (rounds to the unit).
        A fully formatted string like "[NOW-1DAY TO NOW]" also works.
    raw : str, optional
        Additional query text appended to the query.
    area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
        Relation used for testing the AOI (case insensitive):
        Intersects (default), Contains (AOI inside footprint), or
        IsWithin (footprint inside AOI).
    order_by : str, optional
        Comma-separated list of fields to order by on the server side.
        Prefix a field with '+'/'-' for ascending/descending order
        (ascending when omitted), e.g. "cloudcoverpercentage,-beginposition".
    limit : int, optional
        Maximum number of products returned. Defaults to no limit.
    offset : int, optional
        The number of results to skip. Defaults to 0.
    **keywords
        Additional query parameters, e.g. ``relativeorbitnumber=70``; see
        https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
        for a full list. Two-element tuples express ranges
        (``cloudcoverpercentage=(0, 30)``); ``None`` makes a range one-sided
        and an all-``None`` range is dropped. The ``date`` interval formats
        also apply to 'beginposition', 'endposition', 'date', 'creationdate'
        and 'ingestiondate'.

    Returns
    -------
    dict[string, dict]
        Products keyed by product ID, each value being the product's
        attributes as a dictionary.
    """
    built_query = self.format_query(area, date, raw, area_relation, **keywords)
    self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s", order_by, limit, offset, built_query)
    response, count = self._load_query(built_query, _format_order_by(order_by), limit, offset)
    self.logger.info("Found %s products", count)
    return _parse_opensearch_response(response)
|
def _fullqualname_builtin_py3 ( obj ) :
"""Fully qualified name for ' builtin _ function _ or _ method ' objects in
Python 3."""
|
if obj . __module__ is not None : # built - in functions
module = obj . __module__
else : # built - in methods
if inspect . isclass ( obj . __self__ ) :
module = obj . __self__ . __module__
else :
module = obj . __self__ . __class__ . __module__
return module + '.' + obj . __qualname__
|
def _build_mappings ( self , classes : Sequence [ type ] ) -> Tuple [ Mapping [ type , Sequence [ type ] ] , Mapping [ type , Sequence [ type ] ] ] :
"""Collect all bases and organize into parent / child mappings ."""
|
parents_to_children : MutableMapping [ type , Set [ type ] ] = { }
children_to_parents : MutableMapping [ type , Set [ type ] ] = { }
visited_classes : Set [ type ] = set ( )
class_stack = list ( classes )
while class_stack :
class_ = class_stack . pop ( )
if class_ in visited_classes :
continue
visited_classes . add ( class_ )
for base in class_ . __bases__ :
if base not in visited_classes :
class_stack . append ( base )
parents_to_children . setdefault ( base , set ( ) ) . add ( class_ )
children_to_parents . setdefault ( class_ , set ( ) ) . add ( base )
sorted_parents_to_children : MutableMapping [ type , List [ type ] ] = collections . OrderedDict ( )
for parent , children in sorted ( parents_to_children . items ( ) , key = lambda x : ( x [ 0 ] . __module__ , x [ 0 ] . __name__ ) ) :
sorted_parents_to_children [ parent ] = sorted ( children , key = lambda x : ( x . __module__ , x . __name__ ) )
sorted_children_to_parents : MutableMapping [ type , List [ type ] ] = collections . OrderedDict ( )
for child , parents in sorted ( children_to_parents . items ( ) , key = lambda x : ( x [ 0 ] . __module__ , x [ 0 ] . __name__ ) ) :
sorted_children_to_parents [ child ] = sorted ( parents , key = lambda x : ( x . __module__ , x . __name__ ) )
return sorted_parents_to_children , sorted_children_to_parents
|
def read_trailer(self):
    '''Read the HTTP trailer fields.

    Returns:
        bytes: The trailer data.

    Coroutine.'''
    _logger.debug('Reading chunked trailer.')
    trailer_data_list = []
    # Trailer fields arrive one per readline(); a blank (whitespace-only)
    # line terminates the trailer block and is included in the result.
    while True:
        trailer_data = yield from self._connection.readline()
        trailer_data_list.append(trailer_data)
        if not trailer_data.strip():
            break
    return b''.join(trailer_data_list)
|
def _fix_unmapped(mapped_file, unmapped_file, data):
    """The unmapped.bam file up until at least Tophat 2.1.1 is broken in
    various ways, see https://github.com/cbrueffer/tophat-recondition for
    details. Run TopHat-Recondition to fix these issues."""
    out_file = os.path.splitext(unmapped_file)[0] + "_fixup.bam"
    # Idempotent: skip the (expensive) fixup if it already ran.
    if file_exists(out_file):
        return out_file
    assert os.path.dirname(mapped_file) == os.path.dirname(unmapped_file)
    tophat_out_dir = os.path.dirname(mapped_file)
    tophat_logfile = os.path.join(tophat_out_dir, 'tophat-recondition.log')
    with file_transaction(data, tophat_logfile) as tx_logfile:
        cmd = " ".join([
            config_utils.get_program("tophat-recondition", data),
            "-q",
            "--logfile %s" % tx_logfile,
            "-m %s" % mapped_file,
            "-u %s" % unmapped_file,
            "%s" % tophat_out_dir,
        ])
        do.run(cmd, "Fixing unmapped reads with Tophat-Recondition.", None)
    return out_file
|
def format_author_ed(citation_elements):
    """Standardise to (ed.) and (eds.),
    e.g. remove the extra space in "(ed. )"."""
    for element in citation_elements:
        if element['type'] != 'AUTH':
            continue
        text = element['auth_txt']
        text = text.replace('(ed. )', '(ed.)')
        element['auth_txt'] = text.replace('(eds. )', '(eds.)')
    return citation_elements
|
def _request_access_token(grant_type, client_id=None, client_secret=None, scopes=None, code=None, redirect_url=None, refresh_token=None):
    """Make an HTTP POST to request an access token.

    Parameters
    ----------
    grant_type (str)
        Either 'client_credentials' (Client Credentials Grant) or
        'authorization_code' (Authorization Code Grant).
    client_id (str)
        Your app's Client ID.
    client_secret (str)
        Your app's Client Secret.
    scopes (set)
        Set of permission scopes to request (e.g. {'profile', 'history'}).
    code (str)
        The authorization code to switch for an access token.
        Only used in Authorization Code Grant.
    redirect_url (str)
        The URL that the Uber server will redirect to.
    refresh_token (str)
        Refresh token used to get a new access token.
        Only used for Authorization Code Grant.

    Returns
    -------
    (requests.Response)
        Successful HTTP response from a 'POST' to request an access token.

    Raises
    ------
    ClientError (APIError)
        Thrown if there was an HTTP error.
    """
    url = build_url(auth.AUTH_HOST, auth.ACCESS_TOKEN_PATH)
    # The token endpoint expects scopes as a single space-separated string.
    if isinstance(scopes, set):
        scopes = ' '.join(scopes)
    payload = {
        'grant_type': grant_type,
        'client_id': client_id,
        'client_secret': client_secret,
        'scope': scopes,
        'code': code,
        'redirect_uri': redirect_url,
        'refresh_token': refresh_token,
    }
    response = post(url=url, data=payload)
    if response.status_code != codes.ok:
        raise ClientError(response, 'Failed to request access token: {}.'.format(response.reason))
    return response
|
def convert2(self, imtls, sids):
    """Convert a probability map into a composite array of shape (N,)
    and dtype `imtls.dt`.

    :param imtls:
        DictArray instance
    :param sids:
        the IDs of the sites we are interested in
    :returns:
        an array of curves of shape (N,)
    """
    # Only valid for maps with a single inner dimension (one column).
    assert self.shape_z == 1, self.shape_z
    curves = numpy.zeros(len(sids), imtls.dt)
    for imt in curves.dtype.names:
        curves_by_imt = curves[imt]
        for i, sid in numpy.ndenumerate(sids):
            try:
                pcurve = self[sid]
            except KeyError:
                # site not in the map: the poes will be zeros
                pass
            else:
                # copy the poes at this IMT's levels into the output row
                curves_by_imt[i] = pcurve.array[imtls(imt), 0]
    return curves
|
def as_pyemu_matrix(self, typ=Matrix):
    """Create a pyemu.Matrix from the Ensemble.

    Parameters
    ----------
    typ : pyemu.Matrix or derived type
        the type of matrix to return

    Returns
    -------
    pyemu.Matrix
        matrix holding this ensemble's values, with row names taken from
        the index and column names from the columns
    """
    # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``float`` (float64) is the supported equivalent spelling.
    x = self.values.copy().astype(float)
    return typ(x=x, row_names=list(self.index), col_names=list(self.columns))
|
def all_gather(data):
    """Run all_gather on arbitrary picklable data (not necessarily tensors)

    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank

    NOTE(review): assumes torch.distributed is initialized and each rank
    has a CUDA device available (tensors are staged on "cuda") -- confirm
    before using in a CPU-only setting.
    """
    world_size = get_world_size()
    if world_size == 1:
        # Single process: nothing to gather.
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.IntTensor([tensor.numel()]).to("cuda")
    size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # Trim each rank's padding back to its true payload size before
        # unpickling.
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
|
def _flush_decompressor ( self ) :
'''Return any data left in the decompressor .'''
|
if self . _decompressor :
try :
return self . _decompressor . flush ( )
except zlib . error as error :
raise ProtocolError ( 'zlib flush error: {0}.' . format ( error ) ) from error
else :
return b''
|
def _format_reinit_msg ( self , name , kwargs = None , triggered_directly = True ) :
"""Returns a message that informs about re - initializing a compoment .
Sometimes , the module or optimizer need to be
re - initialized . Not only should the user receive a message
about this but also should they be informed about what
parameters , if any , caused it ."""
|
msg = "Re-initializing {}" . format ( name )
if triggered_directly and kwargs :
msg += ( " because the following parameters were re-set: {}." . format ( ', ' . join ( sorted ( kwargs ) ) ) )
else :
msg += "."
return msg
|
def _process_data ( * kwarg_names ) :
"""Helper function to handle data keyword argument"""
|
def _data_decorator ( func ) :
@ functools . wraps ( func )
def _mark_with_data ( * args , ** kwargs ) :
data = kwargs . pop ( 'data' , None )
if data is None :
return func ( * args , ** kwargs )
else :
data_args = [ data [ i ] if hashable ( data , i ) else i for i in args ]
data_kwargs = { kw : data [ kwargs [ kw ] ] if hashable ( data , kwargs [ kw ] ) else kwargs [ kw ] for kw in set ( kwarg_names ) . intersection ( list ( kwargs . keys ( ) ) ) }
try : # if any of the plots want to use the index _ data , they can
# use it by referring to this attribute .
data_kwargs [ 'index_data' ] = data . index
except AttributeError as e :
pass
kwargs_update = kwargs . copy ( )
kwargs_update . update ( data_kwargs )
return func ( * data_args , ** kwargs_update )
return _mark_with_data
return _data_decorator
|
def parse_url(self) -> RequestUrl:
    """Return the parsed URL object for the current request.

    The :class:`RequestUrl` is built lazily from the request's scheme,
    host and current path on first access, then cached on ``self._URL``.
    """
    if self._URL is None:
        # Assemble the full URL as bytes; ``encode_str`` normalises
        # str -> bytes for the scheme and host parts.
        current_url = b"%s://%s%s" % (encode_str(self.schema), encode_str(self.host), self._current_url)
        self._URL = RequestUrl(current_url)
    return cast(RequestUrl, self._URL)
|
def _update_trsys(self, event):
    """Transform object(s) have changed for this Node; assign these to the
    visual's TransformSystem.

    Rebuilds the three chained mappings (visual->scene, scene->document,
    document->root) and then lets the base ``Node`` handler run.
    """
    doc = self.document_node
    scene = self.scene_node
    root = self.root_node
    # visual -> scene
    self.transforms.visual_transform = self.node_transform(scene)
    # scene -> document
    self.transforms.scene_transform = scene.node_transform(doc)
    # document -> root
    self.transforms.document_transform = doc.node_transform(root)
    # Explicit base-class call (not super()) preserved from the original.
    Node._update_trsys(self, event)
|
def post_comment_ajax(request, using=None):
    """Post a comment, via an Ajax call.

    Returns a JSON-style response from ``_ajax_result``; non-Ajax requests,
    malformed targets, and failed security checks yield 400-style responses.
    ``using`` selects the database alias for the target lookup.
    """
    if not request.is_ajax():
        return HttpResponseBadRequest("Expecting Ajax call")
    # This is copied from django_comments.
    # Basically that view does too much, and doesn't offer a hook to change the rendering.
    # The request object is not passed to next_redirect for example.
    # This is a separate view to integrate both features. Previously this used django-ajaxcomments
    # which is unfortunately not thread-safe (it it changes the comment view per request).

    # Fill out some initial data fields from an authenticated user, if present
    data = request.POST.copy()
    if request.user.is_authenticated:
        if not data.get('name', ''):
            data["name"] = request.user.get_full_name() or request.user.username
        if not data.get('email', ''):
            data["email"] = request.user.email
    # Look up the object we're trying to comment about
    ctype = data.get("content_type")
    object_pk = data.get("object_pk")
    if ctype is None or object_pk is None:
        return CommentPostBadRequest("Missing content_type or object_pk field.")
    try:
        model = apps.get_model(*ctype.split(".", 1))
        target = model._default_manager.using(using).get(pk=object_pk)
    except ValueError:
        return CommentPostBadRequest("Invalid object_pk value: {0}".format(escape(object_pk)))
    except (TypeError, LookupError):
        return CommentPostBadRequest("Invalid content_type value: {0}".format(escape(ctype)))
    except AttributeError:
        return CommentPostBadRequest("The given content-type {0} does not resolve to a valid model.".format(escape(ctype)))
    except ObjectDoesNotExist:
        return CommentPostBadRequest("No object matching content-type {0} and object PK {1} exists.".format(escape(ctype), escape(object_pk)))
    except (ValueError, ValidationError) as e:
        return CommentPostBadRequest("Attempting go get content-type {0!r} and object PK {1!r} exists raised {2}".format(escape(ctype), escape(object_pk), e.__class__.__name__))
    # Do we want to preview the comment?
    is_preview = "preview" in data
    # Construct the comment form
    form = django_comments.get_form()(target, data=data, is_preview=is_preview)
    # Check security information
    if form.security_errors():
        return CommentPostBadRequest("The comment form failed security verification: {0}".format(form.security_errors()))
    # If there are errors or if we requested a preview show the comment
    if is_preview:
        comment = form.get_comment_object() if not form.errors else None
        return _ajax_result(request, form, "preview", comment, object_id=object_pk)
    if form.errors:
        return _ajax_result(request, form, "post", object_id=object_pk)
    # Otherwise create the comment
    comment = form.get_comment_object()
    comment.ip_address = request.META.get("REMOTE_ADDR", None)
    if request.user.is_authenticated:
        comment.user = request.user
    # Signal that the comment is about to be saved
    responses = signals.comment_will_be_posted.send(sender=comment.__class__, comment=comment, request=request)
    for (receiver, response) in responses:
        if response is False:
            return CommentPostBadRequest("comment_will_be_posted receiver {0} killed the comment".format(receiver.__name__))
    # Save the comment and signal that it was saved
    comment.save()
    signals.comment_was_posted.send(sender=comment.__class__, comment=comment, request=request)
    return _ajax_result(request, form, "post", comment, object_id=object_pk)
|
def has_linguist_kwargs(self, kwargs):
    """Return True if the given kwargs contain any linguist lookup.

    Each key of ``kwargs`` is checked with ``self.is_linguist_lookup``;
    the scan short-circuits on the first match.
    """
    # ``any`` replaces the manual loop-and-return; same short-circuiting.
    return any(self.is_linguist_lookup(k) for k in kwargs)
|
def extract_first_elements(nested_list):
    """Return the first element of every sublist in ``nested_list``.

    Examples:
        extract_first_elements([[1, 2], [3, 4, 5], [6, 7, 8, 9]]) -> [1, 3, 6]
        extract_first_elements([[1, 2, 3], [4, 5]]) -> [1, 4]
        extract_first_elements([[9, 8, 1], [1, 2]]) -> [9, 1]

    Args:
        nested_list (List[List[int]]): A list of lists with numbers.

    Returns:
        List[int]: A list with the first element from each sublist.
    """
    firsts = []
    for inner in nested_list:
        firsts.append(inner[0])
    return firsts
|
def run(self, lam, initial_values=None):
    '''Run the graph-fused logit lasso with a fixed lambda penalty.

    lam: the fixed penalty weight.
    initial_values: optional warm start -- a (betas, zs, us) tuple for the
        k == 0 / trails case, otherwise (betas, us).
    Returns the solved variables in the same tuple shape, with betas
    mapped back to natural-parameter (logit) form.
    '''
    # Warm start from the caller's values, or zero-initialize per bin.
    if initial_values is not None:
        if self.k == 0 and self.trails is not None:
            betas, zs, us = initial_values
        else:
            betas, us = initial_values
    else:
        if self.k == 0 and self.trails is not None:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            zs = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
            us = [np.zeros(self.breakpoints[-1], dtype='double') for _ in self.bins]
        else:
            betas = [np.zeros(self.num_nodes, dtype='double') for _ in self.bins]
            us = [np.zeros(self.Dk.shape[0], dtype='double') for _ in self.bins]
    for j, (left, mid, right, trials, successes) in enumerate(self.bins):
        # Optionally solve only a subset of bins.
        if self.bins_allowed is not None and j not in self.bins_allowed:
            continue
        if self.verbose > 2:
            print('\tBin #{0} [{1},{2},{3}]'.format(j, left, mid, right))
        # if self.verbose > 3:
        #     print 'Trials:\n{0}'.format(pretty_str(trials))
        #     print ''
        #     print 'Successes:\n{0}'.format(pretty_str(successes))
        beta = betas[j]
        u = us[j]
        if self.k == 0 and self.trails is not None:
            z = zs[j]
            # Run the graph-fused lasso algorithm
            self.graphfl(len(beta), trials, successes, self.ntrails, self.trails, self.breakpoints, lam, self.alpha, self.inflate, self.max_steps, self.converge, beta, z, u)
        else:
            # Run the graph trend filtering algorithm
            self.graphtf(len(beta), trials, successes, lam, self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz, self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'), self.max_steps, self.converge, beta, u)
        # Clamp probabilities away from {0, 1} for numerical stability
        beta = np.clip(beta, 1e-12, 1 - 1e-12)
        # convert back to natural parameter form (logit)
        betas[j] = -np.log(1. / beta - 1.)
    return (betas, zs, us) if self.k == 0 and self.trails is not None else (betas, us)
|
def sample_forward_transitions(self, batch_size, batch_info, forward_steps: int, discount_factor: float) -> Transitions:
    """Sample transitions from replay buffer with _forward steps_.

    That is, instead of getting a transition s_t -> s_t+1 with reward r,
    get a transition s_t -> s_t+n with sum of intermediate rewards.

    Used in a variant of Deep Q-Learning.
    """
    # Abstract hook: concrete replay-buffer subclasses must implement it.
    raise NotImplementedError
|
def add_404_page(app):
    """Build an extra ``404.html`` page if no ``"404"`` key is in the
    ``html_additional_pages`` config.

    Yields nothing for EPUB builds or when the project already
    configures its own 404 page.
    """
    if isinstance(app.builder, EpubBuilder):
        return
    if "404" in app.config.html_additional_pages:
        return
    yield ("404", {}, "404.html")
|
def mark_best_classifications(errors):
    """Find and persist the best classification for each TextLogError.

    Errors with no match meeting the cut-off score are skipped; for the
    rest, ``mark_best_classification`` records the chosen classified
    failure.
    """
    for error in errors:
        match = get_best_match(error)
        if match:
            mark_best_classification(error, match.classified_failure)
|
def debug(context):
    """Output a whole load of debugging information, including the current
    context and imported modules.

    Sample usage::

        <pre>
        {% debug %}
        </pre>
    """
    from pprint import pformat
    parts = []
    for value in context:
        parts.append(pformat(value))
    parts.append('\n\n')
    parts.append(pformat(sys.modules))
    return ''.join(parts)
|
def validate(self, value, model_instance):
    """Validate the unwrapped ``value.value`` with the parent field's logic.

    ``value`` is presumably an enum-like wrapper whose payload lives in
    ``.value`` -- TODO confirm against the field's to_python.
    """
    # NOTE(review): ``super(self.__class__, self)`` recurses infinitely if
    # a subclass inherits this method without overriding it -- the class
    # should be named explicitly (or zero-arg super() used) instead.
    return super(self.__class__, self).validate(value.value, model_instance)
|
def _gate_height ( self , gate ) :
"""Return the height to use for this gate .
: param string gate : The name of the gate whose height is desired .
: return : Height of the gate .
: rtype : float"""
|
try :
height = self . settings [ 'gates' ] [ gate . __class__ . __name__ ] [ 'height' ]
except KeyError :
height = .5
return height
|
def peer_relation_id():
    '''Return the id of the joined peers relation, or None when no peers
    relation has been joined.'''
    section = metadata().get('peers')
    if not section:
        return None
    for key in section:
        relids = relation_ids(key)
        if relids:
            return relids[0]
    return None
|
def insert(self, action: Action, where: 'Union[int, Delegate.Where]'):
    """Add a new action at a chosen position.

    ``where`` is either a plain index, or a callable (e.g. produced by
    ``Delegate.Where.after``) that maps the current action list to the
    insertion index.  Example: inserting after the action named
    'myfunc'::

        delegate.insert(my_action,
                        where=Delegate.Where.after(lambda action: action.__name__ == 'myfunc'))
    """
    position = where if isinstance(where, int) else where(self.actions)
    self.actions.insert(position, action)
|
def ping(host, timeout=False, return_boolean=False):
    '''Performs a ping to a host

    CLI Example:

    .. code-block:: bash

        salt '*' network.ping archlinux.org

    .. versionadded:: 2016.11.0

    Return a True or False instead of ping output.

    .. code-block:: bash

        salt '*' network.ping archlinux.org return_boolean=True

    Set the time to wait for a response in seconds.

    .. code-block:: bash

        salt '*' network.ping archlinux.org timeout=3
    '''
    if timeout:
        # Windows ping differs by having timeout be for individual echo requests.
        # Divide timeout by tries to mimic BSD behaviour.
        # (seconds -> milliseconds, split across the 4 echo requests)
        timeout = int(timeout) * 1000 // 4
        cmd = ['ping', '-n', '4', '-w', six.text_type(timeout), salt.utils.network.sanitize_host(host)]
    else:
        cmd = ['ping', '-n', '4', salt.utils.network.sanitize_host(host)]
    if return_boolean:
        # Boolean mode keys off the process exit code only.
        ret = __salt__['cmd.run_all'](cmd, python_shell=False)
        if ret['retcode'] != 0:
            return False
        else:
            return True
    else:
        # Default mode returns the raw ping output text.
        return __salt__['cmd.run'](cmd, python_shell=False)
|
def randomTraversal(sensations, numTraversals):
    """Given a list of sensations, return the SDRs that would be obtained by
    numTraversals random traversals of that set of sensations.

    Each traversal is an independent deep copy of ``sensations`` in a
    freshly shuffled order; the traversals are concatenated in sequence.

    Each sensation is a dict mapping cortical column index to a pair of
    SDR's (one location and one feature).
    """
    traversals = []
    for _ in range(numTraversals):
        shuffled = copy.deepcopy(sensations)
        random.shuffle(shuffled)
        traversals.extend(shuffled)
    return traversals
|
def setCurrent(self, state=True):
    """Marks this view as the current source based on the inputed flag.
    This method will return True if the currency changes.

    :return     <bool> | changed
    """
    # No change requested -- nothing to do.
    if self._current == state:
        return False
    widget = self.viewWidget()
    if widget:
        # Demote any sibling view of the same type that currently holds
        # currency, emitting its change/deactivation signals unless blocked.
        for other in widget.findChildren(type(self)):
            if other.isCurrent():
                other._current = False
                if not other.signalsBlocked():
                    other.currentStateChanged.emit(state)
                    other.deactivated.emit()
    self._current = state
    if not self.signalsBlocked():
        self.currentStateChanged.emit(state)
        if state:
            self.activated.emit()
        else:
            self.deactivated.emit()
    return True
|
def tripleexprlabel_to_iriref(self, tripleExprLabel: ShExDocParser.TripleExprLabelContext) -> Union[ShExJ.BNODE, ShExJ.IRIREF]:
    """tripleExprLabel : iri | blankNode

    Converts the parse-tree label to an IRIREF when it carries an iri,
    otherwise to a BNODE built from the blank node's text.
    """
    if not tripleExprLabel.iri():
        return ShExJ.BNODE(tripleExprLabel.blankNode().getText())
    return self.iri_to_iriref(tripleExprLabel.iri())
|
def describe_keyspaces(self, ):
    """list the defined keyspaces in this cluster

    Returns a Deferred that fires with the result once the matching
    response frame arrives.
    """
    # Allocate the next request id and register a Deferred under it so
    # the response dispatcher can resolve it later.
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_describe_keyspaces()
    return d
|
def t_ID(t):
    r'[ a - zA - Z ] [ a - zA - Z0-9 ] * [ $ % ] ?'

    # NOTE: the raw string above is this PLY lexer rule's token regex,
    # not documentation -- it must not be edited as prose.
    # Reserved words take precedence over plain identifiers.
    t.type = reserved.get(t.value.lower(), 'ID')
    callables = {api.constants.CLASS.array: 'ARRAY_ID', }
    if t.type != 'ID':
        # Keywords: normalise the lexeme to the token name itself.
        t.value = t.type
    else:
        # Identifiers known to the symbol table as arrays lex as ARRAY_ID.
        entry = api.global_.SYMBOL_TABLE.get_entry(t.value) if api.global_.SYMBOL_TABLE is not None else None
        if entry:
            t.type = callables.get(entry.class_, t.type)
    if t.type == 'BIN':
        # BIN switches the lexer into the 'bin' state; the keyword token
        # itself is swallowed.
        t.lexer.begin('bin')
        return None
    return t
|
def q_tank(sed_inputs=sed_dict):
    """Return the maximum flow through one sedimentation tank.

    Parameters
    ----------
    sed_inputs : dict
        A dictionary of all of the constant inputs needed for sedimentation
        tank calculations; can be found in sed.yaml

    Returns
    -------
    float
        Maximum flow through one sedimentation tank

    Examples
    --------
    >>> from aide_design.play import *
    """
    # Q = L * v_up * W; upflow velocity and width are converted to SI
    # (m/s, m) before the bare magnitude is taken.
    # NOTE(review): the module-level default ``sed_dict`` is shared across
    # calls -- callers must not mutate it.
    return (sed_inputs['tank']['L'] * sed_inputs['tank']['vel_up'].to(u.m / u.s) * sed_inputs['tank']['W'].to(u.m)).magnitude
|
def environment_var_to_bool(env_var):
    """Convert an environment variable to a boolean.

    Returns False when the variable is a falsy number, a numeric string
    equal to zero, the empty string, or the case-insensitive string
    "false"; True otherwise.
    """
    # Try to see if env_var can be converted to an int
    try:
        env_var = int(env_var)
    except ValueError:
        pass
    if isinstance(env_var, numbers.Number):
        return bool(env_var)
    elif is_a_string(env_var):
        normalized = env_var.lower().strip()
        # BUG FIX: the original used ``env_var in "false"`` -- a substring
        # test that wrongly mapped fragments like "f" or "als" to False.
        # Compare against the exact values instead (empty string stays
        # False, matching the previous behaviour for unset-like values).
        return normalized not in ("false", "")
    else:
        return bool(env_var)
|
def copy_from(self, src, dest):
    """Copy a file or a directory from container or image to host system.

    Directories are copied via ``shutil.copytree``, which requires that
    the target directory does not already exist (OSError on python 2,
    FileExistsError on python 3 otherwise).

    :param src: str, path to a file or a directory within container or image
    :param dest: str, path to a file or a directory on host system
    :return: None
    """
    source = self.p(src)
    if not os.path.isfile(source):
        logger.info("copying directory %s to %s", source, dest)
        shutil.copytree(source, dest)
    else:
        logger.info("copying file %s to %s", source, dest)
        shutil.copy2(source, dest)
|
def elem_add(self, idx=None, name=None, **kwargs):
    """overloading elem_add function of a JIT class

    Lazily loads the wrapped model, then forwards the call to the real
    ``elem_add``.  Returns None when the JIT load did not succeed.
    """
    self.jit_load()
    if self.loaded:
        return self.system.__dict__[self.name].elem_add(idx, name, **kwargs)
|
def _add_plots_to_output(out, data):
    """Add CNVkit plots summarizing called copy number values.

    Each plot helper may return a falsy value (no plot produced), in
    which case that entry is omitted from ``out["plot"]``.
    """
    out["plot"] = {}
    plot_makers = (("diagram", _add_diagram_plot),
                   ("scatter", _add_scatter_plot),
                   ("scatter_global", _add_global_scatter_plot))
    for plot_key, make_plot in plot_makers:
        plot_file = make_plot(out, data)
        if plot_file:
            out["plot"][plot_key] = plot_file
    return out
|
def _DrawTrips(self, triplist, colpar=""):
    """Generates svg polylines for each transit trip.

    Args:
      # Class Trip is defined in transitfeed.py
      [Trip, Trip, ...]
    Returns:
      # A string containing a polyline tag for each trip
      '<polyline class="T" stroke="#336633" points="433,0 ...'
    """
    stations = []
    # Lazily compute the y coordinates of station lines, preferring real
    # travel times and falling back to uniform spacing; give up (with
    # warnings) if neither works.
    if not self._stations and triplist:
        self._stations = self._CalculateYLines(self._TravelTimes(triplist))
        if not self._stations:
            self._AddWarning("Failed to use traveltimes for graph")
            self._stations = self._CalculateYLines(self._Uniform(triplist))
        if not self._stations:
            self._AddWarning("Failed to calculate station distances")
            return
    stations = self._stations
    tmpstrs = []
    servlist = []
    for t in triplist:
        if not colpar:
            # Derive a per-service green shade so services are visually
            # distinguishable when no explicit colour is given.
            if t.service_id not in servlist:
                servlist.append(t.service_id)
            shade = int(servlist.index(t.service_id) * (200 / len(servlist)) + 55)
            color = "#00%s00" % hex(shade)[2:4]
        else:
            color = colpar
        start_offsets = [0]
        first_stop = t.GetTimeStops()[0]
        for j, freq_offset in enumerate(start_offsets):
            if j > 0 and not colpar:
                color = "purple"
            scriptcall = 'onmouseover="LineClick(\'%s\',\'Trip %s starting %s\')"' % (t.trip_id, t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime()))
            tmpstrhead = '<polyline class="T" id="%s" stroke="%s" %s points="' % (str(t.trip_id), color, scriptcall)
            tmpstrs.append(tmpstrhead)
            for i, s in enumerate(t.GetTimeStops()):
                arr_t = s[0]
                dep_t = s[1]
                # Skip stops lacking either an arrival or departure time.
                if arr_t is None or dep_t is None:
                    continue
                # Seconds-since-midnight -> x pixels on the hour grid,
                # shifted by the graph's hour offset; +20 is the margin.
                arr_x = int(arr_t / 3600.0 * self._hour_grid) - self._hour_grid * self._offset
                dep_x = int(dep_t / 3600.0 * self._hour_grid) - self._hour_grid * self._offset
                tmpstrs.append("%s,%s " % (int(arr_x + 20), int(stations[i] + 20)))
                tmpstrs.append("%s,%s " % (int(dep_x + 20), int(stations[i] + 20)))
            tmpstrs.append('" />')
    return "".join(tmpstrs)
|
def bind(cls, origin, handler, *, name=None):
    """Bind this object to the given origin and handler.

    :param origin: An instance of `Origin`.
    :param handler: An instance of `bones.HandlerAPI`.
    :param name: optional name for the new subclass; defaults to this
        class's own name.
    :return: A subclass of this class.
    """
    subclass_name = name if name is not None else cls.__name__
    namespace = {
        "_origin": origin,
        "_handler": handler,
        "__module__": "origin",  # Could do better?
    }
    return type(subclass_name, (cls,), namespace)
|
def do_alarm_definition_delete(mc, args):
    '''Delete the alarm definition.

    Client/HTTP failures are re-raised as a CommandError carrying the
    original message and details.
    '''
    fields = {'alarm_id': args.id}
    try:
        mc.alarm_definitions.delete(**fields)
    except (osc_exc.ClientException, k_exc.HttpError) as he:
        raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
    else:
        print('Successfully deleted alarm definition')
|
def expect_file_to_be_valid_json(self, schema=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect the file at ``self._path`` to contain valid JSON.

    schema: string
        optional JSON schema file on which JSON data file is validated against
    result_format (str or None):
        Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
        For more detail, see :ref:`result_format <result_format>`.
    include_config (boolean):
        If True, then include the expectation config as part of the result object.
        For more detail, see :ref:`include_config`.
    catch_exceptions (boolean or None):
        If True, then catch exceptions and include them as part of the result
        object. For more detail, see :ref:`catch_exceptions`.
    meta (dict or None):
        A JSON-serializable dictionary (nesting allowed) that will
        be included in the output without modification.
        For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
    """
    if schema is None:
        # No schema: success simply means the file parses as JSON.
        try:
            with open(self._path, 'r') as f:
                json.load(f)
            success = True
        except ValueError:
            success = False
    else:
        # Validate the parsed file against the given JSON schema.  Schema
        # problems (jsonschema.SchemaError) and I/O errors propagate to
        # the caller -- the redundant re-raising except clauses of the
        # original are dropped; only validation failures map to False.
        try:
            with open(schema, 'r') as s:
                sdata = json.load(s)
            with open(self._path, 'r') as f:
                jdata = json.load(f)
            jsonschema.validate(jdata, sdata)
            success = True
        except jsonschema.ValidationError:
            success = False
    return {"success": success}
|
def get_bonded_structure(self, structure, decorate=False):
    """Obtain a MoleculeGraph object using this NearNeighbor
    class. Requires the optional dependency networkx
    (pip install networkx).

    Args:
        structure: Molecule object.
        decorate (bool): whether to annotate site properties
            with order parameters using neighbors determined by
            this NearNeighbor class

    Returns: a pymatgen.analysis.graphs.MoleculeGraph object
    """
    # requires optional dependency which is why it's not a top-level import
    from pymatgen.analysis.graphs import MoleculeGraph
    if decorate:
        # Decorate all sites in the underlying structure
        # with site properties that provides information on the
        # coordination number and coordination pattern based
        # on the (current) structure of this graph.
        order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
        structure.add_site_property('order_parameters', order_parameters)
    mg = MoleculeGraph.with_local_env_strategy(structure, self)
    return mg
|
def delete(self, using=None, soft=True, *args, **kwargs):
    """Soft delete object (set its ``is_removed`` field to True).
    Actually delete object if setting ``soft`` to False.
    """
    if not soft:
        # Hard delete: defer to the regular model deletion.
        return super(SoftDeletableModel, self).delete(using=using, *args, **kwargs)
    self.is_removed = True
    self.save(using=using)
|
def append(self, key, value, format=None, append=True, columns=None, dropna=None, **kwargs):
    """Append to Table in file. Node must already exist and be Table
    format.

    Parameters
    ----------
    key : object
    value : {Series, DataFrame}
    format : 'table' is the default
        table(t) : table format
            Write as a PyTables Table structure which may perform
            worse but allow more flexible operations like searching
            / selecting subsets of the data
    append : boolean, default True, append the input data to the
        existing
    data_columns : list of columns, or True, default None
        List of columns to create as indexed data columns for on-disk
        queries, or True to use all columns. By default only the axes
        of the object are indexed. See `here
        <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
    min_itemsize : dict of columns that specify minimum string sizes
    nan_rep : string to use as string nan represenation
    chunksize : size to chunk the writing
    expectedrows : expected TOTAL row size of this table
    encoding : default None, provide an encoding for strings
    dropna : boolean, default False, do not write an ALL nan row to
        the store settable by the option 'io.hdf.dropna_table'

    Notes
    -----
    Does *not* check if data being appended overlaps with existing
    data in the table, so be careful
    """
    # Reject the unsupported ``columns`` keyword early with a pointer to
    # the correct keyword (data_columns).
    if columns is not None:
        raise TypeError("columns is not a supported keyword in append, "
                        "try data_columns")
    # Fill unspecified behaviour from the pandas global options.
    if dropna is None:
        dropna = get_option("io.hdf.dropna_table")
    if format is None:
        format = get_option("io.hdf.default_format") or 'table'
    kwargs = self._validate_format(format, kwargs)
    self._write_to_group(key, value, append=append, dropna=dropna, **kwargs)
|
def _begin_disconnection_action(self, action):
    """Begin a disconnection attempt

    Args:
        action (ConnectionAction): the action object describing what we are
            connecting to and what the result of the operation was
    """
    conn_key = action.data['id']
    callback = action.data['callback']
    # Disconnection may only start from the Idle state; failures are
    # reported through the caller's callback rather than raised.
    if self._get_connection_state(conn_key) != self.Idle:
        callback(conn_key, self.id, False, 'Cannot start disconnection, connection is not idle')
        return
    # Cannot be None since we checked above to make sure it exists
    data = self._get_connection(conn_key)
    data['state'] = self.Disconnecting
    data['microstate'] = None
    data['callback'] = callback
    data['timeout'] = action.timeout
|
def _parse_header_id ( line ) :
"""Pull the transcript or protein identifier from the header line
which starts with ' > '"""
|
if type ( line ) is not binary_type :
raise TypeError ( "Expected header line to be of type %s but got %s" % ( binary_type , type ( line ) ) )
if len ( line ) <= 1 :
raise ValueError ( "No identifier on FASTA line" )
# split line at first space to get the unique identifier for
# this sequence
space_index = line . find ( b" " )
if space_index >= 0 :
identifier = line [ 1 : space_index ]
else :
identifier = line [ 1 : ]
# annoyingly Ensembl83 reformatted the transcript IDs of its
# cDNA FASTA to include sequence version numbers
# . e . g .
# " ENST00000448914.1 " instead of " ENST00000448914"
# So now we have to parse out the identifier
dot_index = identifier . find ( b"." )
if dot_index >= 0 :
identifier = identifier [ : dot_index ]
return identifier . decode ( "ascii" )
|
def get_queryset(self):
    """Returns the list of items for this view.

    Restricts the parent queryset to unapproved posts inside the forums
    whose moderation queue the requesting user may access, newest first.
    """
    # Forums the current user is allowed to moderate.
    forums = self.request.forum_permission_handler.get_moderation_queue_forums(self.request.user, )
    qs = super().get_queryset()
    qs = qs.filter(topic__forum__in=forums, approved=False)
    return qs.order_by('-created')
|
def config(self):
    """Load the passwords from the config file.

    The file handle comes from ``self._open()``; the parsed
    ``RawConfigParser`` is cached on ``self._config`` so the file is read
    at most once.
    """
    if not hasattr(self, '_config'):
        raw_config = configparser.RawConfigParser()
        f = self._open()
        if f:
            # ``readfp`` was deprecated in Python 3.2 and removed in
            # 3.12; ``read_file`` is the supported replacement.
            raw_config.read_file(f)
            f.close()
        self._config = raw_config
    return self._config
|
def is_inside_bounds(value, params):
    """Return ``True`` if ``value`` is contained in ``params``.

    This method supports broadcasting in the sense that for
    ``params.ndim >= 2``, if more than one value is given, the inputs
    are broadcast against each other.

    Parameters
    ----------
    value : `array-like`
        Value(s) to be checked. For several inputs, the final bool
        tells whether all inputs pass the check or not.
    params : `IntervalProd`
        Set in which the value is / the values are supposed to lie.

    Returns
    -------
    is_inside_bounds : bool
        ``True`` is all values lie in ``params``, ``False`` otherwise.

    Examples
    --------
    Check a single point:

    >>> params = odl.IntervalProd([0, 0], [1, 2])
    >>> is_inside_bounds([0, 0], params)
    True
    >>> is_inside_bounds([0, -1], params)
    False

    Using broadcasting:

    >>> pts_ax0 = np.array([0, 0, 1, 0, 1])[:, None]
    >>> pts_ax1 = np.array([2, 0, 1])[None, :]
    >>> is_inside_bounds([pts_ax0, pts_ax1], params)
    True
    >>> pts_ax1 = np.array([-2, 1])[None, :]
    >>> is_inside_bounds([pts_ax0, pts_ax1], params)
    False
    """
    if value in params:
        # Single parameter: set membership answers directly.
        return True
    else:
        if params.ndim == 1:
            # 1-D: flatten and check every value at once.
            return params.contains_all(np.ravel(value))
        else:
            # Flesh out and flatten to check bounds
            bcast_value = np.broadcast_arrays(*value)
            stacked_value = np.vstack(bcast_value)
            flat_value = stacked_value.reshape(params.ndim, -1)
            return params.contains_all(flat_value)
|
def move_tab(self, index_from, index_to):
    """Move a tab (the tab widget itself has already moved its tabs).

    Keeps the bookkeeping lists in sync with the widget's new order and
    refreshes tab labels and plugin title.
    """
    moved_filename = self.filenames.pop(index_from)
    moved_client = self.clients.pop(index_from)
    self.filenames.insert(index_to, moved_filename)
    self.clients.insert(index_to, moved_client)
    self.update_tabs_text()
    self.sig_update_plugin_title.emit()
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    for mapped_subkey in registry_key.GetSubkeys():
        letter = mapped_subkey.name
        if not letter:
            # Subkeys without a name carry no drive letter — nothing to report.
            continue

        regvalue = {'DriveLetter': letter, 'Type': 'Mapped Drive'}

        # Get the remote path if it exists.
        remote_path_value = mapped_subkey.GetValueByName('RemotePath')
        if remote_path_value:
            remote_path = remote_path_value.GetDataAsObject()
            if remote_path.startswith('\\\\'):
                # UNC path: split "\\server\share" into its components.
                server_name, _, share_name = remote_path[2:].partition('\\')
                regvalue['RemoteServer'] = server_name
                regvalue['ShareName'] = '\\{0:s}'.format(share_name.replace('#', '\\'))

        event_data = windows_events.WindowsRegistryEventData()
        event_data.key_path = registry_key.path
        event_data.offset = mapped_subkey.offset
        event_data.regvalue = regvalue
        event_data.source_append = self._SOURCE_APPEND
        event_data.urls = self.URLS

        event = time_events.DateTimeValuesEvent(mapped_subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
|
def put_annotation(self, key, value):
    """Annotate the current active trace entity with a key-value pair.

    Annotations will be indexed for later search query.

    :param str key: annotation key
    :param object value: annotation value. Any type other than
        string/number/bool will be dropped
    """
    current = self.get_trace_entity()
    # Only sampled entities are recorded; skip the rest silently.
    if not current or not current.sampled:
        return
    current.put_annotation(key, value)
|
def sources_to_nr_vars(sources):
    """Convert a source type to number of sources mapping into
    a source numbering variable to number of sources mapping.

    If, for example, we have 'point', 'gaussian' and 'sersic'
    source types, then passing the following dict as an argument

        sources_to_nr_vars({'point': 10, 'gaussian': 20})

    will return an OrderedDict

        {'npsrc': 10, 'ngsrc': 20, 'nssrc': 0}

    :param sources: mapping of source type name to number of sources.
    :raises KeyError: if a source type is not in ``SOURCE_VAR_TYPES``.
    """
    sources = default_sources(**sources)
    try:
        # dict.items() replaces the Python 2-only iteritems(), which
        # raises AttributeError on Python 3.
        return OrderedDict((SOURCE_VAR_TYPES[name], nr)
                           for name, nr in sources.items())
    except KeyError as e:
        raise KeyError(('No source type ' '%s' ' is ' 'registered. Valid source types ' 'are %s') % (e, list(SOURCE_VAR_TYPES.keys())))
|
def clean(self, data):
    """Return a cleaned list of stock closing prices
    (i.e. ``dict(date=datetime.date(2015, 1, 2), price='23.21')``).
    """
    # A single record may arrive as a bare dict; normalize to a list.
    records = data if isinstance(data, list) else [data]
    return [
        {
            'price': record['Adj_Close'],
            'date': datetime.datetime.strptime(record['Date'], '%Y-%m-%d').date(),
        }
        for record in records
    ]
|
def getAllChildNodes(self):
    '''getAllChildNodes - Recursively collect every descendant of this node.

    Gathers all children, their children, and so on, all the way down,
    as a TagCollection.  Use .childNodes for a plain list of direct children.

    @return TagCollection<AdvancedTag> - A TagCollection of all descendants
    '''
    descendants = TagCollection()
    for directChild in self.children:
        # Each direct child, followed by everything beneath it.
        descendants.append(directChild)
        descendants += directChild.getAllChildNodes()
    return descendants
|
def _is_valid_datatype(datatype_instance):
    """Return True if datatype_instance is a valid datatype object, False otherwise."""
    # Plain python types are accepted via the simple-type remapping table.
    if datatype_instance in _simple_type_remap:
        return True
    # Scalar and array datatype objects are valid as-is.
    if isinstance(datatype_instance, (Int64, Double, String, Array)):
        return True
    # Dictionaries are valid only with an Int64 or String key type.
    if isinstance(datatype_instance, Dictionary):
        return isinstance(datatype_instance.key_type, (Int64, String))
    return False
|
def get_hw_virt_ex_property(self, property_p):
    """Returns the value of the specified hardware virtualization boolean property.

    in property_p of type :class:`HWVirtExPropertyType`
        Property type to query.

    return value of type bool
        Property value.

    raises :class:`OleErrorInvalidarg`
        Invalid property.
    """
    # Reject anything that is not a known property-type enum value.
    if not isinstance(property_p, HWVirtExPropertyType):
        raise TypeError("property_p can only be an instance of type HWVirtExPropertyType")
    return self._call("getHWVirtExProperty", in_p=[property_p])
|
def get_instance(self, payload):
    """Build an instance of TaskInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.autopilot.v1.assistant.task.TaskInstance
    :rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
    """
    assistant_sid = self._solution['assistant_sid']
    return TaskInstance(self._version, payload, assistant_sid=assistant_sid)
|
def process_request(self, request):
    """Process a request: open a fresh batcher transaction for it."""
    batcher = PrioritizedBatcher.global_instance()
    # A transaction can be left over from the previous request when an
    # old-style middleware further down the chain raised, so
    # `process_response` was never called.  Described under the 3rd point of
    # differences:
    # https://docs.djangoproject.com/en/1.11/topics/http/middleware/#upgrading-pre-django-1-10-style-middleware
    if batcher.is_started:
        batcher.rollback()
        logger.warning("Uncommited batcher transaction from previous request was rollbacked.")
    batcher.start()
|
def map_frames(self, old_indices):
    '''Rewrite the feature indexes based on the next frame's identities.

    old_indices - for each feature in the new frame, the index of the
    old feature

    Reorders all per-feature state arrays to the new frame's ordering and
    renumbers the sparse state-noise index list, dropping entries whose
    old feature did not survive into the new frame.
    '''
    # Feature counts in the new frame vs. the old frame.
    nfeatures = len(old_indices)
    noldfeatures = len(self.state_vec)
    if nfeatures > 0:
        # Reorder the per-feature state to the new frame's identities.
        self.state_vec = self.state_vec[old_indices]
        self.state_cov = self.state_cov[old_indices]
        self.noise_var = self.noise_var[old_indices]
        # Cached derived vectors (if present) must be remapped the same way.
        if self.has_cached_obs_vec:
            self.obs_vec = self.obs_vec[old_indices]
        if self.has_cached_predicted_state_vec:
            self.p_state_vec = self.p_state_vec[old_indices]
    # NOTE(review): the nesting of the block below relative to
    # `if nfeatures > 0` is ambiguous in the flattened source; it is placed
    # at function level here (behavior is consistent either way when
    # nfeatures == 0 implies no surviving features) — confirm upstream.
    if len(self.state_noise_idx) > 0:
        # We have to renumber the new_state_noise indices and get rid
        # of those that don't map to numbers. Typical index trick here:
        # * create an array for each legal old element: -1 = no match
        # * give each old element in the array the new number
        # * Filter out the "no match" elements.
        reverse_indices = -np.ones(noldfeatures, int)
        reverse_indices[old_indices] = np.arange(nfeatures)
        self.state_noise_idx = reverse_indices[self.state_noise_idx]
        self.state_noise = self.state_noise[self.state_noise_idx != -1, :]
        self.state_noise_idx = self.state_noise_idx[self.state_noise_idx != -1]
|
def simulate(t=1000, poly=(0.,), sinusoids=None, sigma=0, rw=0, irw=0, rrw=0):
    """Simulate a random signal with seasonal (sinusoids), linear and quadratic trend, RW, IRW, and RRW.

    Arguments:
      t (int or list of float): number of samples or time vector, default = 1000
      poly (list of float): polynomial coefficients (in decreasing "order") passed to `numpy.polyval`
        i.e. poly[0] * x**(N-1) + ... + poly[N-1]
      sinusoids (list of list): [[period], [amplitude, period], or [ampl., period, phase]]
      sigma (float): stddev of white noise added to the signal
      rw (float): stddev of the increments of a random walk component
      irw (float): stddev of the increments of an integrated random walk
      rrw (float): stddev of the increments of a doubly-integrated random walk

    >>> len(simulate(poly=(0,), rrw=1))
    1000
    >>> simulate(t=range(3), poly=(1, 2))  # doctest: +NORMALIZE_WHITESPACE
    0    2
    1    3
    2    4
    dtype: float64
    >>> all(simulate(t=50, sinusoids=((1, 2, 3),)) == simulate(t=range(50), sinusoids=((1, 2, 3),)))
    True
    >>> any(simulate(t=100))
    False
    >>> abs(simulate(sinusoids=42.42).values[1] + simulate(sinusoids=42.42).values[-1]) < 1e-10
    True
    >>> simulate(t=17, sinusoids=[42, 16]).min()
    -42.0
    >>> all((simulate(t=range(10), sinusoids=(1, 9, 4.5)) + simulate(t=10, sinusoids=(1, 9))).abs() < 1e-10)
    True
    """
    # Accept either a sample count or an explicit time vector.
    if t and isinstance(t, int):
        t = np.arange(t, dtype=np.float64)
    else:
        t = np.array(t, dtype=np.float64)
    N = len(t)
    poly = poly or (0.,)
    poly = listify(poly)
    # Polynomial trend evaluated over the time vector.
    y = np.polyval(poly, t)
    sinusoids = listify(sinusoids or [])
    # A flat list of numbers is treated as a single sinusoid spec.
    if any(isinstance(ATP, (int, float)) for ATP in sinusoids):
        sinusoids = [sinusoids]
    for ATP in sinusoids:
        # default period is 1 more than the length of the simulated series (no values of the cycle are repeated)
        T = (t[-1] - t[0]) * N / (N - 1.)
        # default amplitude is 1 and phase is 0
        A, P = 1., 0
        # ATP may be (amplitude, period, phase), (amplitude, period), or
        # a 1-element sequence (amplitude); each shorter form falls through
        # via the unpacking exception.
        try:
            A, T, P = ATP
        except (TypeError, ValueError):
            try:
                A, T = ATP
            except (TypeError, ValueError):
                # default period is 1 more than the length of the simulated series
                # (no values of the cycle are repeated)
                A = ATP[0]
        y += A * np.sin(2 * np.pi * (t - P) / T)
    # Optional stochastic components: white noise plus successively
    # integrated random walks (cumulative sums of gaussian increments).
    if sigma:
        y += np.random.normal(0.0, float(sigma), N)
    if rw:
        y += np.random.normal(0.0, float(rw), N).cumsum()
    if irw:
        y += np.random.normal(0.0, float(irw), N).cumsum().cumsum()
    if rrw:
        y += np.random.normal(0.0, float(rrw), N).cumsum().cumsum().cumsum()
    return pd.Series(y, index=t)
|
def get_filename(self):
    """Return ``self.filename`` if set otherwise the template basename with a ``.pdf`` extension.

    :rtype: str
    """
    if self.filename is not None:
        return self.filename
    # Derive "report.pdf" from e.g. "templates/report.html".
    stem = splitext(basename(self.template_name))[0]
    return '{}.pdf'.format(stem)
|
def comparable(self):
    """str: comparable representation of the path specification."""
    location_part = 'location: {0:s}'.format(self.location)
    return self._GetComparable(sub_comparable_string=location_part)
|
def get_setting(self, key, converter=None, choices=None):
    '''Returns the settings value for the provided key.

    If converter is str, unicode, bool or int the settings value will be
    returned converted to the provided type.
    If choices is an instance of list or tuple its item at position of the
    settings value be returned.

    .. note:: It is suggested to always use unicode for text-settings
              because else xbmc returns utf-8 encoded strings.

    :param key: The id of the setting defined in settings.xml.
    :param converter: (Optional) Choices are str, unicode, bool and int.
    :param choices: (Optional) Choices are instances of list or tuple.

    Examples:
        * ``plugin.get_setting('per_page', int)``
        * ``plugin.get_setting('password', unicode)``
        * ``plugin.get_setting('force_viewmode', bool)``
        * ``plugin.get_setting('content', choices=('videos', 'movies'))``
    '''
    # TODO: allow pickling of settings items?
    # TODO: STUB THIS OUT ON CLI
    raw = self.addon.getSetting(id=key)
    if converter is str:
        return raw
    elif converter is unicode:
        # xbmc hands back utf-8 encoded byte strings.
        return raw.decode('utf-8')
    elif converter is bool:
        return raw == 'true'
    elif converter is int:
        return int(raw)
    elif isinstance(choices, (list, tuple)):
        # The stored value is the index into the choices sequence.
        return choices[int(raw)]
    elif converter is None:
        log.warning('No converter provided, unicode should be used, ' 'but returning str value')
        return raw
    raise TypeError('Acceptable converters are str, unicode, bool and ' 'int. Acceptable choices are instances of list ' ' or tuple.')
|
def _from_dict(cls, _dict):
    """Initialize a TranslationModels object from a json dictionary."""
    # 'models' is the only (and required) property.
    if 'models' not in _dict:
        raise ValueError('Required property \'models\' not present in TranslationModels JSON')
    models = [TranslationModel._from_dict(item) for item in _dict.get('models')]
    return cls(models=models)
|
def draw_points(self, *points):
    """Draw multiple points on the current rendering target.

    Args:
        *points (Point): The points to draw.

    Raises:
        SDLError: If an error is encountered.
    """
    count = len(points)
    # Copy each Point's underlying SDL struct into a contiguous C array.
    sdl_points = ffi.new('SDL_Point[]', count)
    for idx, point in enumerate(points):
        sdl_points[idx] = point._ptr[0]
    check_int_err(lib.SDL_RenderDrawPoints(self._ptr, sdl_points, count))
|
def to_dict(self, remove_nones=False):
    """Creates a dictionary representation of the object.

    :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
    :return: The dictionary representation.
    """
    if not remove_nones:
        # The full serialization must be provided by a subclass override.
        raise NotImplementedError()
    return {key: val for key, val in self.to_dict().items() if val is not None}
|
def show_prediction_labels_on_image(img_path, predictions):
    """Shows the face recognition results visually.

    :param img_path: path to image to be recognized
    :param predictions: results of the predict function
    :return:
    """
    image = Image.open(img_path).convert("RGB")
    canvas = ImageDraw.Draw(image)
    for name, (top, right, bottom, left) in predictions:
        # Draw a box around the face using the Pillow module
        canvas.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
        # There's a bug in Pillow where it blows up with non-UTF-8 text
        # when using the default bitmap font
        name = name.encode("UTF-8")
        # Draw a label with a name below the face
        text_width, text_height = canvas.textsize(name)
        canvas.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
        canvas.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
    # Remove the drawing library from memory as per the Pillow docs
    del canvas
    # Display the resulting image
    image.show()
|
def addTable(D):
    """Add any table type to the given dataset. Use prompts to determine index locations and table type.

    :param dict D: Metadata (dataset)
    :return dict D: Metadata (dataset)
    """
    table_types = {"1": "measurement", "2": "summary", "3": "ensemble", "4": "distribution"}
    print("What type of table would you like to add?\n"
          "1: measurement\n"
          "2: summary\n"
          "3: ensemble (under development)\n"
          "4: distribution (under development)\n"
          "\n Note: if you want to add a whole model, use the addModel() function")
    choice = input(">")
    if choice in ("3", "4"):
        # Ensemble/distribution tables are not implemented yet.
        print("I don't know how to do that yet.")
    elif choice in ("1", "2"):
        # read in a csv file. have the user point to it
        print("Locate the CSV file with the values for this table: ")
        _path, _files = browse_dialog_file()
        _path = _confirm_file_path(_files)
        _values = read_csv_from_file(_path)
        _table = _build_table(_values)
        _placement = _prompt_placement(D, table_types[choice])
        D = _put_table(D, _placement, _table)
    else:
        print("That's not a valid option")
    return D
|
def restore(self):
    """Restore the application config files.

    Algorithm:
        if exists mackup/file
          if exists home/file
            are you sure ?
            if sure
              rm home/file
              link mackup/file home/file
          else
            link mackup/file home/file
    """
    # For each file used by the application
    for filename in self.files:
        (home_filepath, mackup_filepath) = self.getFilepaths(filename)
        # If the file exists and is not already pointing to the mackup file
        # and the folder makes sense on the current platform (Don't sync
        # any subfolder of ~/Library on GNU/Linux)
        file_or_dir_exists = (os.path.isfile(mackup_filepath) or os.path.isdir(mackup_filepath))
        pointing_to_mackup = (os.path.islink(home_filepath) and os.path.exists(mackup_filepath) and os.path.samefile(mackup_filepath, home_filepath))
        supported = utils.can_file_be_synced_on_current_platform(filename)
        if file_or_dir_exists and not pointing_to_mackup and supported:
            if self.verbose:
                print("Restoring\n linking {}\n to {} ...".format(home_filepath, mackup_filepath))
            else:
                print("Restoring {} ...".format(filename))
            # Dry-run mode: report what would happen but touch nothing.
            if self.dry_run:
                continue
            # Check if there is already a file in the home folder
            if os.path.exists(home_filepath):
                # Name it right
                if os.path.isfile(home_filepath):
                    file_type = 'file'
                elif os.path.isdir(home_filepath):
                    file_type = 'folder'
                elif os.path.islink(home_filepath):
                    file_type = 'link'
                else:
                    raise ValueError("Unsupported file: {}".format(mackup_filepath))
                # Ask before clobbering the existing file in home.
                if utils.confirm("You already have a {} named {} in your" " home.\nDo you want to replace it with" " your backup ?".format(file_type, filename)):
                    utils.delete(home_filepath)
                    utils.link(mackup_filepath, home_filepath)
            else:
                utils.link(mackup_filepath, home_filepath)
        elif self.verbose:
            # Explain why nothing was done for this file.
            if os.path.exists(home_filepath):
                print("Doing nothing\n {}\n already linked by\n {}".format(mackup_filepath, home_filepath))
            elif os.path.islink(home_filepath):
                print("Doing nothing\n {}\n " "is a broken link, you might want to fix it.".format(home_filepath))
            else:
                print("Doing nothing\n {}\n does not exist".format(mackup_filepath))
|
def wrap_json_body(func=None, *, preserve_raw_body=False):
    """A middleware that parses the body of json requests and
    add it to the request under the `body` attribute (replacing
    the previous value). Can preserve the original value in
    a new attribute `raw_body` if you give preserve_raw_body=True.
    """
    # Used as @wrap_json_body(preserve_raw_body=...): return the decorator.
    if func is None:
        return functools.partial(wrap_json_body, preserve_raw_body=preserve_raw_body)

    @functools.wraps(func)
    def wrapper(request, *args, **kwargs):
        content_type, _params = parse_header(request.headers.get('Content-Type', ''))
        if preserve_raw_body:
            request.raw_body = request.body
        if content_type == "application/json":
            if request.body:
                request.body = json.loads(request.body.decode("utf-8"))
            else:
                request.body = None
        return func(request, *args, **kwargs)

    return wrapper
|
def extractDates(inp, tz=None, now=None):
    """Extract semantic date information from an input string.

    This is a convenience method which would only be used if
    you'd rather not initialize a DateService object.

    Args:
        inp (str): The input string to be parsed.
        tz: An optional Pytz timezone. All datetime objects returned will
            be relative to the supplied timezone, or timezone-less if none
            is supplied.
        now: The time to which all returned datetime objects should be
            relative. For example, if the text is "In 5 hours", the
            datetime returned will be now + datetime.timedelta(hours=5).
            Uses datetime.datetime.now() if none is supplied.

    Returns:
        A list of datetime objects extracted from input.
    """
    return DateService(tz=tz, now=now).extractDates(inp)
|
def handler(self, event, context):
    """An AWS Lambda function which parses specific API Gateway input into a
    WSGI request, feeds it to our WSGI app, procceses the response, and returns
    that back to the API Gateway.

    Dispatches on the event shape: scheduled events, direct command /
    raw-python / Django-management invocations, AWS event-source records,
    Lex bot intents, API Gateway authorizers, Cognito triggers, and finally
    normal HTTP requests through the WSGI app.
    """
    settings = self.settings
    # If in DEBUG mode, log all raw incoming events.
    if settings.DEBUG:
        logger.debug('Zappa Event: {}'.format(event))
    # Set any API Gateway defined Stage Variables
    # as env vars
    if event.get('stageVariables'):
        for key in event['stageVariables'].keys():
            os.environ[str(key)] = event['stageVariables'][key]
    # This is the result of a keep alive, recertify
    # or scheduled event.
    if event.get('detail-type') == u'Scheduled Event':
        whole_function = event['resources'][0].split('/')[-1].split('-')[-1]
        # This is a scheduled function.
        if '.' in whole_function:
            app_function = self.import_module_and_get_function(whole_function)
            # Execute the function!
            return self.run_function(app_function, event, context)
        # Else, let this execute as it were.
    # This is a direct command invocation.
    elif event.get('command', None):
        whole_function = event['command']
        app_function = self.import_module_and_get_function(whole_function)
        result = self.run_function(app_function, event, context)
        print("Result of %s:" % whole_function)
        print(result)
        return result
    # This is a direct, raw python invocation.
    # It's _extremely_ important we don't allow this event source
    # to be overridden by unsanitized, non-admin user input.
    elif event.get('raw_command', None):
        raw_command = event['raw_command']
        exec(raw_command)
        return
    # This is a Django management command invocation.
    elif event.get('manage', None):
        from django.core import management
        try:  # Support both for tests
            from zappa.ext.django_zappa import get_django_wsgi
        except ImportError as e:  # pragma: no cover
            from django_zappa_app import get_django_wsgi
        # Get the Django WSGI app from our extension
        # We don't actually need the function,
        # but we do need to do all of the required setup for it.
        app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
        # Couldn't figure out how to get the value into stdout with StringIO..
        # Read the log for now. :[]
        management.call_command(*event['manage'].split(' '))
        return {}
    # This is an AWS-event triggered invocation.
    elif event.get('Records', None):
        records = event.get('Records')
        result = None
        whole_function = self.get_function_for_aws_event(records[0])
        if whole_function:
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            logger.debug(result)
        else:
            logger.error("Cannot find a function to process the triggered event.")
        return result
    # this is an AWS-event triggered from Lex bot's intent
    elif event.get('bot'):
        result = None
        whole_function = self.get_function_from_bot_intent_trigger(event)
        if whole_function:
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            logger.debug(result)
        else:
            logger.error("Cannot find a function to process the triggered event.")
        return result
    # This is an API Gateway authorizer event
    elif event.get('type') == u'TOKEN':
        whole_function = self.settings.AUTHORIZER_FUNCTION
        if whole_function:
            app_function = self.import_module_and_get_function(whole_function)
            policy = self.run_function(app_function, event, context)
            return policy
        else:
            logger.error("Cannot find a function to process the authorization request.")
            raise Exception('Unauthorized')
    # This is an AWS Cognito Trigger Event
    elif event.get('triggerSource', None):
        triggerSource = event.get('triggerSource')
        whole_function = self.get_function_for_cognito_trigger(triggerSource)
        result = event
        if whole_function:
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            logger.debug(result)
        else:
            logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource))
        return result
    # Normal web app flow
    try:
        # Timing
        time_start = datetime.datetime.now()
        # This is a normal HTTP request
        if event.get('httpMethod', None):
            script_name = ''
            is_elb_context = False
            headers = merge_headers(event)
            if event.get('requestContext', None) and event['requestContext'].get('elb', None):
                # Related: https://github.com/Miserlou/Zappa/issues/1715
                # inputs/outputs for lambda loadbalancer
                # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
                is_elb_context = True
                # host is lower-case when forwarded from ELB
                host = headers.get('host')
                # TODO: pathParameters is a first-class citizen in apigateway but not available without
                # some parsing work for ELB (is this parameter used for anything?)
                event['pathParameters'] = ''
            else:
                if headers:
                    host = headers.get('Host')
                else:
                    host = None
                logger.debug('host found: [{}]'.format(host))
                if host:
                    if 'amazonaws.com' in host:
                        logger.debug('amazonaws found in host')
                        # The path provided in th event doesn't include the
                        # stage, so we must tell Flask to include the API
                        # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
                        script_name = '/' + settings.API_STAGE
                else:
                    # This is a test request sent from the AWS console
                    if settings.DOMAIN:
                        # Assume the requests received will be on the specified
                        # domain. No special handling is required
                        pass
                    else:
                        # Assume the requests received will be to the
                        # amazonaws.com endpoint, so tell Flask to include the
                        # API stage
                        script_name = '/' + settings.API_STAGE
            base_path = getattr(settings, 'BASE_PATH', None)
            # Create the environment for WSGI and handle the request
            environ = create_wsgi_request(event, script_name=script_name, base_path=base_path, trailing_slash=self.trailing_slash, binary_support=settings.BINARY_SUPPORT, context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS)
            # We are always on https on Lambda, so tell our wsgi app that.
            environ['HTTPS'] = 'on'
            environ['wsgi.url_scheme'] = 'https'
            environ['lambda.context'] = context
            environ['lambda.event'] = event
            # Execute the application
            with Response.from_app(self.wsgi_app, environ) as response:
                # This is the object we're going to return.
                # Pack the WSGI response into our special dictionary.
                zappa_returndict = dict()
                # Issue #1715: ALB support. ALB responses must always include
                # base64 encoding and status description
                if is_elb_context:
                    zappa_returndict.setdefault('isBase64Encoded', False)
                    zappa_returndict.setdefault('statusDescription', response.status)
                if response.data:
                    if settings.BINARY_SUPPORT:
                        # NOTE(review): this condition is true for every
                        # mimetype except exactly "application/json", so
                        # even text/* responses get base64-encoded; `and`
                        # looks intended — confirm against upstream Zappa.
                        if not response.mimetype.startswith("text/") or response.mimetype != "application/json":
                            zappa_returndict['body'] = base64.b64encode(response.data).decode('utf-8')
                            zappa_returndict["isBase64Encoded"] = True
                        else:
                            zappa_returndict['body'] = response.data
                    else:
                        zappa_returndict['body'] = response.get_data(as_text=True)
                zappa_returndict['statusCode'] = response.status_code
                if 'headers' in event:
                    zappa_returndict['headers'] = {}
                    for key, value in response.headers:
                        zappa_returndict['headers'][key] = value
                if 'multiValueHeaders' in event:
                    zappa_returndict['multiValueHeaders'] = {}
                    for key, value in response.headers:
                        zappa_returndict['multiValueHeaders'][key] = response.headers.getlist(key)
                # Calculate the total response time,
                # and log it in the Common Log format.
                time_end = datetime.datetime.now()
                delta = time_end - time_start
                response_time_ms = delta.total_seconds() * 1000
                response.content = response.data
                common_log(environ, response, response_time=response_time_ms)
                return zappa_returndict
    except Exception as e:  # pragma: no cover
        # Print statements are visible in the logs either way
        print(e)
        exc_info = sys.exc_info()
        message = ('An uncaught exception happened while servicing this request. ' 'You can investigate this with the `zappa tail` command.')
        # If we didn't even build an app_module, just raise.
        if not settings.DJANGO_SETTINGS:
            try:
                self.app_module
            except NameError as ne:
                # NOTE(review): `ne.message` is a Python 2-only exception
                # attribute; on Python 3 this line itself raises
                # AttributeError — confirm intended Python version.
                message = 'Failed to import module: {}'.format(ne.message)
        # Call exception handler for unhandled exceptions
        exception_handler = self.settings.EXCEPTION_HANDLER
        self._process_exception(exception_handler=exception_handler, event=event, context=context, exception=e)
        # Return this unspecified exception as a 500, using template that API Gateway expects.
        content = collections.OrderedDict()
        content['statusCode'] = 500
        body = {'message': message}
        if settings.DEBUG:
            # only include traceback if debug is on.
            # traceback as a list for readability.
            body['traceback'] = traceback.format_exception(*exc_info)
        content['body'] = json.dumps(str(body), sort_keys=True, indent=4)
        return content
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.