signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def init_argparser_source_registry(
        self, argparser, default=None,
        help=('comma separated list of registries to use for gathering '
              'JavaScript sources from the given Python packages')):
    """Set up the source registry flag on the given argparser."""

    # Primary, documented flag.
    argparser.add_argument(
        '--source-registry', default=default,
        dest=CALMJS_MODULE_REGISTRY_NAMES,
        action=StoreDelimitedList,
        metavar='<registry>[,<registry>[...]]',
        help=help,
    )
    # Hidden plural alias with identical semantics.
    argparser.add_argument(
        '--source-registries', default=default,
        dest=CALMJS_MODULE_REGISTRY_NAMES,
        action=StoreDelimitedList,
        help=SUPPRESS,
    )
|
def from_floats(red, green, blue):
    """Return a new Color built from red/green/blue floats in [0.0, 1.0]."""
    scale = Color.MAX_VALUE
    return Color(int(red * scale), int(green * scale), int(blue * scale))
|
def selectgt(table, field, value, complement=False):
    """Select rows where the given field is greater than the given value."""
    # Wrap the value so mixed-type comparisons behave consistently.
    return selectop(table, field, Comparable(value), operator.gt,
                    complement=complement)
|
def load_method(path, method, class_name=None, instance_creator=None):
    '''Return the function or bound method specified.

    Args:
        path: The path to the module containing the method or function.
        method: The name of the function.
        class_name: The name of the class if the function is a method.
        instance_creator: Dotted name (relative to the module) of a
            callable that returns the class instance.
    '''
    # Load the module
    module = load_module(path)
    if not class_name:
        return getattr(module, method)
    # If a class, create an instance
    class_type = getattr(module, class_name)
    if instance_creator:
        # Walk the dotted path down to the factory callable.  The original
        # loop split ``instance_creator`` (not the shrinking remainder) on
        # every iteration and then discarded the traversal result, so a
        # dotted creator such as ``Factory.build`` could never resolve.
        factory = module
        for part in instance_creator.split('.'):
            factory = getattr(factory, part)
        instance = factory()
    else:
        instance = class_type()
    return getattr(instance, method)
|
def pull_tar(url, name, verify=False):
    '''Execute a ``machinectl pull-raw`` to download a .tar container image,
    and add it to /var/lib/machines as a new container.

    .. note::
        **Requires systemd >= 219**

    url
        URL from which to download the container
    name
        Name for the new container
    verify : False
        Perform signature or checksum verification on the container. See the
        ``machinectl(1)`` man page (section titled "Image Transfer Commands")
        for more information on requirements for image verification. Use
        ``verify=signature`` or ``verify=checksum``; by default no
        verification is performed.

    CLI Examples:

    .. code-block:: bash

        salt myminion nspawn.pull_tar http://foo.domain.tld/containers/archlinux-2015.02.01.tar.gz arch2
    '''
    # Delegate to the shared image-pull helper with the tar transfer type.
    return _pull_image('tar', url, name, verify=verify)
|
def drp_load_data(package, data, confclass=None):
    """Load the DRPS from data."""
    instrument = load_instrument(package, yaml.safe_load(data),
                                 confclass=confclass)
    if instrument.version == 'undefined':
        # Fall back to the package's own version when the DRP omits one.
        pkg = importlib.import_module(package)
        instrument.version = getattr(pkg, '__version__', 'undefined')
    return instrument
|
def _parseDelayImportDirectory(self, rva, size, magic=consts.PE32):
    """Parse the delay imports directory.

    @type rva: int
    @param rva: The RVA where the delay imports directory starts.
    @type size: int
    @param size: The size of the delay imports directory.
    @type magic: int
    @param magic: (Optional) The type of PE. This value could be
        L{consts.PE32} or L{consts.PE64}.
    @rtype: str
    @return: The delay imports directory data.
    """
    # The directory is returned as the raw bytes found at its RVA.
    directory_data = self.getDataAtRva(rva, size)
    return directory_data
|
def run_and_measure_payload(quil_program, qubits, trials, random_seed):
    """REST payload for :py:func:`ForestConnection._run_and_measure`"""
    # Validate the program and the trial count before building the payload.
    if not quil_program:
        raise ValueError("You have attempted to run an empty program."
                         " Please provide gates or measure instructions to your program.")
    if not isinstance(quil_program, Program):
        raise TypeError("quil_program must be a Quil program object")
    qubits = validate_qubit_list(qubits)
    if not isinstance(trials, integer_types):
        raise TypeError("trials must be an integer")

    payload = {
        "type": TYPE_MULTISHOT_MEASURE,
        "qubits": list(qubits),
        "trials": trials,
        "compiled-quil": quil_program.out(),
    }
    if random_seed is not None:
        payload['rng-seed'] = random_seed
    return payload
|
def _checkColumns(self, varBind, **context):
    """Check the consistency of all columns.

    Parameters
    ----------
    varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
        new :py:class:`RowStatus` Managed Object Instance value being set
        on table row

    Other Parameters
    ----------------
    \\*\\*context:
        Query parameters:
        * `cbFun` (callable) - user-supplied callable that is invoked to
          pass the new value of the Managed Object Instance or an error.

    Notes
    -----
    The callback functions (e.g. `cbFun`) have the same signature as this
    method where `varBind` contains the new Managed Object Instance value.
    In case of an error, the `error` key in the `context` dict will contain
    an exception object.

    Assume that row consistency check has been triggered by RowStatus
    columnar object transition into `active` state.
    """
    name, val = varBind

    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: _checkColumns(%s, %r)' % (self, name, val)))

    cbFun = context['cbFun']

    # RowStatus != active -- nothing to check, report success immediately.
    if val != 1:
        cbFun(varBind, **context)
        return

    # Mutable one-element list shared with the nested callback: the number
    # of column reads still outstanding (a list so the closure can mutate it).
    count = [len(self._vars)]

    def _cbFun(varBind, **context):
        count[0] -= 1
        name, val = varBind
        if count[0] >= 0:
            exc = context.get('error')
            if exc or not val.hasValue():
                count[0] = -1
                # ignore the rest of callbacks
                exc = error.InconsistentValueError(
                    msg='Inconsistent column %s: %s' % (name, exc))
                cbFun(varBind, **dict(context, error=exc))
                return
            # All columns answered without error: report success once.
            if not count[0]:
                cbFun(varBind, **context)
                return

    # Instance suffix starts right after this object's own OID prefix.
    colLen = len(self.name) + 1

    # Issue an asynchronous read for every column of this row; results are
    # collected by the nested callback above.
    for colName, colObj in self._vars.items():
        instName = colName + name[colLen:]
        colObj.readGet((instName, None), **dict(context, cbFun=_cbFun))
        debug.logger & debug.FLAG_INS and debug.logger(
            '%s: _checkColumns: checking instance %s' % (self, instName))
|
def bind(self, container, attr_name):
    """Get an instance of this Dependency to bind to `container` with
    `attr_name`."""
    bound = super(DependencyProvider, self).bind(container)
    # Record the attribute name both on the bound instance and on self.
    bound.attr_name = attr_name
    self.attr_name = attr_name
    return bound
|
def remove_lib(lib_name):
    """Remove a library.

    :param lib_name: library name (e.g. 'PS2Keyboard')
    :rtype: None
    """
    target = libraries_dir() / lib_name
    log.debug('remove %s', target)
    target.rmtree()
|
def get(self):
    """Constructs a TaskQueueCumulativeStatisticsContext

    :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsContext
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsContext
    """
    solution = self._solution
    return TaskQueueCumulativeStatisticsContext(
        self._version,
        workspace_sid=solution['workspace_sid'],
        task_queue_sid=solution['task_queue_sid'],
    )
|
def _is_impossible_by_count(self, state):
    """Disallow any board that has insufficient tile count to solve."""
    # Count all the tile types; every numeric wildcard collapses onto '2'.
    standard_wildcard_type = '2'
    counts = dict.fromkeys(base.Tile._all_types, 0)
    for _position, tile in state.board.positions_with_tile():
        kind = tile._type
        try:
            int(kind)
        except ValueError:
            counts[kind] += 1
        else:
            counts[standard_wildcard_type] += 1

    skullbomb = counts['*']
    skull = counts['s']
    wildcard = counts[standard_wildcard_type]
    colors = (counts['r'], counts['g'], counts['b'], counts['y'])
    exp = counts['x']
    money = counts['m']

    # A skullbomb is always matchable when there are enough total skulls.
    if skullbomb and skullbomb + skull >= 3:
        return False
    # A wildcard is always matchable with enough of any one color.
    if wildcard and any(wildcard + color >= 3 for color in colors):
        return False
    # No special case applied: any present type with fewer than 3 copies
    # can never be matched.
    if any(n and n < 3 for n in colors + (exp, money, skull)):
        return True
    # Counts look fine; allow the state.
    return False
|
def _return_tag_task(self, task):
    """Runs both SFW and Tags tasks"""
    if self.security is None:
        raise Exception('Tags require security')
    transform_url = get_transform_url(
        [task], handle=self.handle, security=self.security,
        apikey=self.apikey)
    response = make_call(CDN_URL, 'get', handle=self.handle,
                         security=self.security,
                         transform_url=transform_url)
    return response.json()
|
def _process_uniprot_ids(self, limit=None):
    """This method processes the mappings from ZFIN gene IDs to UniProtKB IDs.

    Triples created:
        <zfin_gene_id> a class
        <zfin_gene_id> rdfs:label gene_symbol
        <uniprot_id> is an Individual
        <uniprot_id> has type <polypeptide>
        <zfin_gene_id> has_gene_product <uniprot_id>

    :param limit: optional maximum number of rows to process
        (ignored in test mode)
    :return: None
    """
    LOG.info("Processing UniProt IDs")
    # In test mode write into the dedicated test graph instead.
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    line_counter = 0
    model = Model(graph)
    geno = Genotype(graph)
    raw = '/'.join((self.rawdir, self.files['uniprot']['file']))
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            (gene_id, gene_so_id, gene_symbol, uniprot_id  # , empty
             ) = row
            # In test mode process only whitelisted test genes.
            if self.test_mode and gene_id not in self.test_ids['gene']:
                continue
            gene_id = 'ZFIN:' + gene_id.strip()
            uniprot_id = 'UniProtKB:' + uniprot_id.strip()
            geno.addGene(gene_id, gene_symbol)
            # TODO: Abstract to one of the model utilities
            model.addIndividualToGraph(
                uniprot_id, None, self.globaltt['polypeptide'])
            graph.addTriple(
                gene_id, self.globaltt['has gene product'], uniprot_id)
            # Honour the row limit, but never truncate in test mode.
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    LOG.info("Done with UniProt IDs")
    return
|
def _read_lines ( filepath ) :
"""Read a req file to a list to support nested requirement files ."""
|
with open ( filepath , 'rt' , encoding = 'utf8' ) as fh :
for line in fh :
line = line . strip ( )
if line . startswith ( "-r" ) :
logger . debug ( "Reading deps from nested requirement file: %s" , line )
try :
nested_filename = line . split ( ) [ 1 ]
except IndexError :
logger . warning ( "Invalid format to indicate a nested requirements file: '%r'" , line )
else :
nested_filepath = os . path . join ( os . path . dirname ( filepath ) , nested_filename )
yield from _read_lines ( nested_filepath )
else :
yield line
|
def gen_cmake_command(config):
    """Generate CMake command.

    Returns a string of Python source that defines a
    ``gen_cmake_command(options, arguments)`` function assembling the CMake
    command line from the ``export`` and ``define`` entries of ``config``
    plus the docopt ``arguments``.
    """
    # NOTE: the original function imported autocmake.extract.extract_list
    # here but never used it; the unused import has been removed.
    s = []
    s.append("\n\ndef gen_cmake_command(options, arguments):")
    s.append('    """')
    s.append(" Generate CMake command based on options and arguments.")
    s.append('    """')
    s.append(" command = []")
    for env in config['export']:
        s.append(' command.append({0})'.format(env))
    s.append(" command.append(arguments['--cmake-executable'])")
    for definition in config['define']:
        s.append(' command.append({0})'.format(definition))
    s.append(" command.append('-DCMAKE_BUILD_TYPE={0}'.format(arguments['--type']))")
    s.append(" command.append('-G\"{0}\"'.format(arguments['--generator']))")
    s.append(" if arguments['--cmake-options'] != \"''\":")
    s.append(" command.append(arguments['--cmake-options'])")
    s.append(" if arguments['--prefix']:")
    s.append(" command.append('-DCMAKE_INSTALL_PREFIX=\"{0}\"'.format(arguments['--prefix']))")
    s.append("\n return ' '.join(command)")
    return '\n'.join(s)
|
def _check_stream_timeout ( started , timeout ) :
"""Check if the timeout has been reached and raise a ` StopIteration ` if so ."""
|
if timeout :
elapsed = datetime . datetime . utcnow ( ) - started
if elapsed . seconds > timeout :
raise StopIteration
|
def load_to_ufos(file_or_path, include_instances=False, family_name=None,
                 propagate_anchors=True):
    """Load an unpacked .glyphs object to UFO objects."""
    # Accept either an open file-like object or a filesystem path.
    if hasattr(file_or_path, "read"):
        font = load(file_or_path)
    else:
        with open(file_or_path, "r", encoding="utf-8") as ifile:
            font = load(ifile)
    logger.info("Loading to UFOs")
    return to_ufos(font,
                   include_instances=include_instances,
                   family_name=family_name,
                   propagate_anchors=propagate_anchors)
|
def save_configuration(self):
    """Save the PyPI access configuration.

    The ``username`` and ``password`` attributes must be set before
    calling this method.  distutils is used to do the actual work.
    """
    self.check_credentials()
    # get distutils to do the work
    command = self._get_pypirc_command()
    command._store_pypirc(self.username, self.password)
|
def gdpool(name, start, room):
    """Return the d.p. value of a kernel variable from the kernel pool.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gdpool_c.html

    :param name: Name of the variable whose value is to be returned.
    :type name: str
    :param start: Which component to start retrieving for name.
    :type start: int
    :param room: The largest number of values to return.
    :type room: int
    :return: Values associated with name.
    :rtype: list of float
    """
    # Marshal the Python arguments into ctypes for the CSPICE call.
    name_p = stypes.stringToCharP(name)
    start_c = ctypes.c_int(start)
    values = stypes.emptyDoubleVector(room)
    room_c = ctypes.c_int(room)
    n = ctypes.c_int()
    found = ctypes.c_int()
    libspice.gdpool_c(name_p, start_c, room_c, ctypes.byref(n),
                      ctypes.cast(values, ctypes.POINTER(ctypes.c_double)),
                      ctypes.byref(found))
    # Only the first n values are meaningful.
    return stypes.cVectorToPython(values)[0:n.value], bool(found.value)
|
def repr_part(self):
    """Return a string usable in a space's ``__repr__`` method."""
    optargs = [('norm', self.norm, ''),
               ('exponent', self.exponent, 2.0)]
    return signature_string([], optargs, mod=[[], ['!r', ':.4']])
|
def rename(old_path, new_path, edit_folders=True):
    """Rename files or folders.

    :param old_path: name of file or folder to rename
    :param new_path: name of new file or folder
    :param edit_folders: flag to allow recursive renaming of folders.
        Default is ``True``
    :type old_path: str
    :type new_path: str
    :type edit_folders: bool
    """
    # os.renames creates missing intermediate directories (and prunes
    # emptied ones); os.rename is the plain single-entry rename.
    mover = os.renames if edit_folders else os.rename
    mover(old_path, new_path)
|
def from_geojson(cls, filename):
    """Load vector from geojson."""
    with open(filename) as fd:
        geometry = json.load(fd)
    # A geojson object must at minimum carry a 'type' member.
    if 'type' not in geometry:
        raise TypeError("%s is not a valid geojson." % (filename,))
    return cls(to_shape(geometry), WGS84_CRS)
|
def exists(self, qname):
    '''Check to see if a Queue exists.'''
    try:
        # First if not exists() -> exit
        if self.conn.queue_exists(qname):
            return True
        return False
    except Exception as err_msg:
        # The original ``except pyrax.exceptions as err_msg`` named the
        # *module* pyrax.exceptions, which is not an exception class and
        # would itself raise TypeError at handling time.  Catch broadly
        # and report the API failure instead.
        log.error('RackSpace API got some problems during '
                  'existing queue check: %s', err_msg)
    return False
|
def macro_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/macros#create-macro"
    return self.call("/api/v2/macros.json", method="POST", data=data,
                     **kwargs)
|
def handle_delete_scan_command(self, scan_et):
    """Handle a <delete_scan> command.

    @return: Response string for <delete_scan> command.
    """
    scan_id = scan_et.attrib.get('scan_id')
    if scan_id is None:
        return simple_response_str('delete_scan', 404, 'No scan_id attribute')
    if not self.scan_exists(scan_id):
        return simple_response_str(
            'delete_scan', 404, "Failed to find scan '{0}'".format(scan_id))
    self.check_scan_process(scan_id)
    if not self.delete_scan(scan_id):
        # Deletion refused: the scan is still running.
        raise OSPDError('Scan in progress', 'delete_scan')
    return simple_response_str('delete_scan', 200, 'OK')
|
def _config_sortable(self, sortable):
    """Configure a new sortable state"""
    for col in self["columns"]:
        if sortable:
            # Bind col via default arg; a plain closure would late-bind
            # the loop variable.
            command = lambda c=col: self._sort_column(c, True)
        else:
            command = ""
        self.heading(col, command=command)
    self._sortable = sortable
|
def inet_pton(af, addr):
    """Convert an IP address from text representation into binary form."""
    # Will replace Net/Net6 objects
    addr = plain_str(addr)
    try:
        # Use the platform's inet_pton if available.
        return socket.inet_pton(af, addr)
    except AttributeError:
        pass
    # Fall back to the pure-Python converters keyed by address family.
    try:
        return _INET_PTON[af](addr)
    except KeyError:
        raise socket.error("Address family not supported by protocol")
|
def update(self, name):
    """Query a name for the relative index value to be added into the
    source map name field (optional 5th element)."""
    if name is None:
        return None
    names = self._names
    if name not in names:
        # Track a newly seen name with the next sequential index.
        names[name] = len(names)
    index = names[name]
    delta = index - self._current
    self._current = index
    return delta
|
def insert(self, index, item):
    """See list.insert."""
    super(ObservableList, self).insert(index, item)
    length = len(self)
    # Clamp the notification index to the actual insertion position.
    if index >= length:
        index = length - 1
    elif index < 0:
        index = max(index + length - 1, 0)
    self._notify_add_at(index)
|
def _intermediary_to_markdown ( tables , relationships ) :
"""Returns the er markup source in a string ."""
|
t = '\n' . join ( t . to_markdown ( ) for t in tables )
r = '\n' . join ( r . to_markdown ( ) for r in relationships )
return '{}\n{}' . format ( t , r )
|
def encode(self, s):
    """Encodes text into a list of integers."""
    tokens = self._tokenizer.tokenize(tf.compat.as_text(s))
    ids = []
    for token in _prepare_tokens_for_encode(tokens):
        ids.extend(self._token_to_ids(token))
    return text_encoder.pad_incr(ids)
|
def check_share_permission(self, user_id):
    """Check whether this user can write this project"""
    if _is_admin(user_id):
        return
    uid = int(user_id)
    if int(self.created_by) == uid:
        return
    # Accept the first owner entry granting both view and share rights.
    for owner in self.owners:
        if owner.user_id == uid and owner.view == 'Y' and owner.share == 'Y':
            return
    raise PermissionError("Permission denied. User %s does not have share"
                          " access on network %s" % (user_id, self.id))
|
def generate(basename, xml_list):
    '''generate complete MAVLink Objective-C implementation'''
    generate_shared(basename, xml_list)
    for xml in xml_list:
        generate_message_definitions(basename, xml)
|
def row(self):
    """Get row

    :return: str
    """
    # In history mode the row comes from the recorded history position.
    if self.__history_mode:
        return self.history().entry(self.history().position())
    return self.__current_row
|
def put(filename, target=None, serial=None):
    """Put a referenced file on the LOCAL file system onto the
    file system on the BBC micro:bit.

    If no serial object is supplied, microfs will attempt to detect the
    connection itself.

    Returns True for success or raises an IOError if there's a problem.
    """
    if not os.path.isfile(filename):
        raise IOError('No such file.')
    with open(filename, 'rb') as local:
        content = local.read()
    filename = os.path.basename(filename)
    if target is None:
        target = filename
    commands = ["fd = open('{}', 'wb')".format(target), "f = fd.write"]
    # Stream the payload to the device in 64-byte chunks.
    while content:
        chunk = content[:64]
        content = content[64:]
        if PY2:
            commands.append('f(b' + repr(chunk) + ')')
        else:
            commands.append('f(' + repr(chunk) + ')')
    commands.append('fd.close()')
    out, err = execute(commands, serial)
    if err:
        raise IOError(clean_error(err))
    return True
|
def _add_block_number_to_hash_lookup(db: BaseDB, header: BlockHeader) -> None:
    """Set a record in the database to allow looking up this header by its
    block number."""
    lookup_key = SchemaV1.make_block_number_to_hash_lookup_key(
        header.block_number)
    db.set(lookup_key, rlp.encode(header.hash, sedes=rlp.sedes.binary))
|
def _set_cinder_config(cls, host, locks_path, cinder_config_params):
    """Setup the parser with all the known Cinder configuration."""
    # Default paths: state in the CWD, locks relative to state_path.
    cfg.CONF.set_default('state_path', os.getcwd())
    cfg.CONF.set_default('lock_path', '$state_path', 'oslo_concurrency')
    cls._parser = six.moves.configparser.SafeConfigParser()
    cls._parser.set('DEFAULT', 'enabled_backends', '')
    if locks_path:
        cls._parser.add_section('oslo_concurrency')
        cls._parser.set('oslo_concurrency', 'lock_path', locks_path)
        cls._parser.add_section('coordination')
        cls._parser.set('coordination', 'backend_url', 'file://' + locks_path)
    if host:
        cls._parser.set('DEFAULT', 'host', host)
    # All other configuration options go into the DEFAULT section
    cls.__set_parser_kv(cinder_config_params, 'DEFAULT')
    # We replace the OSLO's default parser to read from a StringIO instead
    # of reading from a file.
    cls._config_string_io = six.moves.StringIO()
    cfg.ConfigParser.parse = six.create_unbound_method(cls._config_parse,
                                                       cfg.ConfigParser)
    # Replace command line arg parser so we ignore caller's args
    cfg._CachedArgumentParser.parse_args = lambda *a, **kw: None
    # Update the configuration with the options we have configured.
    # 'in_memory_file' is a placeholder name consumed by the patched parser.
    cfg.CONF(project='cinder', version=cinderlib.__version__,
             default_config_files=['in_memory_file'])
    cls._update_cinder_config()
|
def observations_np(self, boundary=20):
    """Pads the observations in all the trajectories and returns them.

    Args:
      boundary: integer, observations will be padded to (n * boundary) + 1
        where n is an integer.

    Returns:
      a tuple (padded_observations, time_steps), with shapes:
      padded_observations: (self.batch_size, n * boundary + 1) + OBS
      time_steps: integer list of length = self.batch_size
    """
    observations = [t.observations_np for t in self.trajectories]
    # Every element in `observations` is shaped (t,) + OBS.
    obs_shape = observations[0].shape[1:]  # pylint: disable=invalid-name
    num_time_steps = [t.num_time_steps for t in self.trajectories]
    # Round the longest trajectory up to the next multiple of `boundary`.
    boundary = int(boundary)
    longest = max(num_time_steps)
    bucket_length = boundary * int(np.ceil(float(longest) / boundary))

    def _pad(obs):
        # Pad the first (time-step) axis only, up to bucket_length + 1.
        pad_width = [(0, bucket_length + 1 - obs.shape[0])]
        pad_width += [(0, 0)] * len(obs_shape)
        return np.pad(obs, pad_width, "constant")

    return np.stack([_pad(obs) for obs in observations]), num_time_steps
|
def sendATS_same(self, CorpNum, TemplateCode, Sender, Content, AltContent,
                 AltSendType, SndDT, KakaoMessages, UserID=None,
                 RequestNum=None, ButtonList=None):
    """Bulk-send an ATS (KakaoTalk notification) message.

    :param CorpNum: Popbill member business registration number
    :param TemplateCode: template code
    :param Sender: sender number
    :param Content: [broadcast] notification body
    :param AltContent: [broadcast] alternative SMS body
    :param AltSendType: alternative send type
        ('' - none, 'C' - notification body, 'A' - alternative body)
    :param SndDT: reserved send time (format: yyyyMMddHHmmss)
    :param KakaoMessages: list of per-receiver message dicts
    :param UserID: Popbill member user id
    :param RequestNum: request number
    :param ButtonList: optional list of button dicts
    :return: receiptNum (receipt number)
    """
    if TemplateCode is None or TemplateCode == '':
        raise PopbillException(-99999999, "알림톡 템플릿코드가 입력되지 않았습니다.")
    if Sender is None or Sender == '':
        raise PopbillException(-99999999, "발신번호가 입력되지 않았습니다.")

    req = {}
    # NOTE: the original guards read ``x is not None or x != ''`` which is
    # a tautology (always true); ``and`` matches the evident intent of
    # omitting empty/absent values from the payload.
    if TemplateCode is not None and TemplateCode != '':
        req['templateCode'] = TemplateCode
    if Sender is not None and Sender != '':
        req['snd'] = Sender
    if Content is not None and Content != '':
        req['content'] = Content
    if AltContent is not None and AltContent != '':
        req['altContent'] = AltContent
    if AltSendType is not None and AltSendType != '':
        req['altSendType'] = AltSendType
    if SndDT is not None and SndDT != '':
        req['sndDT'] = SndDT
    if KakaoMessages is not None and KakaoMessages != '':
        req['msgs'] = KakaoMessages
    if ButtonList is not None:
        req['btns'] = ButtonList
    if RequestNum is not None and RequestNum != '':
        req['requestnum'] = RequestNum

    postData = self._stringtify(req)
    result = self._httppost('/ATS', postData, CorpNum, UserID)
    return result.receiptNum
|
def open(cls, *args, **kwargs):
    """Quickly open a connection, yield it, and always close it.

    Just something to make it easier to open a connection, do something
    and then close it.
    """
    connection = cls(*args, **kwargs)
    connection.connect()
    try:
        yield connection
    finally:
        # Close even if the caller's block raised.
        connection.close()
|
def rot1(theta):
    """Rotation matrix of angle ``theta`` around the X-axis.

    Args:
        theta (float): Angle in radians
    Return:
        3x3 rotation matrix (numpy array)
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [1, 0, 0],
        [0, c, s],
        [0, -s, c],
    ])
|
def logout(self, request):
    "Logs out user and redirects them to Nexus home"
    # Import locally (and aliased) to avoid shadowing this method's name.
    from django.contrib.auth import logout as auth_logout
    auth_logout(request)
    return HttpResponseRedirect(reverse('nexus:index', current_app=self.name))
|
def get_process_status(self, pid=None):
    '''Get the current status of a process.

    :Parameters:
        * *pid* (`string`) -- Identifier of an existing process
    '''
    pid = self._get_pid(pid)
    return self._call_rest_api(
        'get', '/processes/' + pid + '/status',
        error='Failed to fetch process status')
|
def deserialize(self, value, **kwargs):
    """Deserialize every item of the list."""
    # Optionally promote a bare scalar to a one-element list.
    if self.allow_scalar and not isinstance(value, (list, tuple)):
        value = [value]
    value = super(List, self).deserialize(value)
    result, errors = [], []
    for index, item in enumerate(value):
        try:
            result.append(self.item_type.deserialize(item, **kwargs))
        except ValidationError as exc:
            # Record which position failed, keep collecting the rest.
            exc.index = index
            errors.append(exc)
    if errors:
        raise ValidationError(errors)
    return result
|
def wait_command(self, start_func, turns=1, end_func=None):
    """Call ``start_func``, wait ``turns``, and then call ``end_func`` if provided.

    Disables input for the duration.

    :param start_func: function to call just after disabling input
    :param turns: number of turns to wait
    :param end_func: function to call just before re-enabling input
    :return: ``None``
    """
    self.disable_input()
    start_func()
    # Re-enable input (running end_func first) once the turns elapse.
    self.app.wait_turns(turns, cb=partial(self.enable_input, end_func))
|
def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):
    """Plot the constant volume specific heat C_v in a temperature range.

    Args:
        tmin: minimum temperature
        tmax: maximum temperature
        ntemp: number of steps
        ylim: tuple specifying the y-axis limits.
        kwargs: kwargs passed to the matplotlib function 'plot'.

    Returns:
        matplotlib figure
    """
    temperatures = np.linspace(tmin, tmax, ntemp)
    # Units depend on whether a structure (mol) is attached or not (mol-c).
    ylabel = r"$C_v$ (J/K/mol)" if self.structure else r"$C_v$ (J/K/mol-c)"
    return self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel,
                             ylim=ylim, **kwargs)
|
def _iter_module_subclasses ( package , module_name , base_cls ) :
"""inspect all modules in this directory for subclasses of inherit from
` ` base _ cls ` ` . inpiration from http : / / stackoverflow . com / q / 1796180/564709"""
|
module = importlib . import_module ( '.' + module_name , package )
for name , obj in inspect . getmembers ( module ) :
if inspect . isclass ( obj ) and issubclass ( obj , base_cls ) :
yield obj
|
async def spawn_slaves(self, slave_addrs, slave_env_cls, slave_mgr_cls,
                       slave_kwargs=None):
    """Spawn slave environments.

    :param slave_addrs:
        List of (HOST, PORT) addresses for the slave-environments.
    :param slave_env_cls: Class for the slave environments.
    :param slave_kwargs:
        If not None, must be a list of the same size as *slave_addrs*,
        each item holding parameter values for one slave environment.
    :param slave_mgr_cls:
        Class of the slave environment managers.
    """
    pool, runner = spawn_containers(
        slave_addrs, env_cls=slave_env_cls, env_params=slave_kwargs,
        mgr_cls=slave_mgr_cls)
    self._pool = pool
    self._r = runner
    # Manager address: base URL of each slave with trailing index 0.
    self._manager_addrs = [
        "{}{}".format(_get_base_url(addr), 0) for addr in slave_addrs]
|
def _check_versionlock():
    '''Ensure that the appropriate versionlock plugin is present'''
    if _yum() == 'dnf':
        # Plugin package name changed with Fedora 26 / newer dnf.
        if int(__grains__.get('osmajorrelease')) >= 26:
            vl_plugin = ('python3-dnf-plugin-versionlock' if six.PY3
                         else 'python2-dnf-plugin-versionlock')
        else:
            vl_plugin = ('python3-dnf-plugins-extras-versionlock' if six.PY3
                         else 'python-dnf-plugins-extras-versionlock')
    elif __grains__.get('osmajorrelease') == '5':
        vl_plugin = 'yum-versionlock'
    else:
        vl_plugin = 'yum-plugin-versionlock'
    if vl_plugin not in list_pkgs():
        raise SaltInvocationError(
            'Cannot proceed, {0} is not installed.'.format(vl_plugin))
|
def is_edit_allowed(self):
    """Check if edit is allowed"""
    membership_tool = self.context.portal_membership
    return membership_tool.checkPermission(
        FieldEditResultsInterpretation, self.context)
|
def evaluate_bound(distribution, x_data, parameters=None, cache=None):
    """Evaluate lower and upper bounds.

    Args:
        distribution (Dist):
            Distribution to evaluate.
        x_data (numpy.ndarray):
            Locations for where evaluate bounds at. Relevant in the case of
            multivariate distributions where the bounds are affected by the
            output of other distributions.
        parameters (:py:data:typing.Any):
            Collection of parameters to override the default ones in the
            distribution.
        cache (:py:data:typing.Any):
            A collection of previous calculations in case the same
            distribution turns up on more than one occasion.

    Returns:
        The lower and upper bounds of ``distribution`` at location
        ``x_data`` using parameters ``parameters``.
    """
    assert len(x_data) == len(distribution)
    assert len(x_data.shape) == 2
    cache = cache if cache is not None else {}
    parameters = load_parameters(
        distribution, "_bnd", parameters=parameters, cache=cache)
    out = numpy.zeros((2,) + x_data.shape)
    lower, upper = distribution._bnd(x_data.copy(), **parameters)
    # numpy.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
    # is the exact equivalent spelling.
    out.T[:, :, 0] = numpy.asarray(lower, dtype=float).T
    out.T[:, :, 1] = numpy.asarray(upper, dtype=float).T
    # Memoize for repeated occurrences of the same distribution.
    cache[distribution] = out
    return out
|
def fmt_type(data_type, inside_namespace=None):
    """Return a TypeScript type annotation for a data type.

    May contain a union of enumerated subtypes.

    inside_namespace should be set to the namespace that the type reference
    occurs in, or None if this parameter is not relevant.
    """
    if not (is_struct_type(data_type)
            and data_type.has_enumerated_subtypes()):
        return fmt_type_name(data_type, inside_namespace)
    # Build the union of all enumerated subtypes (+ the type itself when
    # it declares a catch-all variant).
    possible_types = [
        fmt_polymorphic_type_reference(subtype, inside_namespace)
        for _tag, subtype in data_type.get_all_subtypes_with_tags()
    ]
    if data_type.is_catch_all():
        possible_types.append(
            fmt_polymorphic_type_reference(data_type, inside_namespace))
    return fmt_union(possible_types)
|
def files ( self ) :
    """File uploads parsed from an `url-encoded` or `multipart/form-data`
    encoded POST or PUT request body.

    The values are instances of :class:`cgi.FieldStorage`. The most
    important attributes are:

    filename
        The client-side filename, if specified; otherwise None. This is
        *not* the name of the temporary file it is stored in.
    file
        The file(-like) object from which you can read the data.
    value
        The value as a *string*; for file uploads this transparently
        re-reads the file on every access, so avoid it for big files.
    """
    uploads = FormsDict()
    for key, field in self.POST.allitems():
        # Only FieldStorage items with a filename are actual uploads.
        if hasattr(field, 'filename'):
            uploads[key] = field
    return uploads
|
def force_recalculate ( obj ) :
    '''Recalculate all ImageCountField and UserImageCountField fields
    in object ``obj``.

    This should be used if auto-updating of these fields was disabled
    for some reason. To disable auto-update when saving an AttachedImage
    instance (for example when you need to save a lot of images and want
    to recalculate denormalised values only after all images are saved)
    use this pattern::

        image = AttachedImage(...)
        image.send_signal = False
        image.save()
    '''
    # Fake a minimal AttachedImage-like instance pointing at ``obj`` so
    # the ``image_saved`` handlers recompute the denormalised counters.
    stub = type('Stub', (object,), {'content_object': obj})()
    image_saved.send(sender=obj.__class__, instance=stub)
|
def weighted_mse(self, dataset, n_jobs=-1):
    """Returns the weighted MSE over all logical networks with respect to
    the given :class:`caspo.core.dataset.Dataset` object instance.

    For each logical network the weight corresponds to the number of
    networks having the same behavior (``self.__networks``).

    Parameters
    ----------
    dataset : :class:`caspo.core.dataset.Dataset`
        Dataset to compute MSE

    n_jobs : int
        Number of jobs to run in parallel. Default to -1 (all cores available)

    Returns
    -------
    float
        Weighted MSE
    """
    # One prediction slice per network: (networks, clampings, readouts).
    predictions = np.zeros((len(self), len(dataset.clampings), len(dataset.setup.readouts)))
    predictions[:, :, :] = Parallel(n_jobs=n_jobs)(delayed(__parallel_predictions__)(n, dataset.clampings, dataset.setup.readouts) for n in self)
    # Weight each network's predictions by its behavior count.
    for i, _ in enumerate(self):
        predictions[i, :, :] *= self.__networks[i]
    readouts = dataset.readouts.values
    # Mask out missing (NaN) readouts before computing the error against
    # the weighted-average prediction.
    pos = ~np.isnan(readouts)
    return mean_squared_error(readouts[pos], (np.sum(predictions, axis=0) / np.sum(self.__networks))[pos])
|
def create_group(self, **kwargs):
    """Create a group.

    :calls: `POST /api/v1/groups \
        <https://canvas.instructure.com/doc/api/groups.html#method.groups.create>`_

    :rtype: :class:`canvasapi.group.Group`
    """
    requester = self.__requester
    response = requester.request(
        'POST',
        'groups',
        _kwargs=combine_kwargs(**kwargs),
    )
    return Group(requester, response.json())
|
def close(self):
    """Persist a checksum for the written file, then close it."""
    checksum_persister = _get_checksum_persister(self._path)
    basename = os.path.basename(self._path)
    # Store the digest keyed by file name, making sure the persister is
    # closed even if the write fails.
    with contextlib.closing(checksum_persister):
        checksum_persister[basename] = self._hasher.hexdigest()
    self._close()
|
def header_fields_from_table(spec_table, keys, dtype_map):
    """Convert the specification table to a standardized format.

    The specification table is assumed to be in `reST grid table format
    <http://docutils.sourceforge.net/docs/user/rst/quickref.html#tables>`_.
    It must have the following 5 columns:

    1. ID: an arbitrary unique identifier, e.g., a number.
    2. Byte range: Bytes in the file covered by this field, given
       as number range (e.g. ``15-24``). The byte values start at
       1 (not 0), and the upper value of the range is included.
    3. Data type: Field values are stored in this format. For multiple
       entries, a shape can be specified immediately after the type
       specifier, e.g., ``Float32(4)`` or ``Int32(2,2)``. It is also
       possible to give an incomplete shape, e.g., ``Int32(2)`` with
       a 24-byte field. In this case, the shape is completed to
       ``(3, 2)`` automatically. By default, the one-dimensional shape
       is determined from the data type and the byte range.
       The data type must map to a NumPy data type (``dtype_map``).
    4. Name: The name of the field as used later (in lowercase) for
       identification.
    5. Description: An explanation of the field.

    The converted specification is a tuple of dictionaries, each
    corresponding to one (multi-)row (=field) of the original table,
    with keys ``'name'``, ``'offset'``, ``'size'``, ``'dtype'``,
    ``'description'`` and ``'dshape'``.

    Parameters
    ----------
    spec_table : str
        Specification given as a string containing a definition table.
    keys : dict
        Dictionary with entries ``'id'``, ``'byte_range'``, ``'dtype'``,
        ``'name'`` and ``'description'`` for the column headers in the
        specification table.
    dtype_map : dict
        Mapping from the data type specifiers in the specification table
        to NumPy data types.

    Returns
    -------
    standardized_fields : tuple of dicts
        The standardized fields according to the above table, one for
        each (multi-)row.
    """
    import ast

    field_list = _fields_from_table(spec_table, id_key=keys['id'])

    # Parse the fields and represent them in a unified way
    conv_list = []
    for field in field_list:
        new_field = {}

        # Name and description: lowercase name, copy description
        new_field['name'] = field[keys['name']].lower()
        new_field['description'] = field[keys['description']]

        # Byte ranges are 1-based and inclusive; convert to a 0-based
        # offset and a size in bytes.
        byte_range = field[keys['byte_range']].split('-')
        offset_bytes = int(byte_range[0]) - 1
        end_bytes = int(byte_range[-1]) - 1
        size_bytes = end_bytes - offset_bytes + 1
        new_field['offset'] = offset_bytes
        new_field['size'] = size_bytes

        # Data type: transform to NumPy format and get shape
        dtype_shape = field[keys['dtype']].split('(')
        dtype = dtype_map[dtype_shape[0]]
        new_field['dtype'] = dtype
        if len(dtype_shape) == 2:
            # Shape was given in data type specification.
            # Re-attach the left parenthesis removed by the split;
            # `ast.literal_eval` safely parses the literal int/tuple
            # (the previous `eval` would execute arbitrary expressions).
            dshape = np.atleast_1d(ast.literal_eval('(' + dtype_shape[-1]))
            size_bytes_from_shape = np.prod(dshape) * dtype.itemsize
            # BUGFIX: was `>=`, which wrongly rejected shapes that exactly
            # fill the field; only a *larger* shape is an error (matching
            # the message text below).
            if size_bytes_from_shape > size_bytes:
                raise ValueError(
                    "entry '{}': field size {} from shape {} and "
                    "dtype.itemsize {} larger than field size {} from spec"
                    "".format(field[keys['name']], size_bytes_from_shape,
                              dshape, dtype.itemsize, size_bytes))

            # Try to complete the given shape
            if size_bytes % size_bytes_from_shape:
                raise ValueError(
                    "entry '{}': shape {} cannot be completed consistently "
                    "using field size {} and `dtype.itemsize` {}"
                    "".format(field[keys['name']], dshape, size_bytes,
                              dtype.itemsize))
            dshape = (size_bytes // size_bytes_from_shape,) + tuple(dshape)
        else:
            if size_bytes % dtype.itemsize:
                # BUGFIX: the message previously interpolated the raw
                # byte-range string (and an extra, ignored argument)
                # instead of the computed field size.
                raise ValueError(
                    "entry '{}': field size {} not a multiple of "
                    "`dtype.itemsize` {}"
                    "".format(field[keys['name']], size_bytes,
                              dtype.itemsize))
            dshape = (size_bytes // dtype.itemsize,)

        new_field['dshape'] = dshape
        conv_list.append(new_field)

    return tuple(conv_list)
|
def urlparse(self, url, top=True, text=None, include=False, recurse=True):
    """Parse a quark file and, optionally, its recursive dependencies.

    A quark file (main.q) loaded via urlparse() can have two kinds of
    dependencies, `use a.q` or `include b.q`. For the `use` case each file
    is added as a separate top-level root to self.roots. For the `include`
    case the file is added to the *current* root that is including it, so
    it's added as a child of `self.root`.

    There are two forms of caching: CACHE is a shared dictionary across
    class instances of parsed roots. Additionally .qc files are written
    with pickled versions of loaded roots. Given that both of them store
    a root, these forms of caching are only relevant to top-level quark
    files and files referenced using `use`. Files loaded with `include`
    bypass the caching mechanism since they need to be loaded as a child
    of the parent root.
    """
    if os.path.exists(url):
        url = os.path.abspath(url)
    urlc = compiled_quark(url)
    # Fast path 1: already parsed in the in-memory shared cache.
    if not include and url in self.CACHE:
        self.log.debug("loading from cache: %s", url)
        root = self.CACHE[url]
        self.roots.add(root)
        if recurse:
            # Every `use` dependency of a cached root must itself be cached.
            for u in root.uses:
                assert u in self.CACHE, (url, u, self.CACHE.keys())
                self.roots.add(self.CACHE[u])
        if not include:
            self.entries[url] = root.files[0]
        return root.files[0]
    # Fast path 2: a pickled .qc file that is newer than its sources.
    elif not include and recurse and os.path.exists(url) and is_newer(urlc, url, __file__):
        self.log.debug("loading from: %sc", url)
        with open(urlc) as fd:
            try:
                unp = pickle.Unpickler(fd)
                deps = unp.load()
                if is_newer(urlc, *deps):
                    roots = unp.load()
                    # Check for the end record in case we
                    # encounter a partially written file.
                    end = unp.load()
                    if end == ARCHIVE_END:
                        for root in roots:
                            self.CACHE[root.url] = root
                            self.roots.add(root)
                        if not include:
                            self.entries[url] = roots[0].files[0]
                        return roots[0].files[0]
            except EOFError:
                # Truncated archive: fall through to a full parse.
                pass
    # Slow path: full parse. For non-includes, push a fresh Root and
    # restore the previous one when done (see `finally` below).
    old = None
    if not include and url not in self.roots:
        old = self.root
        self.root = Root(url)
        self.roots.add(self.root)
    try:
        if text is None:
            try:
                text = self.read(url)
            except IOError, e:
                # Only the top-level file turns a read failure into a
                # user-facing CompileError; nested failures propagate.
                if top:
                    raise CompileError(e)
                else:
                    raise
        self.log.debug("parsing %s", url)
        file = self.parse(url, text)
        if recurse:
            # `use` dependencies become separate roots...
            for u in file.uses.values():
                qurl = join(url, u.url)
                self.perform_use(qurl, u)
                assert qurl in self.CACHE, (url, qurl, self.CACHE.keys())
            # ...while `include` dependencies join the current root.
            for inc in file.includes.values():
                qurl = join(url, inc.url)
                if qurl.endswith(".q"):
                    self.perform_quark_include(qurl, inc)
                else:
                    self.perform_native_include(qurl, inc)
        if not include:
            self.CACHE[url] = self.root
        if not include:
            self.entries[url] = file
        return file
    finally:
        if old:
            self.root = old
|
def read_file_bytes(input_file_path):
    """Read the file at the given file path
    and return its contents as a byte string,
    or ``None`` if it cannot be read.

    :param string input_file_path: the file path
    :rtype: bytes
    """
    try:
        with io.open(input_file_path, "rb") as input_file:
            return input_file.read()
    except (OSError, IOError, TypeError, ValueError):
        # Best-effort read: a missing/unreadable file or an invalid path
        # value yields None instead of raising. (Previously a bare
        # `except:` which also swallowed KeyboardInterrupt/SystemExit.)
        return None
|
def euclid(a, b):
    """Return the Greatest Common Divisor of ``a`` and ``b``.

    Iterative Euclidean algorithm on the absolute values, so the result
    is always non-negative.
    """
    x, y = abs(a), abs(b)
    # Ensure x >= y before iterating (preserves original behavior).
    if x < y:
        x, y = y, x
    while y:
        x, y = y, x % y
    return x
|
def set_debug(lvl):
    """Set the PVA global debug print level.

    This prints directly to stdout, bypassing eg. sys.stdout.

    :param lvl: logging.* level or logLevel*
    """
    # Map logging.* levels onto PVA levels; pass PVA levels through.
    resolved = _lvlmap.get(lvl, lvl)
    assert resolved in _lvls, resolved
    _ClientProvider.set_debug(resolved)
|
def get_collections(self):
    """Return a flat list of the names of collections in the asset
    service, e.g. ``['wind-turbines', 'jet-engines']``.
    """
    return [entry['collection'] for entry in self._get_collections()]
|
def zrange(self, name, start, end, desc=False, withscores=False, score_cast_func=float):
    """Return the elements of the sorted set between index ``start`` and
    ``end`` (both included), resolved asynchronously via the pipeline.

    :param name: str  the name of the redis key
    :param start: start index
    :param end: end index
    :param desc: sort in descending order
    :param withscores: also return each element's score
    :param score_cast_func: callable used to cast returned scores
    :return: Future resolving to a list of decoded values, or of
        ``(value, score)`` pairs when ``withscores`` is true
    """
    with self.pipe as pipe:
        f = Future()
        # Queue the command; `res` is filled when the pipeline executes.
        res = pipe.zrange(self.redis_key(name), start, end, desc=desc, withscores=withscores, score_cast_func=score_cast_func)

        def cb():
            # Decode raw redis values once the pipeline has executed.
            if withscores:
                f.set([(self.valueparse.decode(v), s) for v, s in res.result])
            else:
                f.set([self.valueparse.decode(v) for v in res.result])

        pipe.on_execute(cb)
        return f
|
def remove(name=None, pkgs=None, **kwargs):
    '''Remove a package and all its dependencies which are not in use by other
    packages.

    name
        The name of the package to be deleted.


    Multiple Package Options:

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    .. versionadded:: 0.16.0


    Returns a dict containing the changes.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)

    old = list_pkgs()
    # Only attempt to remove packages that are actually installed.
    targets = [pkg for pkg in pkg_params if pkg in old]
    if not targets:
        return {}

    # pkgutil flags: -y assume yes, -r remove.
    cmd = '/opt/csw/bin/pkgutil -yr {0}'.format(' '.join(targets))
    __salt__['cmd.run_all'](cmd)

    # Invalidate the cached package list before re-querying.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    return salt.utils.data.compare_dicts(old, new)
|
def get_parameter_negative_warning(model_type, model_params, parameter):
    """Return an empty list, or a list with a single warning when the
    given model parameter is negative.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    model_params : :any:`dict`
        Parameters as stored in
        :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
    parameter : :any:`str`
        The name of the parameter, e.g., ``'intercept'``.

    Returns
    -------
    warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
        Empty list or list of single warning.
    """
    # Missing parameters are treated as 0, i.e. not negative.
    if model_params.get(parameter, 0) >= 0:
        return []
    return [
        EEMeterWarning(
            qualified_name=(
                "eemeter.caltrack_daily.{model_type}.{parameter}_negative".format(
                    model_type=model_type, parameter=parameter
                )
            ),
            description=(
                "Model fit {parameter} parameter is negative. Candidate model rejected.".format(
                    parameter=parameter
                )
            ),
            data=model_params,
        )
    ]
|
def assert_is_instance(value, types, message=None, extra=None):
    """Raises an AssertionError if value is not an instance of type(s).

    ``types`` may be a single type or a tuple of types, as accepted by
    :func:`isinstance`. ``message`` and ``extra`` are forwarded to the
    failure-message formatter.
    """
    # NOTE: the failure message is only built lazily if the check fails;
    # under `python -O` the whole assert is stripped.
    assert isinstance(value, types), _assert_fail_message(message, value, types, "is not an instance of", extra)
|
def get_normalized_ratios(psmfn, header, channels, denom_channels, min_intensity, second_psmfn, secondheader):
    """Calculates ratios for PSM tables containing isobaric channels with
    raw intensities. Normalizes the ratios by median. NA values or values
    below min_intensity are excluded from the normalization.

    Medians are computed from ``second_psmfn``/``secondheader`` when
    given, otherwise from ``psmfn``/``header`` itself. Yields PSM dicts
    with each channel replaced by its median-normalized ratio (as str),
    or 'NA' when no ratio could be computed.
    """
    ratios = []
    # Pick the table used for computing the normalization medians.
    if second_psmfn is not None:
        median_psmfn = second_psmfn
        medianheader = secondheader
    else:
        median_psmfn = psmfn
        medianheader = header
    # First pass: collect per-PSM ratios to derive channel medians.
    for psm in reader.generate_tsv_psms(median_psmfn, medianheader):
        ratios.append(calc_psm_ratios(psm, channels, denom_channels, min_intensity))
    ch_medians = isonormalizing.get_medians(channels, ratios)
    report = ('Channel intensity medians used for normalization:\n'
              '{}'.format('\n'.join(['{} - {}'.format(ch, ch_medians[ch]) for ch in channels])))
    sys.stdout.write(report)
    # Second pass: normalize each PSM's ratios by the channel medians.
    for psm in reader.generate_tsv_psms(psmfn, header):
        psmratios = calc_psm_ratios(psm, channels, denom_channels, min_intensity)
        psm.update({ch: str(psmratios[ix] / ch_medians[ch]) if psmratios[ix] != 'NA' else 'NA' for ix, ch in enumerate(channels)})
        yield psm
|
def _parse_header(self):
    """Extract information from the header of the detector file.

    Supports the legacy v1 DETX header (a single "<det_id> <n_doms>"
    line) as well as newer versions carrying an explicit version tag
    followed by validity, UTM and DOM-count lines.
    """
    self.print("Parsing the DETX header")
    self._det_file.seek(0, 0)
    first_line = self._readline()
    try:
        # v1 header: "<det_id> <n_doms>" on the first line.
        self.det_id, self.n_doms = split(first_line, int)
        self.version = 'v1'
    except ValueError:
        # Newer header: "<det_id> <version>" followed by validity
        # range, UTM information and the number of DOMs.
        det_id, self.version = first_line.split()
        self.det_id = int(det_id)
        validity = self._readline().strip()
        self.valid_from, self.valid_until = split(validity, float)
        raw_utm_info = self._readline().strip().split(' ')
        try:
            # First token is presumably a "UTM" tag — skip it.
            self.utm_info = UTMInfo(*raw_utm_info[1:])
        except TypeError:
            log.warning("Missing UTM information.")
        n_doms = self._readline()
        self.n_doms = int(n_doms)
|
def constructor(cls, *args, **kwargs):
    """Build a contract constructor object for this contract class.

    :param args: The contract constructor arguments as positional arguments
    :param kwargs: The contract constructor arguments as keyword arguments
    :return: a contract constructor object
    :raises ValueError: if the class has no deploy bytecode
    """
    bytecode = cls.bytecode
    if bytecode is None:
        raise ValueError(
            "Cannot call constructor on a contract that does not have 'bytecode' associated "
            "with it"
        )
    return ContractConstructor(cls.web3, cls.abi, bytecode, *args, **kwargs)
|
def generate_nhs_number_from_first_9_digits(first9digits: str) -> Optional[int]:
    """Return a valid NHS number, as an ``int``, given the first 9 digits,
    or ``None`` when no valid number can be built from them.

    The particular purpose is to make NHS numbers that *look* fake
    (rather than truly random NHS numbers which might accidentally be
    real). For example:

    .. code-block:: none

        123456789_ : no; checksum 10
        987654321_ : yes, valid if completed to 9876543210
    """
    if len(first9digits) != 9:
        log.warning("Not 9 digits")
        return None
    try:
        first9int = int(first9digits)
    except (TypeError, ValueError):
        # not an int
        log.warning("Not an integer")
        return None
    if len(str(first9int)) != len(first9digits):
        # e.g. leading zeros, which int() would silently drop
        log.warning("Leading zeros?")
        return None
    check_digit = nhs_check_digit(first9digits)
    if check_digit == 10:
        # NHS numbers with this check digit are all invalid
        log.warning("Can't have check digit of 10")
        return None
    return int(first9digits + str(check_digit))
|
def update(name, connection_uri="", id_file="", o=[], config=None):
    """Enhanced version of the edit command featuring multiple
    edits using regular expressions to match entries.
    """
    # NOTE: the mutable default `o=[]` is safe here — it is only read,
    # never mutated.
    storm_ = get_storm_instance(config)

    settings = {}
    if id_file != "":
        settings['identityfile'] = id_file
    # Each extra option is a "key=value" pair.
    for option in o:
        key, value = option.split("=")
        settings[key] = value

    try:
        storm_.update_entry(name, **settings)
        print(get_formatted_message('"{0}" updated successfully.'.format(name), 'success'))
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
|
def parse_table(tag):
    """Parse an HTML table into its kind and row strings.

    :param tag: a BeautifulSoup ``<table>`` element
    :return: tuple of table type (``"class"``/``"func"``) and a list of
        tab-joined row strings (one per body row)
    :raises AssertionError: if the header matches no known column set,
        or matches both a func and a class column set
    """
    # (Removed unused local `first` from the original implementation.)
    table_header = None
    table_type = 'unknown'
    param_strings = []
    thead = tag.find('thead', recursive=False)
    theads = None
    # list (items in <tr> row) of <th>/<td> elements.
    if thead:
        theads = thead.find_all(["th", "td"])
    # end if
    tbody = tag.find('tbody', recursive=False)
    if tbody:
        tbody_rows = tbody.find_all("tr")
    else:
        tbody_rows = tag.find_all("tr")
    # end if
    tbodys = [  # list (rows) of list (items in <tr> row) of <tr> elements.
        row.find_all(["td", "th"]) for row in tbody_rows
    ]
    if not thead:  # so first row = header
        # NOTE(review): here `theads` becomes a <tr> Tag (iterated as raw
        # children below) and `tbodys` becomes a list of <tr> Tags rather
        # than lists of cells — confirm this mixed handling is intended.
        theads = tbody_rows[0]
        tbodys = tbody_rows[1:]
    # end if

    # TABLE HEADER
    found_columns = []
    for column in theads:
        # Either (a) `<td><strong>...</strong></td>`
        # or new (b) `<th>...</th>`
        col = column.find("strong")
        if col:  # (a) `<td><strong>...</strong></td>`
            col_text = col.text
        else:  # (b) `<th>...</th>`
            col_text = column.text
        # end if
        found_columns.append(col_text)
    # end for

    # if TABLE is func
    for test_columns in func_fields:
        if found_columns == test_columns:
            table_header = test_columns
            table_type = 'func'
            break
        # end if
    # end for

    # if TABLE is class
    if not table_header:  # only check if we don't have a result yet
        # search class now
        for test_columns in class_fields:
            if found_columns == test_columns:
                if table_header is not None:
                    raise AssertionError("Table detected as func and class: {!r}".format(found_columns))
                table_header = test_columns
                table_type = 'class'
                break
            # end if
        # end for
    # end if

    # TABLE is none of the above
    if not table_header:  # we don't have a result yet
        raise AssertionError("Unknown table, {!r}".format(found_columns))
    # end if

    # TABLE BODY
    for tds in tbodys:
        string = "\t".join([col.text for col in tds])
        logger.debug("t: " + string)
        param_strings.append(string)
    # end for
    return table_type, param_strings
|
def get_new_oids(self):
    '''Returns a list of unique oids that have not been extracted yet.

    Essentially, a diff of distinct oids in the source database
    compared to cube.
    '''
    table = self.lconfig.get('table')
    _oid = self.lconfig.get('_oid')
    if is_array(_oid):
        # get the db column, not the field alias
        _oid = _oid[0]
    last_id = self.container.get_last_field(field='_oid')
    if not last_id:
        return []
    # HACK: the WHERE clause is built by string interpolation; acceptable
    # for trusted config/last-id values, but not safe for untrusted input.
    try:
        # try to convert to integer... if not, assume unicode value
        last_id = float(last_id)
        where = "%s.%s > %s" % (table, _oid, last_id)
    except (TypeError, ValueError):
        where = "%s.%s > '%s'" % (table, _oid, last_id)
    return self.sql_get_oids(where)
|
def set_var_index(self, name, index, var):
    """Overwrite the values in variable ``name`` with data from ``var``,
    at the flattened (C-contiguous style) indices.

    ``index`` is a vector of 0-based integers of the same length as the
    vector ``var``. For some implementations it can be equivalent and
    more efficient to do: ``get_var(name).flat[index] = var``.
    """
    # Work on a copy so the model only sees the fully-updated array.
    updated = self.get_var(name).copy()
    updated.flat[index] = var
    self.set_var(name, updated)
|
def target_to_ipv6_long(target):
    """Attempt to return an IPv6 long-range list from a target string,
    or ``None`` when the string is not a valid "start-end" range.
    """
    parts = target.split('-')
    if len(parts) != 2:
        return None
    try:
        start_packed = inet_pton(socket.AF_INET6, parts[0])
        end_packed = inet_pton(socket.AF_INET6, parts[1])
    except socket.error:
        return None
    # A reversed range is invalid.
    if end_packed < start_packed:
        return None
    return ipv6_range_to_list(start_packed, end_packed)
|
def parse_sig(sig):
    """Parse signature integer values from s-expr."""
    label, body = sig
    assert label == b'sig-val'
    # Dispatch on the signature algorithm name.
    algo_name = body[0]
    parsers = {
        b'rsa': _parse_rsa_sig,
        b'ecdsa': _parse_ecdsa_sig,
        b'eddsa': _parse_eddsa_sig,
        b'dsa': _parse_dsa_sig,
    }
    return parsers[algo_name](args=body[1:])
|
def read_dictionary_file(dictionary_path):
    """Return all words in dictionary file as set."""
    # Serve from the shared cache when this path was already loaded.
    try:
        return _user_dictionary_cache[dictionary_path]
    except KeyError:
        pass
    if not dictionary_path or not os.path.exists(dictionary_path):
        return set()
    with open(dictionary_path, "rt") as dict_f:
        text = " ".join(dict_f.read().splitlines())
    return set(re.findall(r"(\w[\w']*\w|\w)", text))
|
async def sendAnimation(self, chat_id, animation, duration=None, width=None, height=None, thumb=None, caption=None, parse_mode=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
    """See: https://core.telegram.org/bots/api#sendanimation

    :param animation: Same as ``photo`` in :meth:`amanobot.aio.Bot.sendPhoto`
    """
    # _strip() collects the request payload from the local arguments via
    # locals(); 'animation' is excluded because it may be a file upload.
    # (Do not introduce or rename locals here — it would change the payload.)
    p = _strip(locals(), more=['animation'])
    return await self._api_request_with_file('sendAnimation', _rectify(p), 'animation', animation)
|
def convenience_calc_fisher_approx(self, params):
    """Calculate the BHHH approximation of the Fisher Information Matrix
    for this model/dataset.
    """
    shapes, intercepts, betas = self.convenience_split_params(params)
    # Argument order matches cc.calc_fisher_info_matrix's signature.
    fisher_args = [
        betas,
        self.design,
        self.alt_id_vector,
        self.rows_to_obs,
        self.rows_to_alts,
        self.choice_vector,
        self.utility_transform,
        self.calc_dh_d_shape,
        self.calc_dh_dv,
        self.calc_dh_d_alpha,
        intercepts,
        shapes,
        self.ridge,
        self.weights,
    ]
    return cc.calc_fisher_info_matrix(*fisher_args)
|
def prior_sediment_memory(*args, **kwargs):
    """Get the prior density of sediment memory.

    Requires keyword arguments ``mem_strength`` and ``mem_mean``.

    Returns
    -------
    y : ndarray
        Array giving the density.
    x : ndarray
        Array of Memory (ratio) values over which the density was evaluated.
    """
    # Prior for the memory (= accumulation rate variability between
    # neighbouring depths); cf. PlotMemPrior @ Bacon.R ln 114 -> ln 1119-1141.
    # Beta parameters: w_a = strength * mean, w_b = strength * (1 - mean).
    # TODO(brews): Check that these stats are correctly translated to
    # scipy.stats distribs.
    strength = kwargs['mem_strength']
    mean = kwargs['mem_mean']
    x = np.linspace(0, 1, 100)
    y = stats.beta.pdf(x, a=strength * mean, b=strength * (1 - mean))
    return y, x
|
def substitute(expr, var_map):
    """Substitute symbols or (sub-)expressions with the given replacements and
    re-evalute the result.

    Args:
        expr: The expression in which to perform the substitution
        var_map (dict): The substitution dictionary.
    """
    try:
        if isinstance(expr, SympyBasic):
            # Forward only replacements whose keys are sympy objects.
            sympy_var_map = {k: v for (k, v) in var_map.items() if isinstance(k, SympyBasic)}
            return expr.subs(sympy_var_map)
        else:
            # Delegate to the expression's own substitution protocol.
            return expr.substitute(var_map)
    except AttributeError:
        # ``expr`` has no ``substitute`` method (a plain object): fall
        # back to a direct dictionary lookup, defaulting to ``expr``.
        if expr in var_map:
            return var_map[expr]
        return expr
|
def rowmap(table, rowmapper, header, failonerror=False):
    """Transform rows via an arbitrary function. E.g.::

        >>> import petl as etl
        >>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
        ...           [1, 'male', 16, 1.45, 62.0],
        ...           [2, 'female', 19, 1.34, 55.4],
        ...           [3, 'female', 17, 1.78, 74.4],
        ...           [4, 'male', 21, 1.33, 45.2],
        ...           [5, '-', 25, 1.65, 51.9]]
        >>> def rowmapper(row):
        ...     transmf = {'male': 'M', 'female': 'F'}
        ...     return [row[0],
        ...             transmf[row['sex']] if row['sex'] in transmf else None,
        ...             row.age * 12,
        ...             row.height / row.weight ** 2]
        >>> table2 = etl.rowmap(table1, rowmapper,
        ...                     header=['subject_id', 'gender', 'age_months',
        ...                             'bmi'])

    The `rowmapper` function should accept a single row and return a single
    row (list or tuple).

    :param table: the source table
    :param rowmapper: callable mapping one input row to one output row
    :param header: header fields for the resulting table
    :param failonerror: forwarded to :class:`RowMapView`; presumably
        controls whether mapper exceptions propagate — confirm against
        RowMapView
    """
    return RowMapView(table, rowmapper, header, failonerror=failonerror)
|
def add(self, uri, methods, handler, host=None, strict_slashes=False, version=None, name=None, ):
    """Add a handler to the route list.

    :param uri: path to match
    :param methods: sequence of accepted method names. If none are
        provided, any method is allowed
    :param handler: request handler function.
        When executed, it should provide a response object.
    :param host: host(s) this route is restricted to — a string, an
        iterable of strings, or None
    :param strict_slashes: strict to trailing slash
    :param version: current version of the route or blueprint. See
        docs for further details.
    :param name: optional route name
    :return: Nothing
    """
    if version is not None:
        # Prefix the URI with "/v<version>"; strip slashes and a leading
        # "v" so both "1" and "v1" are accepted.
        version = re.escape(str(version).strip("/").lstrip("v"))
        uri = "/".join(["/v{}".format(version), uri.lstrip("/")])
    # add regular version
    self._add(uri, methods, handler, host, name)

    if strict_slashes:
        return

    if not isinstance(host, str) and host is not None:
        # we have gotten back to the top of the recursion tree where the
        # host was originally a list. By now, we've processed the strict
        # slashes logic on the leaf nodes (the individual host strings in
        # the list of host)
        return

    # Add versions with and without trailing /
    slashed_methods = self.routes_all.get(uri + "/", frozenset({}))
    unslashed_methods = self.routes_all.get(uri[:-1], frozenset({}))
    # Only register the sibling variant if some of its methods are not
    # already routed there.
    if isinstance(methods, Iterable):
        _slash_is_missing = all(method in slashed_methods for method in methods)
        _without_slash_is_missing = all(method in unslashed_methods for method in methods)
    else:
        _slash_is_missing = methods in slashed_methods
        _without_slash_is_missing = methods in unslashed_methods

    slash_is_missing = not uri[-1] == "/" and not _slash_is_missing
    without_slash_is_missing = (uri[-1] == "/" and not _without_slash_is_missing and not uri == "/")
    # add version with trailing slash
    if slash_is_missing:
        self._add(uri + "/", methods, handler, host, name)
    # add version without trailing slash
    elif without_slash_is_missing:
        self._add(uri[:-1], methods, handler, host, name)
|
def Contradiction(expr1: Expression, expr2: Expression) -> Expression:
    """Return expression which is the contradiction of `expr1` and `expr2`."""
    # (expr1 AND NOT expr2) OR (NOT expr1 AND expr2)
    left = Conjunction(expr1, Negation(expr2))
    right = Conjunction(Negation(expr1), expr2)
    return ast.fix_missing_locations(Disjunction(left, right))
|
def _preceding_siblings(self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
    """XPath - return the list of receiver's preceding siblings.

    When ``qname`` is given and does not match the receiver's qualified
    name, the result is empty.
    """
    if qname and self.qual_name != qname:
        return []
    siblings = []
    node = self
    # Walk backwards once per preceding entry recorded in ``before``.
    for _ in self.before:
        node = node.previous()
        siblings.append(node)
    return siblings
|
def _add_bad_rc(self, rc, result):
    """Sets an error with a bad return code. Handles 'quiet' logic.

    :param rc: The error code
    :param result: the per-key result object the error belongs to
    """
    if not rc:
        return

    self.all_ok = False
    # A missing key is not treated as an error in 'quiet' mode.
    if rc == C.LCB_KEY_ENOENT and self._quiet:
        return

    try:
        # Raise and immediately catch so sys.exc_info() carries a full
        # traceback for the stored error.
        raise pycbc_exc_lcb(rc)
    except PyCBC.default_exception as e:
        # Attach context to the exception before recording it.
        e.all_results = self
        e.key = result.key
        e.result = result
        self._add_err(sys.exc_info())
|
def transform_32_33(inst, new_inst, i, n, offset, instructions, new_asm):
    """MAKEFUNCTION adds another const. probably MAKECLASS as well

    Rewrites a Python 3.2 instruction for 3.3: MAKE_FUNCTION /
    MAKE_CLOSURE in 3.3 expects an extra LOAD_CONST carrying the
    function's name, which is inserted here. Returns the number of
    bytes added to the instruction stream.
    """
    add_size = xdis.op_size(new_inst.opcode, opcode_33)
    if inst.opname in ('MAKE_FUNCTION', 'MAKE_CLOSURE'):
        # Previous instruction should be a load const which
        # contains the name of the function to call
        prev_inst = instructions[i - 1]
        assert prev_inst.opname == 'LOAD_CONST'
        assert isinstance(prev_inst.arg, int)

        # Add the function name as an additional LOAD_CONST
        load_fn_const = Instruction()
        load_fn_const.opname = 'LOAD_CONST'
        load_fn_const.opcode = opcode_33.opmap['LOAD_CONST']
        load_fn_const.line_no = None
        prev_const = new_asm.code.co_consts[prev_inst.arg]
        if hasattr(prev_const, 'co_name'):
            fn_name = new_asm.code.co_consts[prev_inst.arg].co_name
        else:
            # Placeholder when the const is not a code object.
            fn_name = 'what-is-up'
        # Append the name to co_consts and reference it by index.
        const_index = len(new_asm.code.co_consts)
        new_asm.code.co_consts = list(new_asm.code.co_consts)
        new_asm.code.co_consts.append(fn_name)
        load_fn_const.arg = const_index
        load_fn_const.offset = offset
        load_fn_const.starts_line = False
        load_fn_const.is_jump_target = False
        new_asm.code.instructions.append(load_fn_const)
        load_const_size = xdis.op_size(load_fn_const.opcode, opcode_33)
        add_size += load_const_size
        # Shift the rewritten instruction past the inserted LOAD_CONST.
        new_inst.offset = offset + add_size
        pass
    return add_size
|
def tweets_for_user(screen_name, limit=1e10):
    """Collect the most recent tweets for this user (the API caps this
    at roughly 3200), sleeping in the worker to deal with rate limits.

    The collection runs in a worker thread that is abandoned after
    15 minutes so a stalled request cannot hang the caller.

    :param screen_name: Twitter handle to collect tweets for
    :param limit: maximum number of tweets to collect
    :return: list of tweets, or [] on timeout or worker failure
    """
    qu = Queue()
    p = Thread(target=_tweets_for_user, args=(qu, screen_name, limit))
    # Daemonize so a worker stuck past the timeout cannot keep the
    # interpreter alive at shutdown.
    p.daemon = True
    p.start()
    p.join(910)  # slightly over one 15-minute rate-limit window
    if p.is_alive():
        sys.stderr.write('no results after 15 minutes for %s. Aborting.' % screen_name)
        return []
    # The thread has exited; if it died without queueing a result the
    # original blocking qu.get() would have hung forever here.
    if qu.empty():
        sys.stderr.write('worker for %s exited without a result.\n' % screen_name)
        return []
    return qu.get()
|
def update_batches() -> None:
    "Update id:md5.ext batches from Dencensooru's repository."
    batches_data = requests.get(BATCHES_API_URL).json()
    batches_url = {i["name"]: i["download_url"]
                   for i in batches_data if i["type"] == "file"}
    # Batch names are numeric strings; order them numerically so the
    # last entry is the newest batch.
    order_batches = sorted(batches_url, key=int)

    try:
        existing = set(os.listdir(BATCHES_DIR))
    except FileNotFoundError:
        BATCHES_DIR.mkdir(parents=True)
        existing = set()

    def get_batch(name: str) -> None:
        # Batches already on disk are final, except the newest one,
        # which may still be growing upstream — always re-fetch it.
        if name in existing and name != order_batches[-1]:
            return
        answer = requests.get(batches_url[name])
        try:
            answer.raise_for_status()
        except requests.RequestException:
            # Best-effort: skip batches that fail to download.
            return
        with AtomicFile(BATCHES_DIR / name, "w") as file:
            file.write(answer.text)

    # Use the pool as a context manager so its worker threads are
    # released; the original leaked an unterminated ThreadPool.
    with ThreadPool(8) as pool:
        pool.map(get_batch, order_batches)
|
def send_static_file(self, filename):
    """Serve a static file, preferring the selected theme's static
    folder over the global static folder.

    In 'api-only' mode no static files are served at all.

    :param filename: static filename
    :return: response object
    """
    # 'api-only' deployments expose no static assets.
    if self.config['MODE'] == 'api-only':
        abort(404)

    theme_folder = getattr(self, 'theme_static_folder', None)
    if theme_folder:
        try:
            return send_from_directory(theme_folder, filename)
        except NotFound:
            # Not shipped by the theme; fall back to the global folder.
            pass
    return super(CustomFlask, self).send_static_file(filename)
|
def cli(ctx, verbose, fake, install, uninstall, config):
    """legit command line interface"""
    # Create a repo object and remember it as the click context object;
    # subcommands retrieve it via the @pass_scm decorator.
    scm = SCMRepo()
    scm.fake = fake
    scm.verbose = fake or verbose
    ctx.obj = scm

    if install:
        do_install(ctx, verbose, fake)
        ctx.exit()
    elif uninstall:
        do_uninstall(ctx, verbose, fake)
        ctx.exit()
    elif config:
        do_edit_settings(fake)
        ctx.exit()
    elif ctx.invoked_subcommand is None:
        # No subcommand was passed: display help to the user.
        click.echo(format_help(ctx.get_help()))
|
def _contains_wildcards(cls, s):
    r"""Return True if the string contains any unquoted special
    characters (question-mark or asterisk), otherwise False.

    A wildcard counts as "quoted" when it is immediately preceded by a
    backslash.

    Ex: _contains_wildcards("foo")   => False
    Ex: _contains_wildcards("foo\?") => False
    Ex: _contains_wildcards("foo?")  => True
    Ex: _contains_wildcards("\*bar") => False
    Ex: _contains_wildcards("*bar")  => True

    :param string s: string to check
    :returns: True if string contains any unquoted special characters,
        False otherwise.
    :rtype: boolean

    This function is a support function for _compare().
    """
    for wildcard in ("*", "?"):
        # Bug fix: the original only inspected the FIRST occurrence of
        # each wildcard, so e.g. "a\*b*" (escaped first '*', unescaped
        # second) was wrongly reported as containing no wildcards.
        # Scan every occurrence instead.
        idx = s.find(wildcard)
        while idx != -1:
            if idx == 0 or s[idx - 1] != "\\":
                return True
            idx = s.find(wildcard, idx + 1)
    return False
|
def tweet(self, status, images):  # time.sleep(10)
    # NOTE(review): Python 2 syntax (print statement) — this module
    # predates Python 3.
    # Template appended to every status; the hashtag is part of the
    # published message.
    template = "%s #soyprice"
    # The actual Twitter posting path is disabled: the triple-quoted
    # string below is a no-op expression kept as a reference for the
    # update_status / update_with_media calls.
    """if not images:
        self.twitter.update_status(status=template % status)
    else:
        medias = map(lambda i: self.upload_media(i), images)
        self.twitter.post('/statuses/update_with_media',
                          params={'status': template % status,
                                  'media': medias[0]})"""

    # Dry-run: print the would-be tweet and its character count.
    print template % status, len(template % status)
|
def generate_scheduling_block_id(num_blocks, project='test'):
    """Yield scheduling block ids of the form ``YYYYMMDD-<project>-sbiNNN``.

    :param num_blocks: number of ids to generate
    :param project: project name embedded in each id
    """
    # The date component is fixed once per call (UTC).
    date_str = strftime("%Y%m%d", gmtime())
    for index in range(num_blocks):
        yield '{}-{}-sbi{:03d}'.format(date_str, project, index)
|
def check_session(self, key, *, session=None):
    """Fail the transaction if *key* is not currently locked by *session*.

    Parameters:
        key (str): Key to check
        session (ObjectID): Session ID

    Returns self so calls can be chained.
    """
    operation = {
        "Verb": "check-session",
        "Key": key,
        "Session": extract_attr(session, keys=["ID"]),
    }
    self.append(operation)
    return self
|
def get_queryset(self):
    """Extend the parent queryset by filtering nodes of the specified layer."""
    # Resolves the requested layer and stores it on self.layer.
    self.get_layer()
    base_qs = super(LayerNodeListMixin, self).get_queryset()
    return base_qs.filter(layer_id=self.layer.id)
|
def makemigrations(application, merge=False, dry_run=False, empty=False,
                   extra_applications=None):
    """Generate Django migrations for one or more applications.

    :param application: primary app label
    :param merge: pass --merge to makemigrations
    :param dry_run: pass --dry-run to makemigrations
    :param empty: pass --empty to makemigrations
    :param extra_applications: additional app label (str) or labels (list)
    """
    from django.core.management import call_command

    apps = [application]
    if extra_applications:
        # Accept either a single label or a list of labels.
        if isinstance(extra_applications, text_type):
            apps.append(extra_applications)
        elif isinstance(extra_applications, list):
            apps.extend(extra_applications)

    for app in apps:
        call_command('makemigrations', app,
                     merge=merge, dry_run=dry_run, empty=empty)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.