signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _request(self, method, url, params=None, uploads=None):
    """Issue an HTTP request via pycurl and return the response body.

    :param method: one of ``self.POST`` / ``self.GET`` / ``self.DELETE``
    :param url: target URL
    :param params: sequence of ``(key, value)`` pairs (or ``None``)
    :param uploads: dict (single upload) or list of dicts with
        ``'field'`` and ``'path'`` keys
    :raises NotSupportedError: for an unknown HTTP method
    :raises TransferError: when the transfer itself fails
    :raises ResponseError: when the server replies with a non-2xx status
    """
    if params is None:
        # BUG FIX: the upload branch appended to params and the POST
        # branch urlencoded it; both crashed when params was None.
        params = []
    c = pycurl.Curl()
    if method == self.POST:
        c.setopt(c.POST, 1)
        if uploads is not None:
            if isinstance(uploads, dict):
                # handle single upload
                uploads = [uploads]
            for upload in uploads:
                params += [(upload['field'], (c.FORM_FILE, upload['path']))]
            c.setopt(c.HTTPPOST, params)
        else:
            # XXX memory leak in pyCurl/7.29.0?
            data = urllib.urlencode(params)
            c.setopt(c.POSTFIELDS, data)
    elif method == self.GET:
        c.setopt(c.HTTPGET, 1)
        if params:
            url += '?%s' % urllib.urlencode(params)
    elif method == self.DELETE:
        c.setopt(pycurl.CUSTOMREQUEST, self.DELETE)
    else:
        raise NotSupportedError(str(method))

    chunks = []  # renamed from 'buffer': avoid shadowing the builtin

    def _write_cb(data):
        chunks.append(data)

    c.setopt(c.HTTPHEADER, self._hook_header(params))
    # NOTE(review): TLS peer/host verification is disabled here --
    # confirm this is intentional for this server.
    c.setopt(pycurl.SSL_VERIFYPEER, 0)
    c.setopt(pycurl.SSL_VERIFYHOST, 0)
    c.setopt(c.URL, url)
    c.setopt(c.NOPROGRESS, 0)
    c.setopt(c.PROGRESSFUNCTION, self._updated_cb)
    c.setopt(c.WRITEFUNCTION, _write_cb)
    c.setopt(c.FOLLOWLOCATION, 1)
    try:
        self.emit('started')
        c.perform()
    except pycurl.error as e:  # 'as' form works on Python 2.6+ and 3
        raise TransferError(str(e))
    else:
        code = c.getinfo(c.HTTP_CODE)
        if not 200 <= code < 300:
            raise ResponseError(code)
    finally:
        c.close()
    return ''.join(chunks)
def _shutdown(self):
    """Run all registered exit handlers, then close and clear the socket."""
    for handler in self._exit_handlers:
        handler()
    sock = self._socket
    if sock:
        sock.close()
        self._socket = None
def listMemberHelps(TargetGroup):
    r"""Return (name-with-aliases, description) pairs for a group's children."""
    unique_members = []
    for member in TargetGroup.Members.values():
        # get unique children (by discarding aliases)
        if member not in unique_members:
            unique_members.append(member)
    result = []
    for member in unique_members:
        cfg = member.Config
        label = cfg['name']
        if 'alias' in cfg:
            label = '%s, %s' % (label, cfg['alias'])
        result.append((label, cfg.get('desc', '')))
    return result
async def editMessageLiveLocation(self, msg_identifier, latitude, longitude, reply_markup=None):
    """Edit a live-location message via the Bot API.

    See: https://core.telegram.org/bots/api#editmessagelivelocation

    :param msg_identifier: Same as in :meth:`.Bot.editMessageText`
    """
    # NOTE: _strip() reads locals() by name, so the parameter names above
    # are part of the wire payload -- do not rename them.  msg_identifier
    # is stripped out and re-added in dismantled form below.
    p = _strip(locals(), more=['msg_identifier'])
    p.update(_dismantle_message_identifier(msg_identifier))
    return await self._api_request('editMessageLiveLocation', _rectify(p))
def _called_from_setup(run_frame):
    """Attempt to detect whether run() was called from setup() or by another
    command.  If called by setup(), the parent caller will be the
    'run_command' method in 'distutils.dist', and *its* caller will be the
    'run_commands' method.  If called any other way, the immediate caller
    *might* be 'run_command', but it won't have been called by
    'run_commands'.  Return True in that case or if a call stack is
    unavailable.  Return False otherwise.
    """
    if run_frame is None:
        warnings.warn("Call stack not available. bdist_* commands may fail.")
        if platform.python_implementation() == 'IronPython':
            warnings.warn("For best results, pass -X:Frames to enable call stack.")
        return True
    frame_info = inspect.getouterframes(run_frame)[2]
    caller, = frame_info[:1]
    info = inspect.getframeinfo(caller)
    caller_module = caller.f_globals.get('__name__', '')
    return caller_module == 'distutils.dist' and info.function == 'run_commands'
def error(self, msg, previous=False):
    """Raise a ParseException pinpointing where parsing failed.

    We provide information to help locate the error in the config to allow
    easy config debugging for users.  ``previous`` indicates that the error
    actually occurred at the end of the previous line.
    """
    token = self.tokens[self.current_token - 1]
    line_no = self.line - 1 if previous else self.line
    line = self.raw[line_no]
    if previous:
        position = len(line) + 2
    else:
        position = token["start"] - self.line_start
    raise ParseException(msg, line, line_no + 1, position, token["value"])
def create_data_dir():
    """Create PRODUCT_CONTEXT.DATA_DIR if it does not already exist.

    :return: None
    """
    from django_productline.context import PRODUCT_CONTEXT
    data_dir = PRODUCT_CONTEXT.DATA_DIR
    if os.path.exists(data_dir):
        print('...DATA_DIR already exists.')
    else:
        os.mkdir(data_dir)
        print('*** Created DATA_DIR in %s' % data_dir)
def add_header(self, name: str, value: _HeaderTypes) -> None:
    """Add a response header without replacing any existing value.

    Unlike `set_header`, `add_header` may be called multiple times
    to return multiple values for the same header.
    """
    converted = self._convert_header_value(value)
    self._headers.add(name, converted)
def update(self, data):
    """:see: :meth:RedisMap.update"""
    if not data:
        return None
    pipe = self._client.pipeline(transaction=False)
    # First count how many of the keys already exist, so the bucket
    # size counter below is only bumped for genuinely new entries.
    for key in data.keys():
        pipe.exists(self.get_key(key))
    existing = pipe.execute().count(True)
    make_key, dumps = self.get_key, self._dumps
    payload = {make_key(k): dumps(v) for k, v in data.items()}
    pipe.mset(payload)
    pipe.hincrby(self._bucket_key, self.key_prefix, len(payload) - existing)
    return pipe.execute()[0]
def input_waiting(self):
    """Query the number of bytes waiting to be read from the serial port.

    Returns:
        int: number of bytes waiting to be read.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    # ioctl fills this single unsigned int in place.
    count = array.array('I', [0])
    try:
        fcntl.ioctl(self._fd, termios.TIOCINQ, count, True)
    except OSError as err:
        raise SerialError(err.errno, "Querying input waiting: " + err.strerror)
    return count[0]
def rtc_as_set(self):
    """Returns current RTC AS configured for current neighbors."""
    return {neighbor.rtc_as for neighbor in self._neighbors.values()}
def load_plugins(self):
    """Load plugins from entry point(s)."""
    from pkg_resources import iter_entry_points
    seen = set()
    for group in self.entry_points:
        for ep in iter_entry_points(group):
            if ep.name in seen:
                continue
            seen.add(ep.name)
            try:
                plugin_class = ep.load()
            except Exception as exc:
                # never let a plugin load kill us
                warn("Unable to load plugin %s: %s" % (ep, exc), RuntimeWarning)
            else:
                self.add_plugin(plugin_class())
    super(EntryPointPluginManager, self).load_plugins()
def input(self, data):
    """Reassemble small fragments into complete length-prefixed packets,
    yielding each complete packet's payload.

    Each packet is a HEADER_SIZE-byte struct 'i' length followed by that
    many payload bytes; incomplete trailing data stays in ``self.buf``.
    """
    self.buf += data
    while len(self.buf) > HEADER_SIZE:
        (payload_len,) = struct.unpack('i', self.buf[0:HEADER_SIZE])
        total = payload_len + HEADER_SIZE
        if len(self.buf) < total:
            break
        packet = self.buf[HEADER_SIZE:total]
        self.buf = self.buf[total:]
        yield packet
def _next_job(self):
    """execute the next job from the top of the queue"""
    if not self.__job_queue:
        return
    job = self.__job_queue.pop()
    job.process()
def yaml(self, dirPath=None):
    """Render the matched results in yaml format.

    **Key Arguments:**
        - ``dirPath`` -- path to the directory to save the rendered
          results to.  Default *None* (return only, no files written).

    **Return:**
        - ``yamlSources`` -- the top-level transient data
        - ``yamlPhot`` -- all photometry associated with the transients
        - ``yamlSpec`` -- all spectral data associated with the transients
        - ``yamlFiles`` -- all files associated with the matched
          transients found on the tns

    When ``dirPath`` is given, the four flavours of data are written to
    separate files, each prefixed with this search's file prefix; rows
    can be re-associated via the transient's unique ``TNSId``.
    """
    renderers = (
        (self.sourceResults, "sources.yaml"),
        (self.photResults, "phot.yaml"),
        (self.specResults, "spec.yaml"),
        (self.relatedFilesResults, "relatedFiles.yaml"),
    )
    if dirPath:
        prefix = self._file_prefix()
        results = [r.yaml(filepath=dirPath + "/" + prefix + suffix)
                   for r, suffix in renderers]
    else:
        results = [r.yaml() for r, _ in renderers]
    return tuple(results)
def column_family_definition(keyspace, column_family):
    '''
    Return a dictionary of column family definitions for the given
    keyspace/column_family

    CLI Example:

    .. code-block:: bash

        salt '*' cassandra.column_family_definition <keyspace> <column_family>
    '''
    mgr = _sys_mgr()
    try:
        cf = mgr.get_keyspace_column_families(keyspace)[column_family]
        return vars(cf)
    except Exception:
        log.debug('Invalid Keyspace/CF combination')
        return None
def get_by_flags(self, flags):
    """Iterate all register infos matching the given flags."""
    yield from (reg for reg in self._reg_infos
                if (reg.flags & flags) == flags)
def request(self, uri, method, *args, **kwargs):
    """Formats the request into a dict representing the headers and body
    that will be used to make the API call.

    :param uri: target URI
    :param method: HTTP verb
    :raises exc.from_response: when the server replies with status >= 400
    :returns: ``(response, body)`` tuple from ``pyrax.http.request``
    """
    if self.timeout:
        kwargs["timeout"] = self.timeout
    kwargs["verify"] = self.verify_ssl
    # setdefault alone suffices; the old
    # kwargs.setdefault("headers", kwargs.get("headers", {})) did a
    # redundant second lookup of the same key.
    headers = kwargs.setdefault("headers", {})
    headers["User-Agent"] = self.user_agent
    headers["Accept"] = "application/json"
    if ("body" in kwargs) or ("data" in kwargs):
        if "Content-Type" not in headers:
            headers["Content-Type"] = "application/json"
        elif headers["Content-Type"] is None:
            # A caller-supplied None means "send no Content-Type at all".
            del headers["Content-Type"]
    # Allow subclasses to add their own headers
    self._add_custom_headers(headers)
    resp, body = pyrax.http.request(method, uri, *args, **kwargs)
    if resp.status_code >= 400:
        raise exc.from_response(resp, body)
    return resp, body
def _marginal_loglike(self, x):
    """Internal function to calculate and cache the marginal likelihood.

    Integrates the likelihood over the nuisance parameter on the grid
    returned by the nuisance PDF, caches the resulting negative
    log-likelihood in ``self._marg_z`` and an interpolator in
    ``self._marg_interp``, and returns ``self._marg_z``.
    """
    # Bin edges, widths and centers for the nuisance-parameter grid.
    yedge = self._nuis_pdf.marginalization_bins()
    yw = yedge[1:] - yedge[:-1]
    yc = 0.5 * (yedge[1:] + yedge[:-1])
    # Evaluate the 2D likelihood on the (x, nuisance-center) grid.
    s = self.like(x[:, np.newaxis], yc[np.newaxis, :])
    # This does the marginalization integral
    z = 1. * np.sum(s * yw, axis=1)
    self._marg_z = np.zeros(z.shape)
    msk = z > 0
    self._marg_z[msk] = -1 * np.log(z[msk])
    # Extrapolate to unphysical values
    # FIXME, why is this needed
    # NOTE(review): the linear-in-log slope is taken from the last two
    # positive-z points; presumably msk selects a contiguous leading
    # region of x -- confirm with the callers.
    dlogzdx = (np.log(z[msk][-1]) - np.log(z[msk][-2])) / (x[msk][-1] - x[msk][-2])
    self._marg_z[~msk] = self._marg_z[msk][-1] + (self._marg_z[~msk] - self._marg_z[msk][-1]) * dlogzdx
    self._marg_interp = castro.Interpolator(x, self._marg_z)
    return self._marg_z
def _parse_canonical_regex(doc):
    """Decode a JSON regex to bson.regex.Regex.

    :param doc: a dict whose single key is ``'$regularExpression'``,
        mapping to a dict with exactly ``'pattern'`` and ``'options'``.
    :raises TypeError: when extra fields or components are present
    """
    regex = doc['$regularExpression']
    if len(doc) != 1:
        raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,))
    if len(regex) != 2:
        # BUG FIX: the concatenated literals were missing a space,
        # producing the message '..."pattern"and "options"...'.
        raise TypeError('Bad $regularExpression must include only "pattern" '
                        'and "options" components: %s' % (doc,))
    return Regex(regex['pattern'], regex['options'])
def to_str(number):
    """Convert a task state ID number to a string.

    :param int number: task state ID, eg. 1
    :returns: state name like eg. "OPEN", or "(unknown)" if we don't
              know the name of this task state ID number.
    """
    # State constants live as UPPERCASE module globals; scan for a match.
    for name, value in globals().items():
        if number == value and name.isalpha() and name.isupper():
            return name
    return '(unknown state %d)' % number
def prune(self, edge, length=None):
    """Prunes a subtree from the main Tree, retaining an edge length
    specified by length (defaults to entire length).  The length is
    sanity-checked by edge_length_check, to ensure it is within the
    bounds [0, edge.length].

    Returns the basal node of the pruned subtree.
    """
    # BUG FIX: 'length = length or edge.length' silently replaced a
    # legitimate length of 0 (valid per the documented bounds) with the
    # full edge length.  Use an explicit None check instead.
    if length is None:
        length = edge.length
    edge_length_check(length, edge)
    node = edge.head_node
    self.tree._tree.prune_subtree(node, suppress_unifurcations=False)
    node.edge_length = length
    self.tree._dirty = True
    return node
def get_next_action(self, request, application, label, roles):
    """Process the get_next_action request at the current step.

    Three outcomes are possible:
    1. An authenticated user (or an already-registered SAML identity)
       reaches the applicant's secret URL -- offer to "steal" the
       application, or perform the steal if confirmed via POST.
    2. A leader/delegate (who is neither admin nor applicant) views the
       application -- show the leader page with a 'reopen' action.
    3. Otherwise fall through to the StateWithSteps default behaviour.
    """
    # if user is logged and and not applicant, steal the
    # application
    if 'is_applicant' in roles:
        # if we got this far, then we either we are logged in as applicant,
        # or we know the secret for this application.
        new_person = None
        reason = None
        details = None
        attrs, _ = saml.parse_attributes(request)
        saml_id = attrs['persistent_id']
        if saml_id is not None:
            # An existing person already owns this SAML identity.
            query = Person.objects.filter(saml_id=saml_id)
            if application.content_type.model == "person":
                query = query.exclude(pk=application.applicant.pk)
            if query.count() > 0:
                new_person = Person.objects.get(saml_id=saml_id)
                reason = "SAML id is already in use by existing person."
                details = (
                    "It is not possible to continue this application " +
                    "as is because the saml identity already exists " +
                    "as a registered user.")
            del query
        if request.user.is_authenticated:
            # A logged-in user takes precedence over the SAML match above.
            new_person = request.user
            reason = "%s was logged in " "and accessed the secret URL." % new_person
            details = (
                "If you want to access this application " +
                "as %s " % application.applicant +
                "without %s stealing it, " % new_person +
                "you will have to ensure %s is " % new_person +
                "logged out first.")
        if new_person is not None:
            if application.applicant != new_person:
                if 'steal' in request.POST:
                    # Confirmed: reassign the application and log/notify.
                    old_applicant = application.applicant
                    application.applicant = new_person
                    application.save()
                    log.change(application.application_ptr, "Stolen application from %s" % old_applicant)
                    messages.success(request, "Stolen application from %s" % old_applicant)
                    url = base.get_url(request, application, roles, label)
                    return HttpResponseRedirect(url)
                else:
                    # Not confirmed yet: show the steal-confirmation page.
                    return render(
                        template_name='kgapplications' '/project_aed_steal.html',
                        context={'application': application, 'person': new_person, 'reason': reason, 'details': details, },
                        request=request)
    # if the user is the leader, show him the leader specific page.
    if ('is_leader' in roles or 'is_delegate' in roles) and 'is_admin' not in roles and 'is_applicant' not in roles:
        actions = ['reopen']
        if 'reopen' in request.POST:
            return 'reopen'
        return render(
            template_name='kgapplications/project_aed_for_leader.html',
            context={'application': application, 'actions': actions, 'roles': roles, },
            request=request)
    # otherwise do the default behaviour for StateWithSteps
    return super(StateApplicantEnteringDetails, self).get_next_action(request, application, label, roles)
def delete_tag(self, tag):
    """::

        DELETE /:login/machines/:id/tags/:tag

    Delete a tag and its corresponding value on the machine.
    """
    _, response = self.datacenter.request('DELETE', self.path + '/tags/' + tag)
    response.raise_for_status()
def remove_scene(self, scene_id):
    """remove a scene by Scene ID

    Returns a ``(success, sequence_number, message)`` tuple; the active
    scene and unknown scene IDs are refused.
    """
    if self.state.activeSceneId == scene_id:
        err_msg = "Requested to delete scene {sceneNum}, which is currently active. Cannot delete active scene.".format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    if scene_id not in self.state.scenes:
        err_msg = "Requested to delete scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    del self.state.scenes[scene_id]
    logging.debug("Deleted scene {sceneNum}".format(sceneNum=scene_id))
    # the scene is gone -- tell subscribers about it
    sequence_number = self.zmq_publisher.publish_scene_remove(scene_id)
    logging.debug("Removed scene {sceneNum}".format(sceneNum=scene_id))
    return (True, sequence_number, "OK")
def print_subcommands(data, nested_content, markDownHelp=False, settings=None):
    """Render each subcommand as a docutils section.

    Each subcommand is a dictionary with the following keys:
    ['usage', 'action_groups', 'bare_usage', 'name', 'help'] -- and
    possibly a 'description' entry.  Everything is tossed into a new
    section titled 'name'.
    """
    definitions = map_nested_definitions(nested_content)
    items = []
    if 'children' not in data:
        return items
    subCommands = nodes.section(ids=["Sub-commands:"])
    subCommands += nodes.title('Sub-commands:', 'Sub-commands:')
    for child in data['children']:
        sec = nodes.section(ids=[child['name']])
        sec += nodes.title(child['name'], child['name'])
        if 'description' in child and child['description']:
            desc = [child['description']]
        elif child['help']:
            desc = [child['help']]
        else:
            desc = ['Undocumented']
        # Merge in nested content attached to this subcommand name.
        subContent = []
        if child['name'] in definitions:
            classifier, s, subContent = definitions[child['name']]
            if classifier == '@replace':
                desc = [s]
            elif classifier == '@after':
                desc.append(s)
            elif classifier == '@before':
                desc.insert(0, s)
        for element in renderList(desc, markDownHelp):
            sec += element
        sec += nodes.literal_block(text=child['bare_usage'])
        merged = nested_content + subContent
        for node in print_action_groups(child, merged, markDownHelp, settings=settings):
            sec += node
        for node in print_subcommands(child, merged, markDownHelp, settings=settings):
            sec += node
        if 'epilog' in child and child['epilog']:
            for element in renderList([child['epilog']], markDownHelp):
                sec += element
        subCommands += sec
    items.append(subCommands)
    return items
def make_chunk_for(output_dir=LOCAL_DIR, local_dir=LOCAL_DIR, game_dir=None, model_num=1, positions=EXAMPLES_PER_GENERATION, threads=8, sampling_frac=0.02):
    """Explicitly make a golden chunk for a given model `model_num`
    (not necessarily the most recent one).

    While we haven't yet got enough samples (EXAMPLES_PER_GENERATION),
    add samples from the games of previous models.
    """
    game_dir = game_dir or fsdb.selfplay_dir()
    ensure_dir_exists(output_dir)
    earlier_models = [m for m in fsdb.get_models() if m[0] < model_num]
    buf = ExampleBuffer(positions, sampling_frac=sampling_frac)
    files = []
    for _, name in sorted(earlier_models, reverse=True):
        local_model_dir = os.path.join(local_dir, name)
        if not tf.gfile.Exists(local_model_dir):
            print("Rsyncing", name)
            _rsync_dir(os.path.join(game_dir, name), local_model_dir)
        files.extend(tf.gfile.Glob(os.path.join(local_model_dir, '*.zz')))
        print("{}: {} games".format(name, len(files)))
        # Rough capacity estimate: ~200 positions per game, thinned by
        # sampling_frac; stop collecting once we have enough.
        if len(files) * 200 * sampling_frac > positions:
            break
    print("Filling from {} files".format(len(files)))
    buf.parallel_fill(files, threads=threads)
    print(buf)
    output = os.path.join(output_dir, str(model_num) + '.tfrecord.zz')
    print("Writing to", output)
    buf.flush(output)
def _get_ignore_from_manifest_lines(lines):
    """Gather the various ignore patterns from a MANIFEST.in.

    'lines' should be a list of strings with comments removed and
    continuation lines joined.

    Returns a list of standard ignore patterns and a list of regular
    expressions to ignore.
    """
    patterns = []
    regexps = []
    sep = os.path.sep
    for line in lines:
        try:
            cmd, rest = line.split(None, 1)
        except ValueError:
            # no whitespace, so not interesting
            continue
        for part in rest.split():
            # distutils enforces these warnings on Windows only
            if part.startswith('/'):
                warning("ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s" % part)
            if part.endswith('/'):
                warning("ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s" % part)
        if cmd == 'exclude':
            # An exclude of 'dirname/*css' can match 'dirname/foo.css'
            # but not 'dirname/subdir/bar.css'.  We need a regular
            # expression for that, since fnmatch doesn't pay attention
            # to directory separators.
            for pat in rest.split():
                if '*' in pat or '?' in pat or '[!' in pat:
                    regexps.append(_glob_to_regexp(pat))
                else:
                    # No need for special handling.
                    patterns.append(pat)
        elif cmd == 'global-exclude':
            patterns.extend(rest.split())
        elif cmd == 'recursive-exclude':
            try:
                dirname, pats = rest.split(None, 1)
            except ValueError:
                # Wrong MANIFEST.in line.
                warning("You have a wrong line in MANIFEST.in: %r\n"
                        "'recursive-exclude' expects <dir> <pattern1> "
                        "<pattern2> ..." % line)
                continue
            # Strip path separator for clarity.
            dirname = dirname.rstrip(sep)
            for pattern in pats.split():
                if pattern.startswith('*'):
                    patterns.append(dirname + sep + pattern)
                else:
                    # 'recursive-exclude plone metadata.xml' should
                    # exclude plone/metadata.xml and plone/*/metadata.xml,
                    # where * can be any number of sub directories.  We
                    # could use a regexp, but two ignores seems easier.
                    patterns.append(dirname + sep + pattern)
                    patterns.append(dirname + sep + '*' + sep + pattern)
        elif cmd == 'prune':
            # rest is considered to be a directory name.  It should not
            # contain a path separator, as it actually has no effect in
            # that case, but that could differ per python version.  We
            # strip it here to avoid double separators.
            # XXX: mg: I'm not 100% sure the above is correct, AFAICS
            # all pythons from 2.6 complain if the path has a leading or
            # trailing slash -- on Windows, that is.
            rest = rest.rstrip('/\\')
            patterns.append(rest)
            patterns.append(rest + sep + '*')
    return patterns, regexps
def subnet_2_json(self):
    """Transform this ariane_clip3 subnet object to an Ariane server JSON string.

    :return: JSON string representation of this subnet
    """
    LOGGER.debug("Subnet.subnet_2_json")
    payload = {
        'subnetID': self.id,
        'subnetName': self.name,
        'subnetDescription': self.description,
        'subnetIP': self.ip,
        'subnetMask': self.mask,
        'subnetRoutingAreaID': self.routing_area_id,
        'subnetIPAddressesID': self.ipAddress_ids,
        'subnetLocationsID': self.loc_ids,
        'subnetOSInstancesID': self.osi_ids,
    }
    return json.dumps(payload)
def raw_datastream(request, pid, dsid, repo=None, headers=None, as_of_date=None):
    '''Access raw datastream content from a Fedora object.

    Thin public wrapper around :func:`_raw_datastream`: returns
    :class:`~django.http.HttpResponse` for HEAD requests and
    :class:`~django.http.StreamingHttpResponse` for GET requests, with
    Fedora's headers/status applied (any ``headers`` given here win) and
    an HTTP_RANGE header passed through.  Wrapped with ETag and
    last-modified conditionals.

    :param request: HttpRequest
    :param pid: Fedora object PID
    :param dsid: datastream ID
    :param repo: optional :class:`~eulcore.django.fedora.server.Repository`
        instance for custom repository initialization
    :param headers: dictionary of additional response headers
    :param as_of_date: access a historical version of the datastream
    '''
    return _raw_datastream(request, pid, dsid, repo=repo,
                           headers=headers, as_of_date=as_of_date)
def get_output_key(self, args, kwargs):
    """Return the key that the output should be cached with, given
    arguments, keyword arguments, and a list of arguments to ignore."""
    # Map argument names to values, with ignored arguments omitted.
    relevant = joblib.func_inspect.filter_args(self.func, self.ignore, args, kwargs)
    # Hash a deterministic (sorted) tuple of the remaining values.
    return db.generate_key(tuple(sorted(relevant.values())))
def state_create(history_id_key, table_name, collision_checker, always_set=[]):
    """Decorator for the check() method on state-creating operations.

    Makes sure that:
    * there is a __preorder__ field set, which contains the
      state-creating operation's associated preorder
    * there is a __table__ field set, which contains the table into
      which to insert this state into
    * there is a __history_id_key__ field set, which identifies the
      table's primary key name
    * there are no unexpired, duplicate instances of this state with
      this history id (i.e. if we're preordering a name that had
      previously expired, we need to preserve its history)

    NOTE(review): 'always_set=[]' is a mutable default argument; it is
    only read here, but callers should not mutate the returned nameop's
    '__always_set__' list in place.
    """
    def wrap(check):
        def wrapped_check(state_engine, nameop, block_id, checked_ops):
            rc = check(state_engine, nameop, block_id, checked_ops)
            # pretty sure this isn't necessary any longer, but leave this is an assert just in case
            assert op_get_opcode_name(nameop['op']) in OPCODE_CREATION_OPS, 'BUG: opcode became {}'.format(nameop['op'])
            # succeeded?
            if rc:
                # ensure that there's now a __preorder__
                try:
                    assert '__preorder__' in nameop.keys(), "Missing __preorder__"
                except Exception, e:
                    # fatal: a state-creating op without its preorder is
                    # unrecoverable, so abort the whole process.
                    log.exception(e)
                    log.error("FATAL: missing fields")
                    os.abort()
                # propagate __table__ and __history_id_key__
                nameop['__table__'] = table_name
                nameop['__history_id_key__'] = history_id_key
                nameop['__state_create__'] = True
                nameop['__always_set__'] = always_set
                # sanity check --- we need to have the appropriate metadata for this operation
                invariant_tags = state_create_invariant_tags()
                for tag in invariant_tags:
                    assert tag in nameop, "BUG: missing invariant tag '%s'" % tag
                # sanity check --- all required consensus fields must be present
                for required_field in CONSENSUS_FIELDS_REQUIRED:
                    assert required_field in nameop, 'BUG: missing required consensus field {}'.format(required_field)
                # verify no duplicates
                rc = state_check_collisions(state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker)
                if rc:
                    # this is a duplicate!
                    log.debug("COLLISION on %s '%s'" % (history_id_key, nameop[history_id_key]))
                    rc = False
                else:
                    # no collision
                    rc = True
            return rc
        return wrapped_check
    return wrap
def collect_num(self):
    """Get the number of times this answer has been favourited (collected).

    :return: favourite count of the answer
    :rtype: int
    """
    anchor = self.soup.find("a", {"data-za-a": "click_answer_collected_count"})
    return 0 if anchor is None else int(anchor.get_text())
def handle_set_citation_double(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle a ``SET Citation = {"X", "Y"}`` statement."""
    values = tokens['values']
    citation_type, reference = values[0], values[1]
    # PubMed references must be integer identifiers.
    if citation_type == CITATION_TYPE_PUBMED and not is_int(reference):
        raise InvalidPubMedIdentifierWarning(self.get_line_number(), line, position, reference)
    self.citation = dict(zip((CITATION_TYPE, CITATION_REFERENCE), values))
    return tokens
def executeAndWait(self, query, clientCtx):
    """run a query synchronously and return a handle (QueryHandle).

    Parameters:
     - query
     - clientCtx
    """
    self.send_executeAndWait(query, clientCtx)
    handle = self.recv_executeAndWait()
    return handle
def retrieve_xml(pdb_id, silent=True):
    '''The RCSB website now compresses XML files; download and decompress.'''
    xml_gz = retrieve_file_from_RCSB(get_rcsb_files_connection(), "/download/%s.xml.gz" % pdb_id, silent=silent)
    # StringIO(initial_value) starts positioned at 0, replacing the
    # original write()-then-seek(0) dance.
    compressed = StringIO.StringIO(xml_gz)
    gz = gzip.GzipFile(fileobj=compressed, mode='rb')
    contents = gz.read()
    gz.close()
    return contents
def jplace_split(self, original_jplace, cluster_dict):
    '''To make GraftM more efficient, reads are dereplicated and merged into
    one file prior to placement using pplacer.  This function separates the
    single jplace file produced by this process into separate jplace files,
    one per input file (if multiple were provided) and backfills abundance
    (re-replicates) into the placement file so analyses can be done using
    the placement files.

    Parameters
    ----------
    original_jplace : dict (json)
        json .jplace file from the pplacer step.
    cluster_dict : dict
        dictionary stores information on pre-placement clustering

    Returns
    -------
    A dict containing placement hashes to write to new jplace file.
    Each key represents a file alias.
    '''
    output_hash = {}
    for placement in original_jplace['placements']:
        nm_dict = {}
        p = placement['p']
        if 'nm' in placement:
            nm = placement['nm']
        elif 'n' in placement:
            nm = placement['n']
        else:
            raise Exception("Unexpected jplace format: Either 'nm' or 'n' are expected as keys in placement jplace .JSON file")
        for nm_entry in nm:
            placement_read_name, plval = nm_entry
            # The trailing _<idx> on the read name records which input
            # file the read originated from.
            read_alias_idx = placement_read_name.split('_')[-1]
            read_name = '_'.join(placement_read_name.split('_')[:-1])
            # Re-replicate: one [name, value] pair per clustered read.
            nm_list = [[read.name, plval]
                       for read in cluster_dict[read_alias_idx][read_name]]
            if read_alias_idx not in nm_dict:
                nm_dict[read_alias_idx] = nm_list
            else:
                # BUG FIX: was '+= nm_entry', which extended the list with
                # the raw name and value scalars instead of [name, value]
                # pairs, corrupting the output structure.
                nm_dict[read_alias_idx] += nm_list
        # .items() works on both Python 2 and 3 (was .iteritems()).
        for alias_idx, pairs in nm_dict.items():
            placement_hash = {'p': p, 'nm': pairs}
            output_hash.setdefault(alias_idx, []).append(placement_hash)
    return output_hash
def _var(self):
    """Get/Set a variable via a dict-like proxy object."""
    owner = self

    class Variables(object):
        # item access delegates to the owner's variable registry
        def __getitem__(self, name):
            return owner.getVariable(name)

        def __setitem__(self, name, value):
            owner.getVariable(name).setValue(value)

        def __iter__(self):
            return owner.getVariables()

    return Variables()
def rerender(self):
    '''Rerender all derived images from the original.

    If optimization settings or expected sizes changed, they will be
    used for the new rendering.
    '''
    with self.fs.open(self.original, 'rb') as source:
        # Buffer the original in memory so save() cannot overwrite the
        # file while it is still being read.
        buffered = io.BytesIO(source.read())
        self.save(buffered, filename=self.filename, bbox=self.bbox, overwrite=True)
def xs(self, key, axis=0, level=None, drop_level=True):
    """
    Return cross-section from the Series/DataFrame.

    This method takes a `key` argument to select data at a particular
    level of a MultiIndex.

    Parameters
    ----------
    key : label or tuple of label
        Label contained in the index, or partially in a MultiIndex.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis to retrieve cross-section on.
    level : object, defaults to first n levels (n=1 or len(key))
        In case of a key partially contained in a MultiIndex, indicate
        which levels are used. Levels can be referred by label or position.
    drop_level : bool, default True
        If False, returns object with same levels as self.

    Returns
    -------
    Series or DataFrame
        Cross-section from the original Series or DataFrame
        corresponding to the selected index levels.

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns by label(s) or a
        boolean array.
    DataFrame.iloc : Purely integer-location based indexing for selection
        by position.

    Notes
    -----
    `xs` can not be used to set values.

    MultiIndex Slicers is a generic way to get/set values on any level or
    levels. It is a superset of `xs` functionality, see
    :ref:`MultiIndex Slicers <advanced.mi_slicers>`.

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [4, 2],
    ...                    'num_wings': [0, 2]},
    ...                   index=['dog', 'penguin'])
    >>> df.xs('dog')
    num_legs     4
    num_wings    0
    Name: dog, dtype: int64
    """
    axis = self._get_axis_number(axis)
    labels = self._get_axis(axis)
    if level is not None:
        # Partial-key lookup at an explicit level of a MultiIndex.
        loc, new_ax = labels.get_loc_level(key, level=level,
                                           drop_level=drop_level)

        # create the tuple of the indexer
        indexer = [slice(None)] * self.ndim
        indexer[axis] = loc
        indexer = tuple(indexer)

        result = self.iloc[indexer]
        # Install the (possibly level-dropped) axis on the result.
        setattr(result, result._get_axis_name(axis), new_ax)
        return result

    if axis == 1:
        # Column cross-section is just column selection.
        return self[key]

    self._consolidate_inplace()

    index = self.index
    if isinstance(index, MultiIndex):
        loc, new_index = self.index.get_loc_level(key,
                                                  drop_level=drop_level)
    else:
        loc = self.index.get_loc(key)

        if isinstance(loc, np.ndarray):
            # Duplicate labels: get_loc returned a boolean mask or an
            # integer indexer rather than a scalar position.
            if loc.dtype == np.bool_:
                inds, = loc.nonzero()
                return self._take(inds, axis=axis)
            else:
                return self._take(loc, axis=axis)

        if not is_scalar(loc):
            new_index = self.index[loc]

    if is_scalar(loc):
        # Single row: extract a fast cross-section of the block values.
        new_values = self._data.fast_xs(loc)

        # may need to box a datelike-scalar
        #
        # if we encounter an array-like and we only have 1 dim
        # that means that their are list/ndarrays inside the Series!
        # so just return them (GH 6394)
        if not is_list_like(new_values) or self.ndim == 1:
            return com.maybe_box_datetimelike(new_values)

        result = self._constructor_sliced(
            new_values, index=self.columns,
            name=self.index[loc], dtype=new_values.dtype)

    else:
        result = self.iloc[loc]
        result.index = new_index

    # this could be a view
    # but only in a single-dtyped view slicable case
    result._set_is_copy(self, copy=not result._is_view)

    return result
def get_recp_symmetry_operation(structure, symprec=0.01):
    """Find the symmetric operations of the reciprocal lattice, to be used
    for hkl transformations.

    Args:
        structure (Structure): conventional unit cell
        symprec: symmetry-finding tolerance (default 0.01)

    Returns:
        The symmetry operations of the reciprocal lattice.
    """
    # Need to make sure the reciprocal lattice is big enough, otherwise
    # symmetry determination will fail, so normalise the volume to 1.
    lattice = structure.lattice.reciprocal_lattice_crystallographic.scale(1)
    # A dummy one-atom structure in the reciprocal cell lets the spacegroup
    # analyzer report operations usable for Miller-index transformations.
    dummy = Structure(lattice, ["H"], [[0, 0, 0]])
    analyzer = SpacegroupAnalyzer(dummy, symprec=symprec)
    return analyzer.get_symmetry_operations()
def clearAllowList ( self ) : """clear all entries in whitelist table Returns : True : successful to clear the whitelist False : fail to clear the whitelist"""
print '%s call clearAllowList' % self . port # remove all entries in whitelist try : print 'clearing whitelist entries:' for addr in self . _addressfilterSet : print addr # disable whitelist if self . __setAddressfilterMode ( 'disable' ) : self . _addressfilterMode = 'disable' # clear ops cmd = WPANCTL_CMD + 'insert MAC:Whitelist:Entries' if self . __sendCommand ( cmd ) [ 0 ] != 'Fail' : self . _addressfilterSet . clear ( ) return True return False except Exception , e : ModuleHelper . WriteIntoDebugLogger ( 'clearAllowList() Error: ' + str ( e ) )
def advance_by(self, amount):
    """Advance the time reference by the given amount.

    :param `float` amount: number of seconds to advance.
    :raise `ValueError`: if *amount* is negative.
    """
    if amount < 0:
        # Time reference may only move forward.
        message = "cannot retreat time reference: amount {} < 0".format(amount)
        raise ValueError(message)
    self.__delta += amount
def _get_all_merges(routing_table):
    """Get possible sets of entries to merge.

    Yields
    ------
    :py:class:`~.Merge`
        One merge per group of two or more entries sharing a route.
    """
    # Indices already swallowed by a previously constructed merge.
    seen = set()

    for index, entry in enumerate(routing_table):
        if index in seen:
            continue

        # Group this entry with every later entry that routes identically.
        candidate = {index}
        candidate.update(
            later
            for later, other in enumerate(routing_table[index + 1:],
                                          start=index + 1)
            if entry.route == other.route
        )

        seen |= candidate

        # Only groups of at least two entries constitute a merge.
        if len(candidate) > 1:
            yield _Merge(routing_table, candidate)
def error(self, message, set_error_state=False):
    """Log an error message.

    :param message: Log message.
    :param set_error_state: When True, also record *message* (once) and
        switch this object into its error state.
    """
    if set_error_state:
        # Record each distinct message only once.
        already_recorded = message in self._errors
        if not already_recorded:
            self._errors.append(message)
        self.set_error_state()
    self.logger.error(message)
def logon():
    '''Logs into the bluecoat_sslv device and returns the session cookies.'''
    session = requests.session()
    # JSON-RPC login request built from the proxy configuration.
    payload = {
        "jsonrpc": "2.0",
        "id": "ID0",
        "method": "login",
        "params": [
            DETAILS['username'],
            DETAILS['password'],
            DETAILS['auth'],
            True,
        ],
    }
    response = session.post(DETAILS['url'], data=json.dumps(payload),
                            verify=False)

    if response.status_code != 200:
        log.error("Error logging into proxy. HTTP Error code: %s",
                  response.status_code)
        raise salt.exceptions.CommandExecutionError(
            "Did not receive a valid response from host.")

    try:
        cookies = {
            'sslng_csrf_token': response.cookies['sslng_csrf_token'],
            'sslng_session_id': response.cookies['sslng_session_id'],
        }
        csrf_token = response.cookies['sslng_csrf_token']
    except KeyError:
        # Missing cookies means the login was rejected.
        log.error("Unable to authentication to the bluecoat_sslv proxy.")
        raise salt.exceptions.CommandExecutionError(
            "Did not receive a valid response from host.")

    return session, cookies, csrf_token
def simple_response(self, status, msg=''):
    """Write a simple response back to the client.

    Builds a minimal status line + Content-Length/Content-Type header set,
    optionally a Connection: close header for oversized requests, and the
    plain-text body, then writes it all in one buffer to the connection.
    Socket errors from a known-ignorable list are swallowed.
    """
    status = str(status)
    # Status line uses the server's protocol, e.g. "HTTP/1.1 200 OK".
    proto_status = '%s %s\r\n' % (self.server.protocol, status)
    content_length = 'Content-Length: %s\r\n' % len(msg)
    content_type = 'Content-Type: text/plain\r\n'
    buf = [
        proto_status.encode('ISO-8859-1'),
        content_length.encode('ISO-8859-1'),
        content_type.encode('ISO-8859-1'),
    ]

    if status[:3] in ('413', '414'):
        # Request Entity Too Large / Request-URI Too Long
        self.close_connection = True
        if self.response_protocol == 'HTTP/1.1':
            # This will not be true for 414, since read_request_line
            # usually raises 414 before reading the whole line, and we
            # therefore cannot know the proper response_protocol.
            buf.append(b'Connection: close\r\n')
        else:
            # HTTP/1.0 had no 413/414 status nor Connection header.
            # Emit 400 instead and trust the message body is enough.
            # NOTE(review): this reassignment happens after the status
            # line was already encoded into buf, so the emitted status
            # line is unchanged — confirm whether that is intended.
            status = '400 Bad Request'

    # Blank line terminating the header block.
    buf.append(CRLF)
    if msg:
        if isinstance(msg, six.text_type):
            msg = msg.encode('ISO-8859-1')
        buf.append(msg)

    try:
        self.conn.wfile.write(EMPTY.join(buf))
    except socket.error as ex:
        # Only re-raise socket errors that are not known-ignorable.
        if ex.args[0] not in errors.socket_errors_to_ignore:
            raise
def game_events(game_id, innings_endpoint=False):
    """Return dictionary of events for a game with matching id.

    Keyed by inning number; each value holds the 'top' and 'bottom'
    half-inning info parsed from the selected XML feed.
    """
    # Choose the raw XML source from the data module: the classic
    # game_events feed or the innings feed.
    if innings_endpoint:
        data = mlbgame.data.get_innings(game_id)
        endpoint = 'innings'
    else:
        data = mlbgame.data.get_game_events(game_id)
        endpoint = 'game_events'

    # parse XML
    root = etree.parse(data).getroot()

    # Build {inning number: {top/bottom half info}}.
    output = {}
    for inning in root.findall('inning'):
        output[inning.attrib['num']] = {
            'top': __inning_info(inning, 'top', endpoint),
            'bottom': __inning_info(inning, 'bottom', endpoint),
        }
    return output
def stp(self, val=True):
    """Turn STP protocol on/off.

    :param val: True enables spanning-tree on this bridge, False disables.
    """
    state = 'on' if val else 'off'
    _runshell([brctlexe, 'stp', self.name, state],
              "Could not set stp on %s." % self.name)
def _run(self):
    """Calls self.run() and wraps for errors.

    Broadcasts success/failure based on run()'s truthiness; on any
    exception the task is stopped and the exception re-raised.
    """
    try:
        if self.run():
            broadcaster.success(self)
        else:
            broadcaster.failure(self)
    # NOTE: StandardError is Python 2 only; this branch additionally
    # prints the traceback before stopping.
    except StandardError:
        import traceback
        traceback.print_exc()
        self._stop()
        raise
    except Exception:
        # Non-StandardError exceptions (e.g. SystemExit-adjacent types
        # under the Py2 hierarchy) stop without printing a traceback.
        self._stop()
        raise
    return True
def migrate_big_urls(big_urls=BIG_URLS, inplace=True):
    r"""Migrate the big_urls table schema/structure from a dict of lists
    to a dict of dicts.

    >>> big_urls = {'x': (1, 2, 3, "4x"), 'y': ("yme", "cause")}
    >>> inplace = migrate_big_urls(big_urls=big_urls)
    >>> inplace
    {'x': {0: 1, 1: 2, 2: 3, 3: '4x'}, 'y': {0: 'yme', 1: 'cause'}}
    >>> inplace is big_urls
    True
    >>> big_urls = {'x': [1, 2, 3, "4x"], 'y': ["yme", "cause"]}
    >>> copied = migrate_big_urls(big_urls=big_urls, inplace=False)
    >>> copied
    {'x': {0: 1, 1: 2, 2: 3, 3: '4x'}, 'y': {0: 'yme', 1: 'cause'}}
    >>> copied is big_urls
    False
    >>> copied['x'] is big_urls['x']
    False
    >>> 1 is copied['x'][0] is big_urls['x'][0]
    True
    """
    if not inplace:
        # Deep copy so the caller's nested sequences are not shared.
        big_urls = deepcopy(big_urls)
    for name, meta in big_urls.items():
        # Replace each sequence with a position-keyed dict.
        # (The original performed this assignment twice — redundant
        # duplicate removed.)
        big_urls[name] = dict(enumerate(meta))
    return big_urls
def _unsigned_bounds ( self ) : """Get lower bound and upper bound for ` self ` in unsigned arithmetic . : return : a list of ( lower _ bound , upper _ bound ) tuples ."""
ssplit = self . _ssplit ( ) if len ( ssplit ) == 1 : lb = ssplit [ 0 ] . lower_bound ub = ssplit [ 0 ] . upper_bound return [ ( lb , ub ) ] elif len ( ssplit ) == 2 : # ssplit [ 0 ] is on the left hemisphere , and ssplit [ 1 ] is on the right hemisphere lb_1 = ssplit [ 0 ] . lower_bound ub_1 = ssplit [ 0 ] . upper_bound lb_2 = ssplit [ 1 ] . lower_bound ub_2 = ssplit [ 1 ] . upper_bound return [ ( lb_1 , ub_1 ) , ( lb_2 , ub_2 ) ] else : raise Exception ( 'WTF' )
def fill_out(self, data, prefix='', skip_reset=False):
    """Fill out ``data`` by dictionary (key is name attribute of inputs).

    You can pass normal Pythonic data and don't have to care about how to
    use the WebDriver API. With ``prefix`` you can specify a prefix common
    to all name attributes (e.g. inputs ``client.name``/``client.surname``
    with ``prefix="client."`` and plain keys ``"name"``/``"surname"``).

    ``skip_reset`` skips clearing/deselecting elements before filling,
    which can be much faster on big multi-selects that are known to be
    empty.

    Example:

    .. code-block:: python

        driver.get_elm('formid').fill_out({
            'name': 'Michael',
            'surname': 'Horejsek',
            'age': 24,
            'enabled': True,
            'multibox': ['value1', 'value2'],
        }, prefix='user_')

    .. versionchanged:: 2.2
        ``turbo`` renamed to ``skip_reset`` and used also for common
        elements like text inputs or textareas.
    """
    for field_name, field_value in data.items():
        element = FormElement(self, prefix + field_name)
        element.fill_out(field_value, skip_reset)
def list_files(self, offset=None, limit=None, api=None):
    """List files in a folder.

    :param api: Api instance (defaults to the class-level API binding)
    :param offset: Pagination offset
    :param limit: Pagination limit
    :return: List of files
    :raises SbgError: if this file object is not a folder
    """
    api = api or self._API
    # Only folders can be listed.
    if not self.is_folder():
        raise SbgError('{name} is not a folder'.format(name=self.name))
    url = self._URL['list_folder'].format(id=self.id)
    # NOTE(review): super(File, self.__class__) is an unusual pattern —
    # presumably it dispatches the shared _query classmethod from the
    # parent resource class; confirm against the base class definition.
    return super(File, self.__class__)._query(
        api=api, url=url, offset=offset, limit=limit, fields='_all')
def _raise_error_if_not_sarray(dataset, variable_name="SArray"):
    """Check if the input is an SArray. Provide a proper error message
    otherwise, naming the offending variable."""
    if not isinstance(dataset, _SArray):
        raise ToolkitError("Input %s is not an SArray." % variable_name)
def Cylinder(center=(0., 0., 0.), direction=(1., 0., 0.), radius=0.5,
             height=1.0, resolution=100, **kwargs):
    """Create the surface of a cylinder.

    Parameters
    ----------
    center : list or np.ndarray
        Location of the centroid in [x, y, z].
    direction : list or np.ndarray
        Direction the cylinder points to in [x, y, z].
    radius : float
        Radius of the cylinder.
    height : float
        Height of the cylinder.
    resolution : int
        Number of points on the circular face of the cylinder.
    capping : bool, optional
        Cap cylinder ends with polygons. Default True.
        (``cap_ends`` is accepted as a legacy alias.)

    Returns
    -------
    cylinder : vtki.PolyData
        Cylinder surface.

    Examples
    --------
    >>> import vtki
    >>> import numpy as np
    >>> cylinder = vtki.Cylinder(np.array([1, 2, 3]), np.array([1, 1, 1]), 1, 1)
    >>> cylinder.plot() # doctest:+SKIP
    """
    # 'capping' wins over the legacy 'cap_ends' alias; both default True.
    capping = kwargs.get('capping', kwargs.get('cap_ends', True))

    source = vtk.vtkCylinderSource()
    source.SetRadius(radius)
    source.SetHeight(height)
    source.SetCapping(capping)
    source.SetResolution(resolution)
    source.Update()

    surface = PolyData(source.GetOutput())
    # vtkCylinderSource builds along +Y; rotate so the axis lies along +X
    # before orienting/translating to the requested pose.
    surface.rotate_z(-90)
    translate(surface, center, direction)
    return surface
def predict(self, h=5, intervals=False):
    """Makes forecast with the estimated model.

    Parameters
    ----------
    h : int (default: 5)
        How many steps ahead would you like to forecast?
    intervals : boolean (default: False)
        Whether to return prediction intervals.

    Returns
    -------
    pd.DataFrame with predicted values (variance scale: values are
    exponentiated half-log-variances, see the np.exp(... / 2.0) below).
    """
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        # Reconstruct model internals from the fitted latent variables.
        lmda, Y, scores = self._model(self.latent_variables.get_z_values())
        date_index = self.shift_dates(h)

        if self.latent_variables.estimation_method in ['M-H']:
            # Bayesian (Metropolis-Hastings) path: simulate forward and
            # summarise the posterior predictive draws.
            sim_vector = self._sim_prediction_bayes(h, 15000)
            # NOTE(review): error_bars is built but never used below —
            # looks like leftover from a plotting variant; confirm.
            error_bars = []
            for pre in range(5, 100, 5):
                error_bars.append(np.insert([np.percentile(i, pre) for i in sim_vector], 0, lmda[-1]))
            forecasted_values = np.array([np.mean(i) for i in sim_vector])
            prediction_01 = np.array([np.percentile(i, 1) for i in sim_vector])
            prediction_05 = np.array([np.percentile(i, 5) for i in sim_vector])
            prediction_95 = np.array([np.percentile(i, 95) for i in sim_vector])
            prediction_99 = np.array([np.percentile(i, 99) for i in sim_vector])
        else:
            # Classical path: simulate with transformed latent variables.
            t_z = self.transform_z()
            # Many simulations only needed when intervals are requested.
            if intervals is True:
                sim_values = self._sim_prediction(lmda, Y, scores, h, t_z, 15000)
            else:
                sim_values = self._sim_prediction(lmda, Y, scores, h, t_z, 2)
            mean_values = self._sim_predicted_mean(lmda, Y, scores, h, t_z, 15000)
            forecasted_values = mean_values[-h:]

        if intervals is False:
            result = pd.DataFrame(np.exp(forecasted_values / 2.0))
            result.rename(columns={0: self.data_name}, inplace=True)
        else:
            if self.latent_variables.estimation_method not in ['M-H']:
                # Percentiles not yet computed on the classical path.
                sim_values = self._sim_prediction(lmda, Y, scores, h, t_z, 15000)
                prediction_01 = np.array([np.percentile(i, 1) for i in sim_values])
                prediction_05 = np.array([np.percentile(i, 5) for i in sim_values])
                prediction_95 = np.array([np.percentile(i, 95) for i in sim_values])
                prediction_99 = np.array([np.percentile(i, 99) for i in sim_values])

            result = np.exp(pd.DataFrame([forecasted_values, prediction_01, prediction_05, prediction_95, prediction_99]).T / 2.0)
            result.rename(columns={0: self.data_name, 1: "1% Prediction Interval",
                                   2: "5% Prediction Interval", 3: "95% Prediction Interval",
                                   4: "99% Prediction Interval"}, inplace=True)

        result.index = date_index[-h:]

        return result
def render(self):
    """Render the form and all sections to HTML."""
    template = env.get_template('form.html')
    # CSRF token generation is suppressed when the form opts out.
    token_fn = None if self.disable_csrf else _csrf_generation_function
    html = template.render(
        form=self,
        render_open_tag=True,
        render_close_tag=True,
        render_before=True,
        render_sections=True,
        render_after=True,
        generate_csrf_token=token_fn,
    )
    return Markup(html)
def get_merge(self, path, local_destination):
    """Using snakebite getmerge to implement this.

    :param path: HDFS directory
    :param local_destination: path on the system running Luigi
    :return: merge of the directory
    """
    client = self.get_bite()
    merged = client.getmerge(path=path, dst=local_destination)
    return list(merged)
def unload(filename):
    """Unload a SPICE kernel.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/unload_c.html

    :param filename: The name of a kernel (or list of kernel names) to
        unload.
    :type filename: str
    """
    # A list unloads each kernel in turn.
    if isinstance(filename, list):
        for kernel in filename:
            libspice.unload_c(stypes.stringToCharP(kernel))
        return
    libspice.unload_c(stypes.stringToCharP(filename))
def sync_role_definitions(self):
    """Inits the Superset application with security roles and such.

    Creates custom permissions, (re)builds the built-in roles from their
    permission predicates, optionally mirrors Gamma onto Public, fills in
    missing permissions, commits, and prunes stale permissions.
    """
    from superset import conf
    logging.info('Syncing role definition')

    self.create_custom_permissions()

    # Creating default roles; each is defined by a predicate over
    # permission/view-menu pairs.
    self.set_role('Admin', self.is_admin_pvm)
    self.set_role('Alpha', self.is_alpha_pvm)
    self.set_role('Gamma', self.is_gamma_pvm)
    self.set_role('granter', self.is_granter_pvm)
    self.set_role('sql_lab', self.is_sql_lab_pvm)

    # Optionally give anonymous users Gamma-equivalent access.
    if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
        self.set_role('Public', self.is_gamma_pvm)

    self.create_missing_perms()

    # commit role and view menu updates
    self.get_session.commit()
    self.clean_perms()
def normalize_eols(text, eol='\n'):
    """Use the same eol's in text.

    Every known end-of-line sequence other than *eol* is replaced by *eol*.
    """
    for candidate, _ in EOL_CHARS:
        if candidate != eol:
            text = text.replace(candidate, eol)
    return text
def _handle_sighup(self, signum: int, frame: Any) -> None:
    """Used internally to fail the task when connection to RabbitMQ is
    lost during the execution of the task.

    Installed as a SIGHUP handler; consumes the exception info recorded
    by the heartbeat thread and raises HeartbeatError.
    """
    logger.warning("Catched SIGHUP")
    # Take ownership of the recorded heartbeat failure and reset it.
    # NOTE(review): if no exc_info was recorded this is None and
    # format_exception(*None) below would raise TypeError — confirm a
    # SIGHUP can only arrive after the heartbeat thread stored exc_info.
    exc_info = self._heartbeat_exc_info
    self._heartbeat_exc_info = None
    # Format exception info to see in tools like Sentry.
    # NOTE(review): formatted_exception is never used (hence the noqa) —
    # presumably intended to be attached to the raised error; confirm.
    formatted_exception = ''.join(traceback.format_exception(*exc_info))  # noqa
    raise HeartbeatError(exc_info)
def any_i18n(*args):
    """Return True if any argument appears to be an i18n string.

    An argument "appears to be i18n" when it is neither None nor a plain
    str (e.g. a lazy/translated string proxy).
    """
    return any(
        arg is not None and not isinstance(arg, str)
        for arg in args
    )
def main():
    """
    NAME
        plotdi_a.py

    DESCRIPTION
        plots equal area projection from dec inc data and fisher mean,
        cone of confidence

    INPUT FORMAT
        takes dec, inc, alpha95 as first three columns in space
        delimited file

    SYNTAX
        plotdi_a.py [-i][-f FILE]

    OPTIONS
        -f FILE to read file name from command line
        -fmt [png,jpg,eps,pdf,svg] set plot file format ['svg' is default]
        -sav save plot and quit
    """
    fmt, plot = 'svg', 0
    if len(sys.argv) > 0:
        if '-h' in sys.argv:  # check if help is needed
            print(main.__doc__)
            sys.exit()  # graceful quit
        if '-fmt' in sys.argv:
            ind = sys.argv.index('-fmt')
            fmt = sys.argv[ind + 1]
        if '-sav' in sys.argv:
            plot = 1
        if '-f' in sys.argv:
            ind = sys.argv.index('-f')
            file = sys.argv[ind + 1]
            f = open(file, 'r')
            data = f.readlines()
        else:
            data = sys.stdin.readlines()  # read in data from standard input
    DIs, Pars = [], []
    for line in data:  # read in the data from standard input
        pars = []
        rec = line.split()  # split each line on space to get records
        DIs.append([float(rec[0]), float(rec[1])])
        # Build the 8-element ellipse parameter list consumed by
        # pmagplotlib.plot_ell: mean dec/inc, alpha95, then the two
        # eigenvector (Beta/gamma) directions of the confidence cone.
        pars.append(float(rec[0]))
        pars.append(float(rec[1]))
        pars.append(float(rec[2]))
        pars.append(float(rec[0]))
        # isign carries the hemisphere of the inclination (+1/-1).
        isign = abs(float(rec[1])) / float(rec[1])
        pars.append(float(rec[1]) - isign * 90.)  # Beta inc
        pars.append(float(rec[2]))  # gamma
        pars.append(float(rec[0]) + 90.)  # Beta dec
        pars.append(0.)  # Beta inc
        Pars.append(pars)
    EQ = {'eq': 1}  # make plot dictionary
    pmagplotlib.plot_init(EQ['eq'], 5, 5)
    title = 'Equal area projection'
    pmagplotlib.plot_eq(EQ['eq'], DIs, title)  # plot directions
    for k in range(len(Pars)):
        pmagplotlib.plot_ell(EQ['eq'], Pars[k], 'b', 0, 1)  # plot ellipses
    files = {}
    for key in list(EQ.keys()):
        files[key] = key + '.' + fmt
    titles = {}
    titles['eq'] = 'Equal Area Plot'
    if pmagplotlib.isServer:
        # Server mode: add styled borders and always save.
        black = '#000000'
        purple = '#800080'
        EQ = pmagplotlib.add_borders(EQ, titles, black, purple)
        pmagplotlib.save_plots(EQ, files)
    elif plot == 0:
        # Interactive mode: show the figure and ask what to do.
        pmagplotlib.draw_figs(EQ)
        ans = input(" S[a]ve to save plot, [q]uit, Return to continue:  ")
        if ans == "q":
            sys.exit()
        if ans == "a":
            pmagplotlib.save_plots(EQ, files)
    else:
        # -sav given: save without displaying.
        pmagplotlib.save_plots(EQ, files)
def uninstall_python(python, runas=None):
    '''
    Uninstall a python implementation.

    python
        The version of python to uninstall. Should match one of the
        versions listed by
        :mod:`pyenv.versions <salt.modules.pyenv.versions>`

    CLI Example:

    .. code-block:: bash

        salt '*' pyenv.uninstall_python 2.0.0-p0
    '''
    # Accept versions given either as "2.0.0-p0" or "python-2.0.0-p0".
    version = re.sub(r'^python-', '', python)
    _pyenv_exec('uninstall', '--force {0}'.format(version), runas=runas)
    return True
def evaluate_single_config(hparams, sampling_temp, max_num_noops,
                           agent_model_dir, eval_fn=_eval_fn_with_learner):
    """Evaluate the PPO agent in the real environment.

    Runs one evaluation epoch and returns the mean reward with and
    without reward clipping.
    """
    metric = get_metric_name(sampling_temp, max_num_noops, clipped=False)
    tf.logging.info("Evaluating metric %s", metric)

    eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
    env = setup_env(
        hparams,
        batch_size=hparams.eval_batch_size,
        max_num_noops=max_num_noops,
        rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps,
        env_name=hparams.rl_env_name,
    )
    env.start_new_epoch(0)
    eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp)
    rollouts = env.current_epoch_rollouts()
    env.close()

    # (clipped, unclipped) mean rewards over the collected rollouts.
    return tuple(
        compute_mean_reward(rollouts, clipped) for clipped in (True, False)
    )
def update_gradients_full ( self , dL_dK , X , X2 = None , reset = True ) : """Given the derivative of the objective wrt the covariance matrix ( dL _ dK ) , compute the gradient wrt the parameters of this kernel , and store in the parameters object as e . g . self . variance . gradient"""
self . variance . gradient = np . sum ( self . K ( X , X2 ) * dL_dK ) / self . variance # now the lengthscale gradient ( s ) dL_dr = self . dK_dr_via_X ( X , X2 ) * dL_dK if self . ARD : tmp = dL_dr * self . _inv_dist ( X , X2 ) if X2 is None : X2 = X if use_stationary_cython : self . lengthscale . gradient = self . _lengthscale_grads_cython ( tmp , X , X2 ) else : self . lengthscale . gradient = self . _lengthscale_grads_pure ( tmp , X , X2 ) else : r = self . _scaled_dist ( X , X2 ) self . lengthscale . gradient = - np . sum ( dL_dr * r ) / self . lengthscale
def pairinplace(args):
    """
    %prog pairinplace bulk.fasta

    Pair up the records in bulk.fasta by comparing the names for adjacent
    records. If they match, print to bulk.pairs.fasta, else print to
    bulk.frags.fasta.
    """
    from jcvi.utils.iter import pairwise

    p = OptionParser(pairinplace.__doc__)
    p.add_option("-r", dest="rclip", default=1, type="int",
                 help="pair ID is derived from rstrip N chars [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    base = op.basename(fastafile).split(".")[0]

    frags = base + ".frags.fasta"
    pairs = base + ".pairs.fasta"
    # Keep compressed output when the input is compressed.
    if fastafile.endswith(".gz"):
        frags += ".gz"
        pairs += ".gz"

    fragsfw = must_open(frags, "w")
    pairsfw = must_open(pairs, "w")

    N = opts.rclip
    # Derive the pair ID by clipping N trailing characters from the name.
    # NOTE(review): with N == 0 this evaluates to the builtin ``str``
    # inside the lambda body, not an identity clip — confirm intent.
    strip_name = lambda x: x[:-N] if N else str

    skipflag = False  # controls the iterator skip
    fastaiter = SeqIO.parse(fastafile, "fasta")
    # Walk adjacent record pairs; a matched pair consumes both records,
    # so the following iteration is skipped via skipflag.
    for a, b in pairwise(fastaiter):
        aid, bid = [strip_name(x) for x in (a.id, b.id)]
        if skipflag:
            skipflag = False
            continue

        if aid == bid:
            SeqIO.write([a, b], pairsfw, "fasta")
            skipflag = True
        else:
            SeqIO.write([a], fragsfw, "fasta")

    # don't forget the last one, when b is None
    # NOTE(review): assumes jcvi's pairwise pads the final pair with
    # None so the loop sees the last record as ``a`` — confirm.
    if not skipflag:
        SeqIO.write([a], fragsfw, "fasta")

    logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
def local_symbol_table(imports=None, symbols=()):
    """Constructs a local symbol table.

    Args:
        imports (Optional[SymbolTable]): Shared symbol tables to import.
        symbols (Optional[Iterable[Unicode]]): Initial local symbols to add.

    Returns:
        SymbolTable: A mutable local symbol table with the seeded local
        symbols.
    """
    # Thin convenience wrapper: delegates construction to SymbolTable
    # with the local table type pre-selected.
    return SymbolTable(table_type=LOCAL_TABLE_TYPE, symbols=symbols, imports=imports)
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               command):
    '''
    Execute a command and read the output as YAML.

    ``%s`` in the command is replaced by the minion id. On any failure the
    error (with whatever output was captured) is logged and an empty
    pillar dict is returned.
    '''
    # Initialise so the except handler can reference it even when the
    # command execution itself raised (previously an unbound-local
    # NameError would mask the real error).
    output = None
    try:
        command = command.replace('%s', minion_id)
        output = __salt__['cmd.run_stdout'](command, python_shell=True)
        return salt.utils.yaml.safe_load(output)
    except Exception:
        log.critical(
            'YAML data from \'%s\' failed to parse. Command output:\n%s',
            command, output
        )
        return {}
def refresh_entitlement(owner, repo, identifier, show_tokens):
    """Refresh an entitlement in a repository."""
    api = get_entitlements_api()

    # Translate API errors into CLI-friendly exceptions.
    with catch_raise_api_exception():
        entitlement, _, headers = api.entitlements_refresh_with_http_info(
            owner=owner,
            repo=repo,
            identifier=identifier,
            show_tokens=show_tokens,
        )

    ratelimits.maybe_rate_limit(api, headers)
    return entitlement.to_dict()
def makeUserLoginMethod(username, password, locale=None):
    '''Return a function that will call the vim.SessionManager.Login()
    method with the given parameters. The result of this function can be
    passed as the "loginMethod" to a SessionOrientedStub constructor.
    '''
    def _doLogin(soapStub):
        serviceInstance = vim.ServiceInstance("ServiceInstance", soapStub)
        sessionManager = serviceInstance.content.sessionManager
        # Only authenticate when the stub has no live session.
        if not sessionManager.currentSession:
            sessionManager.Login(username, password, locale)

    return _doLogin
def sign(self, subject, **prefs):
    """Sign text, a message, or a timestamp using this key.

    :param subject: The text to be signed.
    :type subject: ``str``, :py:obj:`~pgpy.PGPMessage`, ``None``
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is
        passphrase-protected and has not been unlocked, or if the key is
        public.
    :returns: :py:obj:`PGPSignature`

    Optional keyword arguments (shared with :py:meth:`PGPKey.certify`,
    :py:meth:`PGPKey.revoke`, and :py:meth:`PGPKey.bind`):

    :keyword expires: Set an expiration date for this signature.
    :keyword notation: Add arbitrary notation data (``dict``).
    :keyword policy_uri: URI describing the signing policy.
    :keyword revocable: If ``False``, mark the signature non-revocable.
    :keyword user: Which User ID to use; also adds a "Signer's User ID"
        subpacket.
    """
    # Default: an ordinary signature over binary document data.
    sig_type = SignatureType.BinaryDocument
    hash_algo = prefs.pop('hash', None)

    # A None subject means a bare timestamp signature.
    if subject is None:
        sig_type = SignatureType.Timestamp

    if isinstance(subject, PGPMessage):
        # Cleartext messages are signed in canonical (text) form.
        if subject.type == 'cleartext':
            sig_type = SignatureType.CanonicalDocument
        subject = subject.message

    sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid)
    return self._sign(subject, sig, **prefs)
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request.

    Returns the report itself when it should simply be printed to stdout,
    otherwise a status message describing where it was published.
    """
    # No destination requested and not silenced: hand the report back for
    # printing to stdout.
    if not (args.quiet or args.gist or args.file):
        return report

    messages = []

    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        messages.append("\nReport posted to GitHub Gist: {0}".format(gist_url))

    if args.file is not None:
        with open(args.file, 'w') as handle:
            handle.write(report.encode('utf-8'))
        messages.append("\nReport written to file: {0}".format(args.file))

    return "".join(messages)
def qteRegisterMacro ( self , macroCls , replaceMacro : bool = False , macroName : str = None ) : """Register a macro . If ` ` macroName ` ` is * * None * * then its named is deduced from its class name ( see ` ` qteMacroNameMangling ` ` for details ) . Multiple macros with the same name can co - exist as long as their applet - and widget signatures , as reported by the ` ` qteAppletSignature ` ` and ` ` qteWidgetSignature ` ` methods , differ . If ` ` macroCls ` ` has the same name and signatures as an already registered macro then the ` ` replaceMacro ` ` flag decides : * * * True * * : the existing macro will be replaced for all applet - and widget signatures specified by the new macro ` ` macroCls ` ` . * * * False * * : the ` ` macroCls ` ` will not be registered . The method returns * * None * * if an error occurred ( eg . the macro constructor is faulty ) , or the macro name as a string . If a macro was already registered and not replaced ( ie . ` ` replaceMacro ` ` ) then the macro name is returned nonetheless . . . note : : if an existing macro is replaced the old macro is not deleted ( it probably should be , though ) . | Args | * ` ` macroCls ` ` ( * * QtmacsMacro * * ) : QtmacsMacro or derived ( not type checked ! ) * ` ` replaceMacro ` ` ( * * bool * * ) : whether or not to replace an existing macro . * ` ` macroName ` ` ( * * str * * ) : the name under which the macro should be registered . | Returns | * * str * * : the name of the just registered macro , or * * None * * if that failed . | Raises | * * * QtmacsArgumentError * * if at least one argument has an invalid type ."""
# Check type of input arguments . if not issubclass ( macroCls , QtmacsMacro ) : args = ( 'macroCls' , 'class QtmacsMacro' , inspect . stack ( ) [ 0 ] [ 3 ] ) raise QtmacsArgumentError ( * args ) # Try to instantiate the macro class . try : macroObj = macroCls ( ) except Exception : msg = 'The macro <b>{}</b> has a faulty constructor.' msg = msg . format ( macroCls . __name__ ) self . qteLogger . error ( msg , stack_info = True ) return None # The three options to determine the macro name , in order of # precedence , are : passed to this function , specified in the # macro constructor , name mangled . if macroName is None : # No macro name was passed to the function . if macroObj . qteMacroName ( ) is None : # The macro has already named itself . macroName = self . qteMacroNameMangling ( macroCls ) else : # The macro name is inferred from the class name . macroName = macroObj . qteMacroName ( ) # Let the macro know under which name it is known inside Qtmacs . macroObj . _qteMacroName = macroName # Ensure the macro has applet signatures . if len ( macroObj . qteAppletSignature ( ) ) == 0 : msg = 'Macro <b>{}</b> has no applet signatures.' . format ( macroName ) self . qteLogger . error ( msg , stack_info = True ) return None # Ensure the macro has widget signatures . if len ( macroObj . qteWidgetSignature ( ) ) == 0 : msg = 'Macro <b>{}</b> has no widget signatures.' . format ( macroName ) self . qteLogger . error ( msg , stack_info = True ) return None # Flag to indicate that at least one new macro type was # registered . anyRegistered = False # Iterate over all applet signatures . for app_sig in macroObj . qteAppletSignature ( ) : # Iterate over all widget signatures . for wid_sig in macroObj . qteWidgetSignature ( ) : # Infer the macro name from the class name of the # passed macro object . 
macroNameInternal = ( macroName , app_sig , wid_sig ) # If a macro with this name already exists then either # replace it , or skip the registration process for the # new one . if macroNameInternal in self . _qteRegistryMacros : if replaceMacro : # Remove existing macro . tmp = self . _qteRegistryMacros . pop ( macroNameInternal ) msg = 'Replacing existing macro <b>{}</b> with new {}.' msg = msg . format ( macroNameInternal , macroObj ) self . qteLogger . info ( msg ) tmp . deleteLater ( ) else : msg = 'Macro <b>{}</b> already exists (not replaced).' msg = msg . format ( macroNameInternal ) self . qteLogger . info ( msg ) # Macro was not registered for this widget # signature . continue # Add macro object to the registry . self . _qteRegistryMacros [ macroNameInternal ] = macroObj msg = ( 'Macro <b>{}</b> successfully registered.' . format ( macroNameInternal ) ) self . qteLogger . info ( msg ) anyRegistered = True # Return the name of the macro , irrespective of whether or not # it is a newly created macro , or if the old macro was kept # ( in case of a name conflict ) . return macroName
def makeLeader(self, node_id):
    """Mark ``node_id`` as the clique leader via an annotation property.

    Adds a ``clique_leader = true`` boolean literal triple for the node
    (a Monarch-ism).

    :param node_id:
    :return:
    """
    predicate = self.globaltt['clique_leader']
    self.graph.addTriple(
        node_id, predicate, True,
        object_is_literal=True, literal_type='xsd:boolean')
def get_user_summary(self, begin_date, end_date):
    """Fetch user gain/loss statistics for a date range.

    See: http://mp.weixin.qq.com/wiki/3/ecfed6e1a0a03b5f35e5efac98e864b7.html

    :param begin_date: start date
    :param end_date: end date
    :return: list of statistics entries
    """
    payload = {
        'begin_date': self._to_date_str(begin_date),
        'end_date': self._to_date_str(end_date),
    }
    return self._post('getusersummary', data=payload)['list']
def leading_whitespace(self, inputstring):
    """Count leading whitespace.

    Returns the indentation width of *inputstring* in columns, where a
    tab advances to the next multiple of ``tabworth`` (a module-level
    constant -- defined elsewhere in this file).
    """
    count = 0
    for i, c in enumerate(inputstring):
        if c == " ":
            count += 1
        elif c == "\t":
            # A tab jumps to the next tab stop.
            # NOTE(review): this uses the character index ``i`` rather
            # than the accumulated column ``count`` -- verify that this
            # matches the intended tab-stop behavior when spaces and
            # tabs are mixed.
            count += tabworth - (i % tabworth)
        else:
            # First non-whitespace character ends the indentation.
            break
        # Remember the first indentation character seen; warn (or error,
        # in strict mode) when tabs and spaces are mixed.
        if self.indchar is None:
            self.indchar = c
        elif c != self.indchar:
            self.strict_err_or_warn("found mixing of tabs and spaces", inputstring, i)
    return count
def findAddressCandidates(self,
                          addressDict=None,
                          singleLine=None,
                          maxLocations=10,
                          outFields="*",
                          outSR=4326,
                          searchExtent=None,
                          location=None,
                          distance=2000,
                          magicKey=None,
                          category=None):
    """Perform the findAddressCandidates operation on a geocode service.

    Returns a resource describing the list of address candidates,
    including address, location, and match score for each.

    Inputs:
       addressDict - dict of the address fields accepted by the geocode
          service (listed in the service's ``addressFields`` property),
          e.g. ``{"Street": "380 New York St", "Zone": "92373"}``.
       singleLine - the location to geocode, formatted as one string.
          Do not pass both ``singleLine`` and ``addressDict``.
       maxLocations - maximum number of candidates to return, up to the
          service's configured maximum.
       outFields - comma-delimited list of fields to include in the
          result set ("*" for all; the shape field is ignored).
       outSR - WKID of the spatial reference for returned candidates.
       searchExtent - bounding box used to filter candidates, as
          "<xmin>,<ymin>,<xmax>,<ymax>" (assumed in the locator's
          spatial reference unless one is included).
       location - origin point (Point object or [x, y] list) used with
          ``distance`` to boost the rank of nearby candidates.
       distance - radius in meters around ``location`` for ranking;
          unlike ``searchExtent`` it does not filter results.
       magicKey - key from a prior ``suggest`` call; pass together with
          the suggested text as ``singleLine`` for faster lookups.
       category - category filter (only supported by geocode services
          published with StreetMap Premium locators).
    """
    url = self._url + "/findAddressCandidates"
    params = {"f": "json", "distance": distance}
    if addressDict is None and singleLine is None:
        raise Exception("A singleline address or an address dictionary must be passed into this function")
    if magicKey is not None:
        params['magicKey'] = magicKey
    if category is not None:
        params['category'] = category
    if addressDict is not None:
        # BUG FIX: dict.update() returns None; the original assigned the
        # result back to ``params``, discarding every query parameter.
        params.update(addressDict)
    if singleLine is not None:
        params['SingleLine'] = singleLine
    if maxLocations is not None:
        params['maxLocations'] = maxLocations
    if outFields is not None:
        params['outFields'] = outFields
    if outSR is not None:
        params['outSR'] = {"wkid": outSR}
    if searchExtent is not None:
        params['searchExtent'] = searchExtent
    # A Point is sent as its dictionary form; a [x, y] list as "x,y".
    if isinstance(location, Point):
        params['location'] = location.asDictionary
    elif isinstance(location, list):
        params['location'] = "%s,%s" % (location[0], location[1])
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
def _ensure_module_folder_exists():
    """Checks to see if the module folder exists. If it does not, create it.

    If there is an existing file with the same name, raise a RuntimeError.
    Any other OSError (e.g. permission denied) is re-raised unchanged;
    the original version silently swallowed those.
    """
    if not os.path.isdir(MODULES_FOLDER_PATH):
        try:
            os.mkdir(MODULES_FOLDER_PATH)
        except OSError as e:
            if "file already exists" in str(e):
                raise RuntimeError(
                    "Could not create modules folder: file exists with the same name")
            # Unexpected failure (permissions, missing parent, ...) --
            # do not hide it from the caller.
            raise
def cas2tas(cas, h):
    """Convert calibrated airspeed to true airspeed.

    Both speeds are in m/s; ``h`` is the altitude in m. Negative input
    speeds yield negative true airspeeds (sign is preserved).
    """
    p, rho, T = atmos(h)
    # Dynamic pressure from CAS at sea-level reference conditions.
    qdyn = p0 * ((1.0 + rho0 * cas * cas / (7.0 * p0)) ** 3.5 - 1.0)
    # Invert the compressible-flow relation at the local p and rho.
    tas = np.sqrt(7.0 * p / rho * ((1.0 + qdyn / p) ** (2.0 / 7.0) - 1.0))
    if cas < 0:
        tas = -tas
    return tas
def CountFlowLogEntries(self, client_id, flow_id):
    """Returns number of flow log entries of a given flow."""
    # Fetch every entry (offset 0, effectively unbounded count) and
    # count the result.
    entries = self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize)
    return len(entries)
def clean_data(data):
    """Shift to lower case, replace unknowns with UNK, and listify.

    Each sample is a (label, text) pair; only the text (index 1) is
    used. Characters outside the allowed alphabet become the token
    'UNK'. Returns a list of per-sample character lists.
    """
    VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
    cleaned = []
    for sample in data:
        text = sample[1].lower()
        cleaned.append([ch if ch in VALID else 'UNK' for ch in text])
    return cleaned
def write_to_table(
        self,
        query,
        dataset=None,
        table=None,
        external_udf_uris=None,
        allow_large_results=None,
        use_query_cache=None,
        priority=None,
        create_disposition=None,
        write_disposition=None,
        use_legacy_sql=None,
        maximum_billing_tier=None,
        flatten=None,
        project_id=None,
):
    """Write query result to table.

    If dataset or table is not provided, BigQuery will write the result
    to a temporary table. Optional arguments that are not specified are
    determined by BigQuery as described:
    https://developers.google.com/bigquery/docs/reference/v2/jobs

    Parameters
    ----------
    query : str
        BigQuery query string
    dataset : str, optional
        String id of the dataset
    table : str, optional
        String id of the table
    external_udf_uris : list, optional
        External UDF URIs; must be Google Cloud Storage URIs with .js
        extensions.
    allow_large_results : bool, optional
        Whether or not to allow large results
    use_query_cache : bool, optional
        Whether or not to use query cache
    priority : str, optional
        One of the JOB_PRIORITY_* constants
    create_disposition : str, optional
        One of the JOB_CREATE_* constants
    write_disposition : str, optional
        One of the JOB_WRITE_* constants
    use_legacy_sql : bool, optional
        If False, use BigQuery's standard SQL
        (https://cloud.google.com/bigquery/sql-reference/)
    maximum_billing_tier : integer, optional
        Limits the billing tier for this job; queries beyond this tier
        fail without incurring a charge. Defaults to the project
        default. See https://cloud.google.com/bigquery/pricing#high-compute
    flatten : bool, optional
        Whether or not to flatten nested and repeated fields in results
    project_id : str, optional
        String id of the project

    Returns
    -------
    dict
        A BigQuery job resource

    Raises
    ------
    JobInsertException
        On http/auth failures or error in result
    """
    configuration = {
        "query": query,
    }
    project_id = self._get_project_id(project_id)

    # A destination table is only set when both halves of the
    # identifier were supplied; otherwise BigQuery uses a temp table.
    if dataset and table:
        configuration['destinationTable'] = {
            "projectId": project_id,
            "tableId": table,
            "datasetId": dataset
        }
    # For all options below, None means "let BigQuery decide".
    if allow_large_results is not None:
        configuration['allowLargeResults'] = allow_large_results
    if flatten is not None:
        configuration['flattenResults'] = flatten
    if maximum_billing_tier is not None:
        configuration['maximumBillingTier'] = maximum_billing_tier
    if use_query_cache is not None:
        configuration['useQueryCache'] = use_query_cache
    if use_legacy_sql is not None:
        configuration['useLegacySql'] = use_legacy_sql
    if priority:
        configuration['priority'] = priority
    if create_disposition:
        configuration['createDisposition'] = create_disposition
    if write_disposition:
        configuration['writeDisposition'] = write_disposition
    if external_udf_uris:
        configuration['userDefinedFunctionResources'] = \
            [{'resourceUri': u} for u in external_udf_uris]

    body = {
        "configuration": {
            'query': configuration
        }
    }

    # FIX: use lazy %-style logging args instead of eager string
    # formatting, so the (potentially large) body is only rendered
    # when the INFO level is enabled.
    logger.info("Creating write to table job %s", body)
    job_resource = self._insert_job(body)
    self._raise_insert_exception_if_error(job_resource)
    return job_resource
def encode_field(self, field, value):
    """Encode the given value as JSON.

    Args:
      field: a messages.Field for the field we're encoding.
      value: a value for field.

    Returns:
      A python value suitable for json.dumps.
    """
    # Registered custom codecs get first chance; the first codec that
    # reports completion wins, and each codec sees the previous one's
    # output.
    for encoder in _GetFieldCodecs(field, 'encoder'):
        result = encoder(field, value)
        value = result.value
        if result.complete:
            return value
    # Enum fields may carry a custom JSON name mapping; fall back to
    # the python enum name when no mapping exists.
    if isinstance(field, messages.EnumField):
        if field.repeated:
            remapped_value = [GetCustomJsonEnumMapping(field.type, python_name=e.name) or e.name
                              for e in value]
        else:
            remapped_value = GetCustomJsonEnumMapping(field.type, python_name=value.name)
        # NOTE(review): a falsy remapping (e.g. an empty list for a
        # repeated field with no values) falls through to the default
        # encoder -- confirm that is intended.
        if remapped_value:
            return remapped_value
    # Nested messages are round-tripped through their JSON
    # representation; DateTimeField is excluded because it has its own
    # codec handling.
    if (isinstance(field, messages.MessageField) and
            not isinstance(field, message_types.DateTimeField)):
        value = json.loads(self.encode_message(value))
    return super(_ProtoJsonApiTools, self).encode_field(field, value)
def use_comparative_asset_view(self):
    """Pass through to provider AssetLookupSession.use_comparative_asset_view"""
    self._object_views['asset'] = COMPARATIVE
    # self._get_provider_session('asset_lookup_session') # To make sure the session is tracked
    # Propagate the view to every provider session that supports it;
    # sessions without the method are skipped.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_asset_view()
        except AttributeError:
            pass
def WriteClientSnapshotHistory(self, clients):
    """Writes the full history for a particular client.

    Args:
      clients: snapshot objects, all assumed to belong to the same
        client, each carrying ``client_id``, ``timestamp`` and
        ``startup_info``.

    Raises:
      db.UnknownClientError: if the client has no metadata entry yet.
    """
    # All snapshots belong to one client, so checking the first entry
    # suffices.
    if clients[0].client_id not in self.metadatas:
        raise db.UnknownClientError(clients[0].client_id)

    for client in clients:
        # Startup info is stored separately from the snapshot blob:
        # temporarily strip it so it is not serialized into the
        # snapshot, then restore it so the caller's object is left
        # unchanged.
        startup_info = client.startup_info
        client.startup_info = None

        snapshots = self.clients.setdefault(client.client_id, {})
        snapshots[client.timestamp] = client.SerializeToString()

        startup_infos = self.startup_history.setdefault(client.client_id, {})
        startup_infos[client.timestamp] = startup_info.SerializeToString()

        client.startup_info = startup_info
def check_future_import ( node ) : """If this is a future import , return set of symbols that are imported , else return None ."""
# node should be the import statement here savenode = node if not ( node . type == syms . simple_stmt and node . children ) : return set ( ) node = node . children [ 0 ] # now node is the import _ from node if not ( node . type == syms . import_from and # node . type = = token . NAME and # seems to break it hasattr ( node . children [ 1 ] , 'value' ) and node . children [ 1 ] . value == u'__future__' ) : return set ( ) if node . children [ 3 ] . type == token . LPAR : node = node . children [ 4 ] else : node = node . children [ 3 ] # now node is the import _ as _ name [ s ] # print ( python _ grammar . number2symbol [ node . type ] ) # breaks sometimes if node . type == syms . import_as_names : result = set ( ) for n in node . children : if n . type == token . NAME : result . add ( n . value ) elif n . type == syms . import_as_name : n = n . children [ 0 ] assert n . type == token . NAME result . add ( n . value ) return result elif node . type == syms . import_as_name : node = node . children [ 0 ] assert node . type == token . NAME return set ( [ node . value ] ) elif node . type == token . NAME : return set ( [ node . value ] ) else : # TODO : handle brackets like this : # from _ _ future _ _ import ( absolute _ import , division ) assert False , "strange import: %s" % savenode
def ungrab_hotkey(self, item):
    """Ungrab a hotkey.

    If the hotkey has no filter regex, it is global and is ungrabbed
    recursively from the root window. If it has a filter regex, iterate
    over all children of the root and ungrab from matching windows.
    """
    import copy
    # Work on a shallow copy so the queued operations are not affected
    # by later mutation of ``item``.
    clone = copy.copy(item)
    if item.get_applicable_regex() is None:
        self.__enqueue(self.__ungrabHotkey, clone.hotKey, clone.modifiers,
                       self.rootWindow)
        if self.__needsMutterWorkaround(item):
            self.__enqueue(self.__ungrabRecurse, clone, self.rootWindow,
                           False)
    else:
        self.__enqueue(self.__ungrabRecurse, clone, self.rootWindow)
def typecast ( type_ , value ) : """Tries to smartly typecast the given value with the given type . : param type _ : The type to try to use for the given value : param value : The value to try and typecast to the given type : return : The typecasted value if possible , otherwise just the original value"""
# NOTE : does not do any special validation of types before casting # will just raise errors on type casting failures if is_builtin_type ( type_ ) or is_collections_type ( type_ ) or is_enum_type ( type_ ) : # FIXME : move to Types enum and TYPE _ MAPPING entry if is_bytes_type ( type_ ) : return decode_bytes ( value ) return type_ ( value ) elif is_regex_type ( type_ ) : return typecast ( str , value ) elif is_typing_type ( type_ ) : try : base_type = type_ . __extra__ except AttributeError : # NOTE : when handling typing . _ GenericAlias _ _ extra _ _ is actually _ _ origin _ _ base_type = type_ . __origin__ arg_types = type_ . __args__ if is_array_type ( type_ ) : if len ( arg_types ) == 1 : item_type = arg_types [ 0 ] return base_type ( [ typecast ( item_type , item ) for item in value ] ) else : return base_type ( value ) elif is_object_type ( type_ ) : if len ( arg_types ) == 2 : ( key_type , item_type ) = arg_types return base_type ( { typecast ( key_type , key ) : typecast ( item_type , item ) for ( key , item ) in value . items ( ) } ) else : return base_type ( value ) else : return base_type ( value ) else : return value
def _get_mod_mtime(self, filepath):
    """Return the modified time of the file or of its accompanying XML
    file, whichever is greater.
    """
    mtime = self.tramp.getmtime(filepath)
    xmlpath = self.get_xmldoc_path(filepath)
    if self.tramp.exists(xmlpath):
        mtime = max(mtime, self.tramp.getmtime(xmlpath))
    return mtime
def _setup_language_variables(self):
    """Check for availability of corpora for a language.

    Returns the user-defined corpora when available; raises
    CorpusImportError when the language has neither core nor
    user-defined corpora.

    TODO: Make the selection of available languages dynamic from dirs
    within ``corpora`` which contain a ``corpora.py`` file.
    """
    if self.language in AVAILABLE_LANGUAGES:
        # Officially supported language: any user-defined corpora
        # simply augment the core set.
        return self._check_distributed_corpora_file()

    # Not an official language -- usable only if the user registered
    # custom corpora for it.
    user_defined_corpora = self._check_distributed_corpora_file()
    if user_defined_corpora:
        return user_defined_corpora
    msg = 'Corpora not available (either core or user-defined) for the "{}" language.'.format(self.language)
    logger.info(msg)
    raise CorpusImportError(msg)
def show_frontpage(self):
    """If on a subreddit, remember it and head back to the front page.
    If this was pressed on the front page, go back to the last
    subreddit.
    """
    if self.content.name == '/r/front':
        # Already on the front page: jump back to the remembered
        # subreddit (may be None if never toggled).
        target = self.toggled_subreddit
    else:
        self.toggled_subreddit = self.content.name
        target = '/r/front'

    # target may still be None if this command hasn't been used yet.
    if target is not None:
        self.refresh_content(order='ignore', name=target)
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
    """Coroutine that sends each received log message as an email to the
    given list of recipients, from a GMail account.

    Send log strings in with ``.send(...)``; closing the generator
    quits the SMTP session.
    """
    import smtplib
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.ehlo()
    server.login(username, password)

    if not isinstance(recipients, (list, tuple)):
        recipients = [recipients]
    gmail_sender = '{0}@gmail.com'.format(username)
    # {0} is the recipient, {1} the log message.
    template = ('To: {0}\nFrom: ' + gmail_sender +
                '\nSubject: ' + subject + '\n' + '\n{1}\n\n')

    try:
        while True:
            logstr = (yield)
            for recipient in recipients:
                server.sendmail(gmail_sender, recipient,
                                template.format(recipient, logstr))
    except GeneratorExit:
        server.quit()
def set_cache_url(self):
    """Set the URL to be used for caching."""
    # Drop the anchor/fragment (fifth urlsplit component): URLs that
    # differ only by anchor are assumed to have the same content.
    stripped = urlutil.urlunsplit(self.urlparts[:4] + [u''])
    self.cache_url = stripped
    if stripped is not None:
        assert isinstance(stripped, unicode), repr(stripped)
def handle_put_account(self, req):
    """Handles the PUT v2/<account> call for adding an account to the
    auth system. Can only be called by a .reseller_admin.

    By default, a newly created UUID4 will be used with the reseller
    prefix as the account id used when creating corresponding service
    accounts. However, you can provide an X-Account-Suffix header to
    replace the UUID4 part.

    :param req: The swob.Request to process.
    :returns: swob.Response, 2xx on success.
    """
    if not self.is_reseller_admin(req):
        return self.denied_response(req)
    account = req.path_info_pop()
    # Reject malformed paths and reserved (dot-prefixed) account names.
    if req.path_info or not account or account[0] == '.':
        return HTTPBadRequest(request=req)
    account_suffix = req.headers.get('x-account-suffix')
    if not account_suffix:
        account_suffix = str(uuid4())
    # Create the new account in the Swift cluster
    path = quote('%s/%s%s' % (self.dsc_parsed2.path,
                              self.reseller_prefix, account_suffix))
    try:
        conn = self.get_conn()
        conn.request('PUT', path,
                     headers={'X-Auth-Token': self.get_itoken(req.environ),
                              'Content-Length': '0'})
        resp = conn.getresponse()
        resp.read()
        if resp.status // 100 != 2:
            raise Exception('Could not create account on the Swift '
                            'cluster: %s %s %s' % (path, resp.status, resp.reason))
    except (Exception, TimeoutError):
        self.logger.error(_('ERROR: Exception while trying to communicate '
                            'with %(scheme)s://%(host)s:%(port)s/%(path)s'),
                          {'scheme': self.dsc_parsed2.scheme,
                           'host': self.dsc_parsed2.hostname,
                           'port': self.dsc_parsed2.port,
                           'path': path})
        raise
    # Ensure the container in the main auth account exists (this
    # container represents the new account)
    path = quote('/v1/%s/%s' % (self.auth_account, account))
    resp = self.make_pre_authed_request(
        req.environ, 'HEAD', path).get_response(self.app)
    if resp.status_int == 404:
        # Container missing: create it now.
        resp = self.make_pre_authed_request(
            req.environ, 'PUT', path).get_response(self.app)
        if resp.status_int // 100 != 2:
            raise Exception('Could not create account within main auth '
                            'account: %s %s' % (path, resp.status))
    elif resp.status_int // 100 == 2:
        if 'x-container-meta-account-id' in resp.headers:
            # Account was already created
            return HTTPAccepted(request=req)
    else:
        raise Exception('Could not verify account within main auth '
                        'account: %s %s' % (path, resp.status))
    # Record the mapping from account id back to account name
    path = quote('/v1/%s/.account_id/%s%s' %
                 (self.auth_account, self.reseller_prefix, account_suffix))
    resp = self.make_pre_authed_request(
        req.environ, 'PUT', path, account).get_response(self.app)
    if resp.status_int // 100 != 2:
        raise Exception('Could not create account id mapping: %s %s' %
                        (path, resp.status))
    # Record the cluster url(s) for the account
    path = quote('/v1/%s/%s/.services' % (self.auth_account, account))
    services = {'storage': {}}
    services['storage'][self.dsc_name] = '%s/%s%s' % (
        self.dsc_url, self.reseller_prefix, account_suffix)
    services['storage']['default'] = self.dsc_name
    resp = self.make_pre_authed_request(
        req.environ, 'PUT', path,
        json.dumps(services)).get_response(self.app)
    if resp.status_int // 100 != 2:
        raise Exception('Could not create .services object: %s %s' %
                        (path, resp.status))
    # Record the mapping from account name to the account id
    path = quote('/v1/%s/%s' % (self.auth_account, account))
    resp = self.make_pre_authed_request(
        req.environ, 'POST', path,
        headers={'X-Container-Meta-Account-Id': '%s%s' % (
            self.reseller_prefix, account_suffix)}).get_response(self.app)
    if resp.status_int // 100 != 2:
        raise Exception('Could not record the account id on the account: '
                        '%s %s' % (path, resp.status))
    return HTTPCreated(request=req)
def get_turbine_data_from_oedb ( turbine_type , fetch_curve , overwrite = False ) : r"""Fetches data for one wind turbine type from the OpenEnergy Database ( oedb ) . If turbine data exists in local repository it is loaded from this file . The file is created when turbine data was loaded from oedb in : py : func : ` ~ . load _ turbine _ data _ from _ oedb ` . Use this function with ` overwrite = True ` to overwrite your file with newly fetched data . Use : py : func : ` ~ . check _ local _ turbine _ data ` to check weather your local file is up to date . Parameters turbine _ type : string Specifies the turbine type data is fetched for . Use : py : func : ` ~ . get _ turbine _ types ` to see a table of all wind turbines for which power ( coefficient ) curve data is provided . fetch _ curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data . Valid options are ' power _ curve ' and ' power _ coefficient _ curve ' . Default : None . overwrite : boolean If True local file is overwritten by newly fetch data from oedb , if False turbine data is fetched from previously saved file . Returns Tuple ( pandas . DataFrame , float ) Power curve or power coefficient curve ( pandas . DataFrame ) and nominal power ( float ) of one wind turbine type . Power ( coefficient ) curve DataFrame contains power coefficient curve values ( dimensionless ) or power curve values in W with the corresponding wind speeds in m / s ."""
# hdf5 filename filename = os . path . join ( os . path . dirname ( __file__ ) , 'data' , 'turbine_data_oedb.h5' ) if os . path . isfile ( filename ) and not overwrite : logging . debug ( "Turbine data is fetched from {}" . format ( filename ) ) with pd . HDFStore ( filename ) as hdf_store : turbine_data = hdf_store . get ( 'turbine_data' ) else : turbine_data = load_turbine_data_from_oedb ( ) turbine_data . set_index ( 'turbine_type' , inplace = True ) # Set ` curve ` depending on ` fetch _ curve ` to match names in oedb curve = ( 'cp_curve' if fetch_curve == 'power_coefficient_curve' else fetch_curve ) # Select curve and nominal power of turbine type try : df = turbine_data . loc [ turbine_type ] except KeyError : raise KeyError ( "Turbine type '{}' not in database. " . format ( turbine_type ) + "Use 'get_turbine_types()' to see a table of " + "possible wind turbine types." ) if df [ curve ] is not None : df = pd . DataFrame ( df [ curve ] ) else : sys . exit ( "{} of {} not available in " . format ( curve , turbine_type ) + "oedb. Use 'get_turbine_types()' to see for which turbine " + "types power coefficient curves are available." ) nominal_power = turbine_data . loc [ turbine_type ] [ 'installed_capacity_kw' ] * 1000 df . columns = [ 'wind_speed' , 'value' ] if fetch_curve == 'power_curve' : # power in W df [ 'value' ] = df [ 'value' ] * 1000 return df , nominal_power
def find_usage(self):
    """Determine the current usage for each limit of this service, and
    update the corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    # Clear any stale usage before re-polling.
    for limit in self.limits.values():
        limit._reset_usage()
    try:
        self._find_usage_filesystems()
    except (EndpointConnectionError, ClientError, ConnectTimeout) as err:
        # EFS may simply not exist in this region; log and carry on.
        logger.warning(
            'Caught exception when trying to use EFS ('
            'perhaps the EFS service is not available in this '
            'region?): %s', err)
    self._have_usage = True
    logger.debug("Done checking usage.")
def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
    """Fits the NB distribution to data using method of moments followed
    by per-gene likelihood minimization.

    Args:
        data (array): genes x cells
        P_init (array, optional): initial NB success prob param - genes x 1.
            Defaults to the method-of-moments estimate.
        R_init (array, optional): initial NB stopping param - genes x 1.
            Defaults to the method-of-moments estimate.
        epsilon (float, optional): lower bound for R during optimization.
        max_iters (int, optional): max optimizer iterations per gene.

    Returns:
        P, R - fit to data

    Raises:
        ValueError: if any gene's mean is >= its variance (the NB
            distribution requires over-dispersion).
    """
    means = data.mean(1)
    variances = data.var(1)
    if (means > variances).any():
        raise ValueError("For NB fit, means must be less than variances")
    genes, cells = data.shape
    # Method-of-moments starting estimates, unless explicit initial
    # values were supplied (FIX: the original ignored P_init/R_init
    # despite documenting them).
    P = 1.0 - means / variances if P_init is None else P_init
    R = means * (1 - P) / P if R_init is None else R_init
    for i in range(genes):
        # Refine (P, R) per gene by minimizing the NB negative
        # log-likelihood; P bounded to (0, 1), R to [epsilon, inf).
        # FIX: the original passed the undefined name ``eps`` as the
        # lower bound instead of the ``epsilon`` parameter, and never
        # used ``max_iters``.
        result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i, :],),
                          bounds=[(0, 1), (epsilon, None)],
                          options={'maxiter': max_iters})
        params = result.x
        P[i] = params[0]
        R[i] = params[1]
        # R[i] = fsolve(nb_r_deriv, R[i], args=(data[i,:],))
        # P[i] = data[i,:].mean()/(data[i,:].mean()+R[i])
    return P, R