query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
For remote communication . Sets the communication dispatcher of the host at the address and port specified .
def load_transport ( self , url ) : aurl = urlparse ( url ) addrl = aurl . netloc . split ( ':' ) self . addr = addrl [ 0 ] , addrl [ 1 ] self . transport = aurl . scheme self . host_url = aurl if aurl . scheme == 'http' : self . launch_actor ( 'http' , rpcactor . RPCDispatcher ( url , self , 'rpc' ) ) elif aurl . scheme == 'amqp' : self . launch_actor ( 'amqp' , rpcactor . RPCDispatcher ( url , self , 'rabbit' ) )
4,500
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L152-L176
[ "def", "prepare_blobs", "(", "self", ")", ":", "self", ".", "raw_header", "=", "self", ".", "extract_header", "(", ")", "if", "self", ".", "cache_enabled", ":", "self", ".", "_cache_offsets", "(", ")" ]
Checks if the given id is used in the host by some actor .
def has_actor ( self , aid ) : url = '%s://%s/%s' % ( self . transport , self . host_url . netloc , aid ) return url in self . actors . keys ( )
4,501
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L235-L243
[ "def", "publish", "(", "build", ")", ":", "build", ".", "packages", ".", "install", "(", "\"wheel\"", ")", "build", ".", "packages", ".", "install", "(", "\"twine\"", ")", "build", ".", "executables", ".", "run", "(", "[", "\"python\"", ",", "\"setup.py\"", ",", "\"sdist\"", ",", "\"bdist_wheel\"", ",", "\"--universal\"", ",", "\"--release\"", "]", ")", "build", ".", "executables", ".", "run", "(", "[", "\"twine\"", ",", "\"upload\"", ",", "\"dist/*\"", "]", ")" ]
This method removes one actor from the Host stoping it and deleting all its references .
def stop_actor ( self , aid ) : url = '%s://%s/%s' % ( self . transport , self . host_url . netloc , aid ) if url != self . url : actor = self . actors [ url ] Proxy ( actor ) . stop ( ) actor . thread . join ( ) del self . actors [ url ] del self . threads [ actor . thread ]
4,502
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L304-L317
[ "def", "_required_byte_num", "(", "mode", ",", "fmt", ",", "n_samp", ")", ":", "if", "fmt", "==", "'212'", ":", "n_bytes", "=", "math", ".", "ceil", "(", "n_samp", "*", "1.5", ")", "elif", "fmt", "in", "[", "'310'", ",", "'311'", "]", ":", "n_extra", "=", "n_samp", "%", "3", "if", "n_extra", "==", "2", ":", "if", "fmt", "==", "'310'", ":", "n_bytes", "=", "upround", "(", "n_samp", "*", "4", "/", "3", ",", "4", ")", "# 311", "else", ":", "if", "mode", "==", "'read'", ":", "n_bytes", "=", "math", ".", "ceil", "(", "n_samp", "*", "4", "/", "3", ")", "# Have to write more bytes for wfdb c to work", "else", ":", "n_bytes", "=", "upround", "(", "n_samp", "*", "4", "/", "3", ",", "4", ")", "# 0 or 1", "else", ":", "n_bytes", "=", "math", ".", "ceil", "(", "n_samp", "*", "4", "/", "3", ")", "else", ":", "n_bytes", "=", "n_samp", "*", "BYTES_PER_SAMPLE", "[", "fmt", "]", "return", "int", "(", "n_bytes", ")" ]
Gets a proxy reference to the actor indicated by the URL in the parameters . It can be a local reference or a remote direction to another host .
def lookup_url ( self , url , klass , module = None ) : if not self . alive : raise HostDownError ( ) aurl = urlparse ( url ) if self . is_local ( aurl ) : if url not in self . actors . keys ( ) : raise NotFoundError ( url ) else : return Proxy ( self . actors [ url ] ) else : try : dispatcher = self . actors [ aurl . scheme ] if module is not None : try : module_ = __import__ ( module , globals ( ) , locals ( ) , [ klass ] , - 1 ) klass_ = getattr ( module_ , klass ) except Exception , e : raise HostError ( "At lookup_url: " + "Import failed for module " + module + ", class " + klass + ". Check this values for the lookup." + " ERROR: " + str ( e ) ) elif isinstance ( klass , ( types . TypeType , types . ClassType ) ) : klass_ = klass else : raise HostError ( "The class specified to look up is" + " not a class." ) remote_actor = actor . ActorRef ( url , klass_ , dispatcher . channel ) return Proxy ( remote_actor ) except HostError : raise except Exception , e : raise HostError ( "ERROR looking for the actor on another " + "server. Hosts must " + "be in http to work properly. " + str ( e ) )
4,503
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L319-L373
[ "def", "wait_for_start", "(", "self", ")", ":", "if", "self", ".", "wait_matchers", ":", "matcher", "=", "UnorderedMatcher", "(", "*", "self", ".", "wait_matchers", ")", "self", ".", "wait_for_logs_matching", "(", "matcher", ",", "timeout", "=", "self", ".", "wait_timeout", ")" ]
Checks the parameters generating new proxy instances to avoid query concurrences from shared proxies and creating proxies for actors from another host .
def dumps ( self , param ) : if isinstance ( param , Proxy ) : module_name = param . actor . klass . __module__ filename = sys . modules [ module_name ] . __file__ return ProxyRef ( param . actor . url , param . actor . klass . __name__ , module_name ) elif isinstance ( param , list ) : return [ self . dumps ( elem ) for elem in param ] elif isinstance ( param , dict ) : new_dict = param for key in new_dict . keys ( ) : new_dict [ key ] = self . dumps ( new_dict [ key ] ) return new_dict elif isinstance ( param , tuple ) : return tuple ( [ self . dumps ( elem ) for elem in param ] ) else : return param
4,504
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L419-L440
[ "def", "get_exif_data", "(", "self", ",", "image", ")", ":", "exif_data", "=", "{", "}", "info", "=", "image", ".", "_getexif", "(", ")", "if", "info", ":", "for", "tag", ",", "value", "in", "info", ".", "items", "(", ")", ":", "decoded", "=", "TAGS", ".", "get", "(", "tag", ",", "tag", ")", "if", "decoded", "==", "\"GPSInfo\"", ":", "gps_data", "=", "{", "}", "for", "t", "in", "value", ":", "sub_decoded", "=", "GPSTAGS", ".", "get", "(", "t", ",", "t", ")", "gps_data", "[", "sub_decoded", "]", "=", "value", "[", "t", "]", "exif_data", "[", "decoded", "]", "=", "gps_data", "else", ":", "exif_data", "[", "decoded", "]", "=", "value", "return", "exif_data" ]
Checks the return parameters generating new proxy instances to avoid query concurrences from shared proxies and creating proxies for actors from another host .
def loads ( self , param ) : if isinstance ( param , ProxyRef ) : try : return self . lookup_url ( param . url , param . klass , param . module ) except HostError : print "Can't lookup for the actor received with the call. \ It does not exist or the url is unreachable." , param raise HostError ( param ) elif isinstance ( param , list ) : return [ self . loads ( elem ) for elem in param ] elif isinstance ( param , tuple ) : return tuple ( [ self . loads ( elem ) for elem in param ] ) elif isinstance ( param , dict ) : new_dict = param for key in new_dict . keys ( ) : new_dict [ key ] = self . loads ( new_dict [ key ] ) return new_dict else : return param
4,505
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L442-L465
[ "def", "get_exif_data", "(", "self", ",", "image", ")", ":", "exif_data", "=", "{", "}", "info", "=", "image", ".", "_getexif", "(", ")", "if", "info", ":", "for", "tag", ",", "value", "in", "info", ".", "items", "(", ")", ":", "decoded", "=", "TAGS", ".", "get", "(", "tag", ",", "tag", ")", "if", "decoded", "==", "\"GPSInfo\"", ":", "gps_data", "=", "{", "}", "for", "t", "in", "value", ":", "sub_decoded", "=", "GPSTAGS", ".", "get", "(", "t", ",", "t", ")", "gps_data", "[", "sub_decoded", "]", "=", "value", "[", "t", "]", "exif_data", "[", "decoded", "]", "=", "gps_data", "else", ":", "exif_data", "[", "decoded", "]", "=", "value", "return", "exif_data" ]
Register a new thread executing a parallel method .
def new_parallel ( self , function , * params ) : # Create a pool if not created (processes or Gevent...) if self . ppool is None : if core_type == 'thread' : from multiprocessing . pool import ThreadPool self . ppool = ThreadPool ( 500 ) else : from gevent . pool import Pool self . ppool = Pool ( 500 ) # Add the new task to the pool self . ppool . apply_async ( function , * params )
4,506
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L467-L480
[ "def", "get_registration_email_link", "(", "application", ")", ":", "url", "=", "'%s/applications/%d/'", "%", "(", "settings", ".", "REGISTRATION_BASE_URL", ",", "application", ".", "pk", ")", "is_secret", "=", "False", "return", "url", ",", "is_secret" ]
Open a remote file and write it locally .
def write_to_local ( self , filepath_from , filepath_to , mtime_dt = None ) : self . __log . debug ( "Writing R[%s] -> L[%s]." % ( filepath_from , filepath_to ) ) with SftpFile ( self , filepath_from , 'r' ) as sf_from : with open ( filepath_to , 'wb' ) as file_to : while 1 : part = sf_from . read ( MAX_MIRROR_WRITE_CHUNK_SIZE ) file_to . write ( part ) if len ( part ) < MAX_MIRROR_WRITE_CHUNK_SIZE : break if mtime_dt is None : mtime_dt = datetime . now ( ) mtime_epoch = mktime ( mtime_dt . timetuple ( ) ) utime ( filepath_to , ( mtime_epoch , mtime_epoch ) )
4,507
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L570-L589
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Open a local file and write it remotely .
def write_to_remote ( self , filepath_from , filepath_to , mtime_dt = None ) : self . __log . debug ( "Writing L[%s] -> R[%s]." % ( filepath_from , filepath_to ) ) with open ( filepath_from , 'rb' ) as file_from : with SftpFile ( self , filepath_to , 'w' ) as sf_to : while 1 : part = file_from . read ( MAX_MIRROR_WRITE_CHUNK_SIZE ) sf_to . write ( part ) if len ( part ) < MAX_MIRROR_WRITE_CHUNK_SIZE : break if mtime_dt is None : mtime_dt = datetime . now ( ) self . utimes_dt ( filepath_to , mtime_dt , mtime_dt )
4,508
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L591-L609
[ "def", "_unregister_bundle_factories", "(", "self", ",", "bundle", ")", ":", "# type: (Bundle) -> None", "with", "self", ".", "__factories_lock", ":", "# Find out which factories must be removed", "to_remove", "=", "[", "factory_name", "for", "factory_name", "in", "self", ".", "__factories", "if", "self", ".", "get_factory_bundle", "(", "factory_name", ")", "is", "bundle", "]", "# Remove all of them", "for", "factory_name", "in", "to_remove", ":", "try", ":", "self", ".", "unregister_factory", "(", "factory_name", ")", "except", "ValueError", "as", "ex", ":", "_logger", ".", "warning", "(", "\"Error unregistering factory '%s': %s\"", ",", "factory_name", ",", "ex", ")" ]
This is the only way to open a file resource .
def open ( self ) : self . __sf = _sftp_open ( self . __sftp_session_int , self . __filepath , self . access_type_int , self . __create_mode ) if self . access_type_is_append is True : self . seek ( self . filesize ) return SftpFileObject ( self )
4,509
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L698-L709
[ "def", "unsubscribe", "(", "request", ",", "message_id", ",", "dispatch_id", ",", "hashed", ",", "redirect_to", "=", "None", ")", ":", "return", "_generic_view", "(", "'handle_unsubscribe_request'", ",", "sig_unsubscribe_failed", ",", "request", ",", "message_id", ",", "dispatch_id", ",", "hashed", ",", "redirect_to", "=", "redirect_to", ")" ]
Read a length of bytes . Return empty on EOF . If size is omitted return whole file .
def read ( self , size = None ) : if size is not None : return self . __sf . read ( size ) block_size = self . __class__ . __block_size b = bytearray ( ) received_bytes = 0 while 1 : partial = self . __sf . read ( block_size ) # self.__log.debug("Reading (%d) bytes. (%d) bytes returned." % # (block_size, len(partial))) b . extend ( partial ) received_bytes += len ( partial ) if len ( partial ) < block_size : self . __log . debug ( "End of file." ) break self . __log . debug ( "Read (%d) bytes for total-file." % ( received_bytes ) ) return b
4,510
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L886-L912
[ "def", "unmount", "(", "self", ")", ":", "self", ".", "unmount_bindmounts", "(", ")", "self", ".", "unmount_mounts", "(", ")", "self", ".", "unmount_volume_groups", "(", ")", "self", ".", "unmount_loopbacks", "(", ")", "self", ".", "unmount_base_images", "(", ")", "self", ".", "clean_dirs", "(", ")" ]
Reposition the file pointer .
def seek ( self , offset , whence = SEEK_SET ) : if whence == SEEK_SET : self . __sf . seek ( offset ) elif whence == SEEK_CUR : self . __sf . seek ( self . tell ( ) + offset ) elif whence == SEEK_END : self . __sf . seek ( self . __sf . filesize - offset )
4,511
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L919-L927
[ "def", "_get_search_prefix_map", "(", "self", ")", ":", "# TuneIn does not have a pmap. Its search keys are is search:station,", "# search:show, search:host", "# Presentation maps can also define custom categories. See eg", "# http://sonos-pmap.ws.sonos.com/hypemachine_pmap.6.xml", "# <SearchCategories>", "# ...", "# <CustomCategory mappedId=\"SBLG\" stringId=\"Blogs\"/>", "# </SearchCategories>", "# Is it already cached? If so, return it", "if", "self", ".", "_search_prefix_map", "is", "not", "None", ":", "return", "self", ".", "_search_prefix_map", "# Not cached. Fetch and parse presentation map", "self", ".", "_search_prefix_map", "=", "{", "}", "# Tunein is a special case. It has no pmap, but supports searching", "if", "self", ".", "service_name", "==", "\"TuneIn\"", ":", "self", ".", "_search_prefix_map", "=", "{", "'stations'", ":", "'search:station'", ",", "'shows'", ":", "'search:show'", ",", "'hosts'", ":", "'search:host'", ",", "}", "return", "self", ".", "_search_prefix_map", "if", "self", ".", "presentation_map_uri", "is", "None", ":", "# Assume not searchable?", "return", "self", ".", "_search_prefix_map", "log", ".", "info", "(", "'Fetching presentation map from %s'", ",", "self", ".", "presentation_map_uri", ")", "pmap", "=", "requests", ".", "get", "(", "self", ".", "presentation_map_uri", ",", "timeout", "=", "9", ")", "pmap_root", "=", "XML", ".", "fromstring", "(", "pmap", ".", "content", ")", "# Search translations can appear in Category or CustomCategory elements", "categories", "=", "pmap_root", ".", "findall", "(", "\".//SearchCategories/Category\"", ")", "if", "categories", "is", "None", ":", "return", "self", ".", "_search_prefix_map", "for", "cat", "in", "categories", ":", "self", ".", "_search_prefix_map", "[", "cat", ".", "get", "(", "'id'", ")", "]", "=", "cat", ".", "get", "(", "'mappedId'", ")", "custom_categories", "=", "pmap_root", ".", "findall", "(", "\".//SearchCategories/CustomCategory\"", ")", "for", "cat", "in", 
"custom_categories", ":", "self", ".", "_search_prefix_map", "[", "cat", ".", "get", "(", "'stringId'", ")", "]", "=", "cat", ".", "get", "(", "'mappedId'", ")", "return", "self", ".", "_search_prefix_map" ]
Read a single line of text with EOF .
def readline ( self , size = None ) : # TODO: Add support for Unicode. ( line , nl ) = self . __buffer . read_until_nl ( self . __retrieve_data ) if self . __sf . access_type_has_universal_nl and nl is not None : self . __newlines [ nl ] = True return line
4,512
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L960-L969
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Read more data from the file .
def __retrieve_data ( self ) : if self . __eof is True : return b'' logging . debug ( "Reading another block." ) block = self . read ( self . __block_size ) if block == b'' : self . __log . debug ( "We've encountered the EOF." ) self . __eof = True return block
4,513
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L971-L983
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Adds a mask from a shapefile
def set_mask_from_shapefile ( self , shapefile_path , cell_size ) : # make sure paths are absolute as the working directory changes shapefile_path = os . path . abspath ( shapefile_path ) # ADD MASK with tmp_chdir ( self . project_directory ) : mask_name = '{0}.msk' . format ( self . project_manager . name ) msk_file = WatershedMaskFile ( project_file = self . project_manager , session = self . db_session ) msk_file . generateFromWatershedShapefile ( shapefile_path , cell_size = cell_size , out_raster_path = mask_name , load_raster_to_db = self . load_rasters_to_db )
4,514
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L196-L211
[ "def", "register", "(", "self", ",", "slug", ",", "bundle", ",", "order", "=", "1", ",", "title", "=", "None", ")", ":", "if", "slug", "in", "self", ".", "_registry", ":", "raise", "AlreadyRegistered", "(", "'The url %s is already registered'", "%", "slug", ")", "# Instantiate the admin class to save in the registry.", "self", ".", "_registry", "[", "slug", "]", "=", "bundle", "self", ".", "_order", "[", "slug", "]", "=", "order", "if", "title", ":", "self", ".", "_titles", "[", "slug", "]", "=", "title", "bundle", ".", "set_admin_site", "(", "self", ")" ]
Adds elevation file to project
def set_elevation ( self , elevation_grid_path , mask_shapefile ) : # ADD ELEVATION FILE ele_file = ElevationGridFile ( project_file = self . project_manager , session = self . db_session ) ele_file . generateFromRaster ( elevation_grid_path , mask_shapefile , load_raster_to_db = self . load_rasters_to_db )
4,515
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L213-L222
[ "def", "getShareInfo", "(", "item", ")", ":", "key", "=", "f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'", "info", "=", "getattr", "(", "item", ",", "key", ",", "None", ")", "if", "info", "is", "not", "None", ":", "return", "info", "meths", "=", "{", "}", "info", "=", "{", "'meths'", ":", "meths", "}", "for", "name", "in", "dir", "(", "item", ")", ":", "if", "name", ".", "startswith", "(", "'_'", ")", ":", "continue", "attr", "=", "getattr", "(", "item", ",", "name", ",", "None", ")", "if", "not", "callable", "(", "attr", ")", ":", "continue", "# We know we can cleanly unwrap these functions", "# for asyncgenerator inspection.", "wrapped", "=", "getattr", "(", "attr", ",", "'__syn_wrapped__'", ",", "None", ")", "if", "wrapped", "in", "unwraps", ":", "real", "=", "inspect", ".", "unwrap", "(", "attr", ")", "if", "inspect", ".", "isasyncgenfunction", "(", "real", ")", ":", "meths", "[", "name", "]", "=", "{", "'genr'", ":", "True", "}", "continue", "if", "inspect", ".", "isasyncgenfunction", "(", "attr", ")", ":", "meths", "[", "name", "]", "=", "{", "'genr'", ":", "True", "}", "try", ":", "setattr", "(", "item", ",", "key", ",", "info", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "logger", ".", "exception", "(", "f'Failed to set magic on {item}'", ")", "try", ":", "setattr", "(", "item", ".", "__class__", ",", "key", ",", "info", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "logger", ".", "exception", "(", "f'Failed to set magic on {item.__class__}'", ")", "return", "info" ]
Adds outlet point to project
def set_outlet ( self , latitude , longitude , outslope ) : self . project_manager . setOutlet ( latitude = latitude , longitude = longitude , outslope = outslope )
4,516
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L224-L229
[ "def", "load_paired_notebook", "(", "notebook", ",", "fmt", ",", "nb_file", ",", "log", ")", ":", "formats", "=", "notebook", ".", "metadata", ".", "get", "(", "'jupytext'", ",", "{", "}", ")", ".", "get", "(", "'formats'", ")", "if", "not", "formats", ":", "raise", "ValueError", "(", "\"'{}' is not a paired notebook\"", ".", "format", "(", "nb_file", ")", ")", "max_mtime_inputs", "=", "None", "max_mtime_outputs", "=", "None", "latest_inputs", "=", "None", "latest_outputs", "=", "None", "for", "alt_path", ",", "alt_fmt", "in", "paired_paths", "(", "nb_file", ",", "fmt", ",", "formats", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "alt_path", ")", ":", "continue", "info", "=", "os", ".", "lstat", "(", "alt_path", ")", "if", "not", "max_mtime_inputs", "or", "info", ".", "st_mtime", ">", "max_mtime_inputs", ":", "max_mtime_inputs", "=", "info", ".", "st_mtime", "latest_inputs", ",", "input_fmt", "=", "alt_path", ",", "alt_fmt", "if", "alt_path", ".", "endswith", "(", "'.ipynb'", ")", ":", "if", "not", "max_mtime_outputs", "or", "info", ".", "st_mtime", ">", "max_mtime_outputs", ":", "max_mtime_outputs", "=", "info", ".", "st_mtime", "latest_outputs", "=", "alt_path", "if", "latest_outputs", "and", "latest_outputs", "!=", "latest_inputs", ":", "log", "(", "\"[jupytext] Loading input cells from '{}'\"", ".", "format", "(", "latest_inputs", ")", ")", "inputs", "=", "notebook", "if", "latest_inputs", "==", "nb_file", "else", "readf", "(", "latest_inputs", ",", "input_fmt", ")", "check_file_version", "(", "inputs", ",", "latest_inputs", ",", "latest_outputs", ")", "log", "(", "\"[jupytext] Loading output cells from '{}'\"", ".", "format", "(", "latest_outputs", ")", ")", "outputs", "=", "notebook", "if", "latest_outputs", "==", "nb_file", "else", "readf", "(", "latest_outputs", ")", "combine_inputs_with_outputs", "(", "inputs", ",", "outputs", ",", "input_fmt", ")", "return", "inputs", ",", "latest_inputs", ",", "latest_outputs", "log", "(", 
"\"[jupytext] Loading notebook from '{}'\"", ".", "format", "(", "latest_inputs", ")", ")", "if", "latest_inputs", "!=", "nb_file", ":", "notebook", "=", "readf", "(", "latest_inputs", ",", "input_fmt", ")", "return", "notebook", ",", "latest_inputs", ",", "latest_outputs" ]
Initializes event for GSSHA model
def set_event ( self , simulation_start = None , simulation_duration = None , simulation_end = None , rain_intensity = 2 , rain_duration = timedelta ( seconds = 30 * 60 ) , event_type = 'EVENT' , ) : # ADD TEMPORTAL EVENT INFORMAITON if event_type == 'LONG_TERM' : self . event = LongTermMode ( self . project_manager , self . db_session , self . project_directory , simulation_start = simulation_start , simulation_end = simulation_end , simulation_duration = simulation_duration , ) else : # 'EVENT' self . event = EventMode ( self . project_manager , self . db_session , self . project_directory , simulation_start = simulation_start , simulation_duration = simulation_duration , ) self . event . add_uniform_precip_event ( intensity = rain_intensity , duration = rain_duration )
4,517
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L259-L287
[ "def", "_to_ctfile_property_block", "(", "self", ")", ":", "ctab_properties_data", "=", "defaultdict", "(", "list", ")", "for", "atom", "in", "self", ".", "atoms", ":", "for", "ctab_property_key", ",", "ctab_property_value", "in", "atom", ".", "_ctab_property_data", ".", "items", "(", ")", ":", "ctab_properties_data", "[", "ctab_property_key", "]", ".", "append", "(", "OrderedDict", "(", "zip", "(", "self", ".", "ctab_conf", "[", "self", ".", "version", "]", "[", "ctab_property_key", "]", "[", "'values'", "]", ",", "[", "atom", ".", "atom_number", ",", "ctab_property_value", "]", ")", ")", ")", "ctab_property_lines", "=", "[", "]", "for", "ctab_property_key", ",", "ctab_property_value", "in", "ctab_properties_data", ".", "items", "(", ")", ":", "for", "entry", "in", "ctab_property_value", ":", "ctab_property_line", "=", "'{} {}{}'", ".", "format", "(", "self", ".", "ctab_conf", "[", "self", ".", "version", "]", "[", "ctab_property_key", "]", "[", "'fmt'", "]", ",", "1", ",", "''", ".", "join", "(", "[", "str", "(", "value", ")", ".", "rjust", "(", "4", ")", "for", "value", "in", "entry", ".", "values", "(", ")", "]", ")", ")", "ctab_property_lines", ".", "append", "(", "ctab_property_line", ")", "if", "ctab_property_lines", ":", "return", "'{}\\n'", ".", "format", "(", "'\\n'", ".", "join", "(", "ctab_property_lines", ")", ")", "return", "''" ]
Write project to directory
def write ( self ) : # write data self . project_manager . writeInput ( session = self . db_session , directory = self . project_directory , name = self . project_manager . name )
4,518
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L289-L296
[ "def", "rate_limit", "(", "f", ")", ":", "def", "new_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "errors", "=", "0", "while", "True", ":", "resp", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "resp", ".", "status_code", "==", "200", ":", "errors", "=", "0", "return", "resp", "elif", "resp", ".", "status_code", "==", "401", ":", "# Hack to retain the original exception, but augment it with", "# additional context for the user to interpret it. In a Python", "# 3 only future we can raise a new exception of the same type", "# with a new message from the old error.", "try", ":", "resp", ".", "raise_for_status", "(", ")", "except", "requests", ".", "HTTPError", "as", "e", ":", "message", "=", "\"\\nThis is a protected or locked account, or\"", "+", "\" the credentials provided are no longer valid.\"", "e", ".", "args", "=", "(", "e", ".", "args", "[", "0", "]", "+", "message", ",", ")", "+", "e", ".", "args", "[", "1", ":", "]", "log", ".", "warning", "(", "\"401 Authentication required for %s\"", ",", "resp", ".", "url", ")", "raise", "elif", "resp", ".", "status_code", "==", "429", ":", "reset", "=", "int", "(", "resp", ".", "headers", "[", "'x-rate-limit-reset'", "]", ")", "now", "=", "time", ".", "time", "(", ")", "seconds", "=", "reset", "-", "now", "+", "10", "if", "seconds", "<", "1", ":", "seconds", "=", "10", "log", ".", "warning", "(", "\"rate limit exceeded: sleeping %s secs\"", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "elif", "resp", ".", "status_code", ">=", "500", ":", "errors", "+=", "1", "if", "errors", ">", "30", ":", "log", ".", "warning", "(", "\"too many errors from Twitter, giving up\"", ")", "resp", ".", "raise_for_status", "(", ")", "seconds", "=", "60", "*", "errors", "log", ".", "warning", "(", "\"%s from Twitter API, sleeping %s\"", ",", "resp", ".", "status_code", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "else", ":", "resp", ".", "raise_for_status", "(", ")", 
"return", "new_f" ]
Recursively mirror the contents of path_from into path_to . handler should be self . mirror_to_local_no_recursion or self . mirror_to_remote_no_recursion to represent which way the files are moving .
def mirror ( self , handler , path_from , path_to , log_files = False ) : q = deque ( [ '' ] ) while q : path = q . popleft ( ) full_from = ( '%s/%s' % ( path_from , path ) ) if path else path_from full_to = ( '%s/%s' % ( path_to , path ) ) if path else path_to subdirs = handler ( full_from , full_to , log_files ) for subdir in subdirs : q . append ( ( '%s/%s' % ( path , subdir ) ) if path else subdir )
4,519
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/sftp_mirror.py#L26-L42
[ "def", "write_response", "(", "self", ",", "status_code", ":", "Union", "[", "int", ",", "constants", ".", "HttpStatusCode", "]", ",", "*", ",", "headers", ":", "Optional", "[", "_HeaderType", "]", "=", "None", ")", "->", "\"writers.HttpResponseWriter\"", ":", "self", ".", "_writer", "=", "self", ".", "__delegate", ".", "write_response", "(", "constants", ".", "HttpStatusCode", "(", "status_code", ")", ",", "headers", "=", "headers", ")", "return", "self", ".", "_writer" ]
Create database relationships between the link node dataset and the channel input file .
def linkToChannelInputFile ( self , session , channelInputFile , force = False ) : # Only perform operation if the channel input file has not been assigned or the force parameter is true if self . channelInputFile is not None and not force : return # Set the channel input file relationship self . channelInputFile = channelInputFile # Retrieve the fluvial stream links orderedLinks = channelInputFile . getOrderedLinks ( session ) # Retrieve the LinkNodeTimeStep objects timeSteps = self . timeSteps # Link each link dataset in each time step for timeStep in timeSteps : # Retrieve link datasets linkDatasets = timeStep . linkDatasets # Link each node dataset for l , linkDataset in enumerate ( linkDatasets ) : # Get the fluvial link and nodes streamLink = orderedLinks [ l ] streamNodes = streamLink . nodes # Link link datasets to fluvial links linkDataset . link = streamLink # Retrieve node datasets nodeDatasets = linkDataset . nodeDatasets # Link the node dataset with the channel input file nodes if len ( nodeDatasets ) > 0 and len ( streamNodes ) > 0 : for n , nodeDataset in enumerate ( nodeDatasets ) : nodeDataset . node = streamNodes [ n ] session . add ( self ) session . commit ( )
4,520
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/lnd.py#L76-L131
[ "def", "searchEnterpriseGroups", "(", "self", ",", "searchFilter", "=", "\"\"", ",", "maxCount", "=", "100", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"filter\"", ":", "searchFilter", ",", "\"maxCount\"", ":", "maxCount", "}", "url", "=", "self", ".", "_url", "+", "\"/groups/searchEnterpriseGroups\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Link Node Dataset File Read from File Method
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Dictionary of keywords/cards and parse function names KEYWORDS = ( 'NUM_LINKS' , 'TIME_STEP' , 'NUM_TS' , 'START_TIME' , 'TS' ) # Parse file into chunks associated with keywords/cards with open ( path , 'r' ) as f : self . name = f . readline ( ) . strip ( ) chunks = pt . chunk ( KEYWORDS , f ) # Parse chunks associated with each key for card , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : schunk = chunk [ 0 ] . strip ( ) . split ( ) # Cases if card == 'NUM_LINKS' : # NUM_LINKS handler self . numLinks = schunk [ 1 ] elif card == 'TIME_STEP' : # TIME_STEP handler self . timeStepInterval = schunk [ 1 ] elif card == 'NUM_TS' : # NUM_TS handler self . numTimeSteps = schunk [ 1 ] elif card == 'START_TIME' : # START_TIME handler self . startTime = '%s %s %s %s %s %s' % ( schunk [ 1 ] , schunk [ 2 ] , schunk [ 3 ] , schunk [ 4 ] , schunk [ 5 ] , schunk [ 6 ] ) elif card == 'TS' : # TS handler for line in chunk : sline = line . strip ( ) . split ( ) token = sline [ 0 ] # Cases if token == 'TS' : # Time Step line handler timeStep = LinkNodeTimeStep ( timeStep = sline [ 1 ] ) timeStep . linkNodeDataset = self else : # Split the line spLinkLine = line . strip ( ) . split ( ) # Create LinkDataset GSSHAPY object linkDataset = LinkDataset ( ) linkDataset . numNodeDatasets = int ( spLinkLine [ 0 ] ) linkDataset . timeStep = timeStep linkDataset . linkNodeDatasetFile = self # Parse line into NodeDatasets NODE_VALUE_INCREMENT = 2 statusIndex = 1 valueIndex = statusIndex + 1 # Parse line into node datasets if linkDataset . numNodeDatasets > 0 : for i in range ( 0 , linkDataset . numNodeDatasets ) : # Create NodeDataset GSSHAPY object nodeDataset = NodeDataset ( ) nodeDataset . status = int ( spLinkLine [ statusIndex ] ) nodeDataset . 
value = float ( spLinkLine [ valueIndex ] ) nodeDataset . linkDataset = linkDataset nodeDataset . linkNodeDatasetFile = self # Increment to next status/value pair statusIndex += NODE_VALUE_INCREMENT valueIndex += NODE_VALUE_INCREMENT else : nodeDataset = NodeDataset ( ) nodeDataset . value = float ( spLinkLine [ 1 ] ) nodeDataset . linkDataset = linkDataset nodeDataset . linkNodeDatasetFile = self
4,521
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/lnd.py#L356-L448
[ "async", "def", "batch_call_api", "(", "container", ",", "apis", ",", "timeout", "=", "120.0", ")", ":", "apiHandles", "=", "[", "(", "object", "(", ")", ",", "api", ")", "for", "api", "in", "apis", "]", "apiEvents", "=", "[", "ModuleAPICall", "(", "handle", ",", "targetname", ",", "name", ",", "params", "=", "params", ")", "for", "handle", ",", "(", "targetname", ",", "name", ",", "params", ")", "in", "apiHandles", "]", "apiMatchers", "=", "tuple", "(", "ModuleAPIReply", ".", "createMatcher", "(", "handle", ")", "for", "handle", ",", "_", "in", "apiHandles", ")", "async", "def", "process", "(", ")", ":", "for", "e", "in", "apiEvents", ":", "await", "container", ".", "wait_for_send", "(", "e", ")", "container", ".", "subroutine", "(", "process", "(", ")", ",", "False", ")", "eventdict", "=", "{", "}", "async", "def", "process2", "(", ")", ":", "ms", "=", "len", "(", "apiMatchers", ")", "matchers", "=", "Diff_", "(", "apiMatchers", ")", "while", "ms", ":", "ev", ",", "m", "=", "await", "matchers", "matchers", "=", "Diff_", "(", "matchers", ",", "remove", "=", "(", "m", ",", ")", ")", "eventdict", "[", "ev", ".", "handle", "]", "=", "ev", "await", "container", ".", "execute_with_timeout", "(", "timeout", ",", "process2", "(", ")", ")", "for", "e", "in", "apiEvents", ":", "if", "e", ".", "handle", "not", "in", "eventdict", ":", "e", ".", "canignore", "=", "True", "container", ".", "scheduler", ".", "ignore", "(", "ModuleAPICall", ".", "createMatcher", "(", "e", ".", "handle", ")", ")", "return", "[", "eventdict", ".", "get", "(", "handle", ",", "None", ")", "for", "handle", ",", "_", "in", "apiHandles", "]" ]
Link Node Dataset File Write to File Method
def _write ( self , session , openFile , replaceParamFile ) : # Retrieve TimeStep objects timeSteps = self . timeSteps # Write Lines openFile . write ( '%s\n' % self . name ) openFile . write ( 'NUM_LINKS %s\n' % self . numLinks ) openFile . write ( 'TIME_STEP %s\n' % self . timeStepInterval ) openFile . write ( 'NUM_TS %s\n' % self . numTimeSteps ) openFile . write ( 'START_TIME %s\n' % self . startTime ) for timeStep in timeSteps : openFile . write ( 'TS %s\n' % timeStep . timeStep ) # Retrieve LinkDataset objects linkDatasets = timeStep . linkDatasets for linkDataset in linkDatasets : # Write number of node datasets values openFile . write ( '{0} ' . format ( linkDataset . numNodeDatasets ) ) # Retrieve NodeDatasets nodeDatasets = linkDataset . nodeDatasets if linkDataset . numNodeDatasets > 0 : for nodeDataset in nodeDatasets : # Write status and value openFile . write ( '{0} {1:.5f} ' . format ( nodeDataset . status , nodeDataset . value ) ) else : for nodeDataset in nodeDatasets : # Write status and value if linkDataset . numNodeDatasets < 0 : openFile . write ( '{0:.5f}' . format ( nodeDataset . value ) ) else : openFile . write ( '{0:.3f}' . format ( nodeDataset . value ) ) # Write new line character after each link dataset openFile . write ( '\n' ) # Insert empty line between time steps openFile . write ( '\n' )
4,522
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/lnd.py#L452-L496
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Log into container .
def login ( container ) : columns , lines = shutil . get_terminal_size ( ) # Temporary try : subprocess . check_call ( [ "docker" , "exec" , "--env" , f"COLUMNS={str(columns)},LINES={str(lines)}" , # Temporary "--env" , f"LINES={str(lines)}" , # Temporary "--interactive" , "--tty" , container , "bash" , "--login" ] ) except subprocess . CalledProcessError : raise RuntimeError ( ) from None
4,523
https://github.com/cs50/cli50/blob/f712328200dd40c3e19e73893644cb61125ea66e/cli50/__main__.py#L195-L210
[ "def", "disable_avatar", "(", "self", ")", ":", "with", "(", "yield", "from", "self", ".", "_publish_lock", ")", ":", "todo", "=", "[", "]", "if", "self", ".", "_synchronize_vcard", ":", "todo", ".", "append", "(", "self", ".", "_disable_vcard_avatar", "(", ")", ")", "if", "(", "yield", "from", "self", ".", "_pep", ".", "available", "(", ")", ")", ":", "todo", ".", "append", "(", "self", ".", "_pep", ".", "publish", "(", "namespaces", ".", "xep0084_metadata", ",", "avatar_xso", ".", "Metadata", "(", ")", ")", ")", "yield", "from", "gather_reraise_multi", "(", "*", "todo", ",", "message", "=", "\"disable_avatar\"", ")" ]
Update GSSHA simulation start time
def _update_simulation_start ( self , simulation_start ) : self . simulation_start = simulation_start if self . simulation_duration is not None and self . simulation_start is not None : self . simulation_end = self . simulation_start + self . simulation_duration self . _update_simulation_start_cards ( )
4,524
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L148-L155
[ "def", "delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# XXX: circular import", "from", "fields", "import", "RatingField", "qs", "=", "self", ".", "distinct", "(", ")", ".", "values_list", "(", "'content_type'", ",", "'object_id'", ")", ".", "order_by", "(", "'content_type'", ")", "to_update", "=", "[", "]", "for", "content_type", ",", "objects", "in", "itertools", ".", "groupby", "(", "qs", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ":", "model_class", "=", "ContentType", ".", "objects", ".", "get", "(", "pk", "=", "content_type", ")", ".", "model_class", "(", ")", "if", "model_class", ":", "to_update", ".", "extend", "(", "list", "(", "model_class", ".", "objects", ".", "filter", "(", "pk__in", "=", "list", "(", "objects", ")", "[", "0", "]", ")", ")", ")", "retval", "=", "super", "(", "VoteQuerySet", ",", "self", ")", ".", "delete", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# TODO: this could be improved", "for", "obj", "in", "to_update", ":", "for", "field", "in", "getattr", "(", "obj", ",", "'_djangoratings'", ",", "[", "]", ")", ":", "getattr", "(", "obj", ",", "field", ".", "name", ")", ".", "_update", "(", "commit", "=", "False", ")", "obj", ".", "save", "(", ")", "return", "retval" ]
Update GSSHA cards for simulation start
def _update_simulation_start_cards ( self ) : if self . simulation_start is not None : self . _update_card ( "START_DATE" , self . simulation_start . strftime ( "%Y %m %d" ) ) self . _update_card ( "START_TIME" , self . simulation_start . strftime ( "%H %M" ) )
4,525
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L157-L163
[ "def", "copy_and_move_messages", "(", "from_channel", ",", "to_channel", ")", ":", "with", "BlockSave", "(", "Message", ",", "query_dict", "=", "{", "'channel_id'", ":", "to_channel", ".", "key", "}", ")", ":", "for", "message", "in", "Message", ".", "objects", ".", "filter", "(", "channel", "=", "from_channel", ",", "typ", "=", "15", ")", ":", "message", ".", "key", "=", "''", "message", ".", "channel", "=", "to_channel", "message", ".", "save", "(", ")" ]
Update simulation end time from LSM
def _update_simulation_end_from_lsm ( self ) : te = self . l2g . xd . lsm . datetime [ - 1 ] simulation_end = te . replace ( tzinfo = utc ) . astimezone ( tz = self . tz ) . replace ( tzinfo = None ) if self . simulation_end is None : self . simulation_end = simulation_end elif self . simulation_end > simulation_end : self . simulation_end = simulation_end self . _update_card ( "END_TIME" , self . simulation_end . strftime ( "%Y %m %d %H %M" ) )
4,526
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L165-L180
[ "def", "is_restricted", "(", "self", ",", "assets", ",", "dt", ")", ":", "if", "isinstance", "(", "assets", ",", "Asset", ")", ":", "return", "self", ".", "_is_restricted_for_asset", "(", "assets", ",", "dt", ")", "is_restricted", "=", "partial", "(", "self", ".", "_is_restricted_for_asset", ",", "dt", "=", "dt", ")", "return", "pd", ".", "Series", "(", "index", "=", "pd", ".", "Index", "(", "assets", ")", ",", "data", "=", "vectorize", "(", "is_restricted", ",", "otypes", "=", "[", "bool", "]", ")", "(", "assets", ")", ")" ]
Adds a precip file to project with interpolation_type
def add_precip_file ( self , precip_file_path , interpolation_type = None ) : # precip file read in self . _update_card ( 'PRECIP_FILE' , precip_file_path , True ) if interpolation_type is None : # check if precip type exists already in card if not self . project_manager . getCard ( 'RAIN_INV_DISTANCE' ) and not self . project_manager . getCard ( 'RAIN_THIESSEN' ) : # if no type exists, then make it theissen self . _update_card ( 'RAIN_THIESSEN' , '' ) else : if interpolation_type . upper ( ) not in self . PRECIP_INTERP_TYPES : raise IndexError ( "Invalid interpolation_type {0}" . format ( interpolation_type ) ) interpolation_type = interpolation_type . upper ( ) if interpolation_type == "INV_DISTANCE" : self . _update_card ( 'RAIN_INV_DISTANCE' , '' ) self . project_manager . deleteCard ( 'RAIN_THIESSEN' , self . db_session ) else : self . _update_card ( 'RAIN_THIESSEN' , '' ) self . project_manager . deleteCard ( 'RAIN_INV_DISTANCE' , self . db_session )
4,527
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L197-L220
[ "def", "devices", "(", "self", ",", "timeout", "=", "None", ")", ":", "# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw", "# from Android system/core/adb/transport.c statename()", "re_device_info", "=", "re", ".", "compile", "(", "r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'", ")", "devices", "=", "[", "]", "lines", "=", "self", ".", "command_output", "(", "[", "\"devices\"", ",", "\"-l\"", "]", ",", "timeout", "=", "timeout", ")", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "line", "==", "'List of devices attached '", ":", "continue", "match", "=", "re_device_info", ".", "match", "(", "line", ")", "if", "match", ":", "device", "=", "{", "'device_serial'", ":", "match", ".", "group", "(", "1", ")", ",", "'state'", ":", "match", ".", "group", "(", "2", ")", "}", "remainder", "=", "line", "[", "match", ".", "end", "(", "2", ")", ":", "]", ".", "strip", "(", ")", "if", "remainder", ":", "try", ":", "device", ".", "update", "(", "dict", "(", "[", "j", ".", "split", "(", "':'", ")", "for", "j", "in", "remainder", ".", "split", "(", "' '", ")", "]", ")", ")", "except", "ValueError", ":", "self", ".", "_logger", ".", "warning", "(", "'devices: Unable to parse '", "'remainder for device %s'", "%", "line", ")", "devices", ".", "append", "(", "device", ")", "return", "devices" ]
Prepares Gage output for GSSHA simulation
def prepare_gag_lsm ( self , lsm_precip_data_var , lsm_precip_type , interpolation_type = None ) : if self . l2g is None : raise ValueError ( "LSM converter not loaded ..." ) # remove uniform precip cards for unif_precip_card in self . UNIFORM_PRECIP_CARDS : self . project_manager . deleteCard ( unif_precip_card , self . db_session ) with tmp_chdir ( self . project_manager . project_directory ) : # PRECIPITATION CARD out_gage_file = '{0}.gag' . format ( self . project_manager . name ) self . l2g . lsm_precip_to_gssha_precip_gage ( out_gage_file , lsm_data_var = lsm_precip_data_var , precip_type = lsm_precip_type ) # SIMULATION TIME CARDS self . _update_simulation_end_from_lsm ( ) self . set_simulation_duration ( self . simulation_end - self . simulation_start ) # precip file read in self . add_precip_file ( out_gage_file , interpolation_type ) # make sure xarray dataset closed self . l2g . xd . close ( )
4,528
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L222-L253
[ "def", "delete_annotation", "(", "self", ",", "term_ilx_id", ":", "str", ",", "annotation_type_ilx_id", ":", "str", ",", "annotation_value", ":", "str", ")", "->", "dict", ":", "term_data", "=", "self", ".", "get_entity", "(", "term_ilx_id", ")", "if", "not", "term_data", "[", "'id'", "]", ":", "exit", "(", "'term_ilx_id: '", "+", "term_ilx_id", "+", "' does not exist'", ")", "anno_data", "=", "self", ".", "get_entity", "(", "annotation_type_ilx_id", ")", "if", "not", "anno_data", "[", "'id'", "]", ":", "exit", "(", "'annotation_type_ilx_id: '", "+", "annotation_type_ilx_id", "+", "' does not exist'", ")", "entity_annotations", "=", "self", ".", "get_annotation_via_tid", "(", "term_data", "[", "'id'", "]", ")", "annotation_id", "=", "''", "for", "annotation", "in", "entity_annotations", ":", "if", "str", "(", "annotation", "[", "'tid'", "]", ")", "==", "str", "(", "term_data", "[", "'id'", "]", ")", ":", "if", "str", "(", "annotation", "[", "'annotation_tid'", "]", ")", "==", "str", "(", "anno_data", "[", "'id'", "]", ")", ":", "if", "str", "(", "annotation", "[", "'value'", "]", ")", "==", "str", "(", "annotation_value", ")", ":", "annotation_id", "=", "annotation", "[", "'id'", "]", "break", "if", "not", "annotation_id", ":", "print", "(", "'''WARNING: Annotation you wanted to delete does not exist '''", ")", "return", "None", "url", "=", "self", ".", "base_url", "+", "'term/edit-annotation/{annotation_id}'", ".", "format", "(", "annotation_id", "=", "annotation_id", ")", "data", "=", "{", "'tid'", ":", "' '", ",", "# for delete", "'annotation_tid'", ":", "' '", ",", "# for delete", "'value'", ":", "' '", ",", "# for delete", "'term_version'", ":", "' '", ",", "'annotation_term_version'", ":", "' '", ",", "}", "output", "=", "self", ".", "post", "(", "url", "=", "url", ",", "data", "=", "data", ",", ")", "# check output", "return", "output" ]
Prepares RAPID streamflow for GSSHA simulation
def prepare_rapid_streamflow ( self , path_to_rapid_qout , connection_list_file ) : ihg_filename = '{0}.ihg' . format ( self . project_manager . name ) with tmp_chdir ( self . project_manager . project_directory ) : # write out IHG file time_index_range = [ ] with RAPIDDataset ( path_to_rapid_qout , out_tzinfo = self . tz ) as qout_nc : time_index_range = qout_nc . get_time_index_range ( date_search_start = self . simulation_start , date_search_end = self . simulation_end ) if len ( time_index_range ) > 0 : time_array = qout_nc . get_time_array ( return_datetime = True , time_index_array = time_index_range ) # GSSHA STARTS INGESTING STREAMFLOW AT SECOND TIME STEP if self . simulation_start is not None : if self . simulation_start == time_array [ 0 ] : log . warning ( "First timestep of streamflow skipped " "in order for GSSHA to capture the streamflow." ) time_index_range = time_index_range [ 1 : ] time_array = time_array [ 1 : ] if len ( time_index_range ) > 0 : start_datetime = time_array [ 0 ] if self . simulation_start is None : self . _update_simulation_start ( start_datetime ) if self . simulation_end is None : self . simulation_end = time_array [ - 1 ] qout_nc . write_flows_to_gssha_time_series_ihg ( ihg_filename , connection_list_file , date_search_start = start_datetime , date_search_end = self . simulation_end , ) else : log . warning ( "No streamflow values found in time range ..." ) if len ( time_index_range ) > 0 : # update cards self . _update_simulation_start_cards ( ) self . _update_card ( "END_TIME" , self . simulation_end . strftime ( "%Y %m %d %H %M" ) ) self . _update_card ( "CHAN_POINT_INPUT" , ihg_filename , True ) # update duration self . set_simulation_duration ( self . simulation_end - self . simulation_start ) # UPDATE GMT CARD self . _update_gmt ( ) else : # cleanup os . remove ( ihg_filename ) self . project_manager . deleteCard ( 'CHAN_POINT_INPUT' , self . db_session )
4,529
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L255-L312
[ "def", "create_or_update_group_alias", "(", "self", ",", "name", ",", "alias_id", "=", "None", ",", "mount_accessor", "=", "None", ",", "canonical_id", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'name'", ":", "name", ",", "'mount_accessor'", ":", "mount_accessor", ",", "'canonical_id'", ":", "canonical_id", ",", "}", "if", "alias_id", "is", "not", "None", ":", "params", "[", "'id'", "]", "=", "alias_id", "api_path", "=", "'/v1/{mount_point}/group-alias'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
Add a uniform precip event
def add_uniform_precip_event ( self , intensity , duration ) : self . project_manager . setCard ( 'PRECIP_UNIF' , '' ) self . project_manager . setCard ( 'RAIN_INTENSITY' , str ( intensity ) ) self . project_manager . setCard ( 'RAIN_DURATION' , str ( duration . total_seconds ( ) / 60.0 ) )
4,530
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L383-L389
[ "def", "bitorder_decode", "(", "data", ",", "out", "=", "None", ",", "_bitorder", "=", "[", "]", ")", ":", "if", "not", "_bitorder", ":", "_bitorder", ".", "append", "(", "b'\\x00\\x80@\\xc0 \\xa0`\\xe0\\x10\\x90P\\xd00\\xb0p\\xf0\\x08\\x88H\\xc8('", "b'\\xa8h\\xe8\\x18\\x98X\\xd88\\xb8x\\xf8\\x04\\x84D\\xc4$\\xa4d\\xe4\\x14'", "b'\\x94T\\xd44\\xb4t\\xf4\\x0c\\x8cL\\xcc,\\xacl\\xec\\x1c\\x9c\\\\\\xdc<\\xbc|'", "b'\\xfc\\x02\\x82B\\xc2\"\\xa2b\\xe2\\x12\\x92R\\xd22\\xb2r\\xf2\\n\\x8aJ\\xca*'", "b'\\xaaj\\xea\\x1a\\x9aZ\\xda:\\xbaz\\xfa\\x06\\x86F\\xc6&\\xa6f\\xe6\\x16'", "b'\\x96V\\xd66\\xb6v\\xf6\\x0e\\x8eN\\xce.\\xaen\\xee\\x1e\\x9e^\\xde>\\xbe~'", "b'\\xfe\\x01\\x81A\\xc1!\\xa1a\\xe1\\x11\\x91Q\\xd11\\xb1q\\xf1\\t\\x89I\\xc9)'", "b'\\xa9i\\xe9\\x19\\x99Y\\xd99\\xb9y\\xf9\\x05\\x85E\\xc5%\\xa5e\\xe5\\x15'", "b'\\x95U\\xd55\\xb5u\\xf5\\r\\x8dM\\xcd-\\xadm\\xed\\x1d\\x9d]\\xdd=\\xbd}'", "b'\\xfd\\x03\\x83C\\xc3#\\xa3c\\xe3\\x13\\x93S\\xd33\\xb3s\\xf3\\x0b\\x8bK'", "b'\\xcb+\\xabk\\xeb\\x1b\\x9b[\\xdb;\\xbb{\\xfb\\x07\\x87G\\xc7\\'\\xa7g\\xe7'", "b'\\x17\\x97W\\xd77\\xb7w\\xf7\\x0f\\x8fO\\xcf/\\xafo\\xef\\x1f\\x9f_'", "b'\\xdf?\\xbf\\x7f\\xff'", ")", "_bitorder", ".", "append", "(", "numpy", ".", "frombuffer", "(", "_bitorder", "[", "0", "]", ",", "dtype", "=", "'uint8'", ")", ")", "try", ":", "view", "=", "data", ".", "view", "(", "'uint8'", ")", "numpy", ".", "take", "(", "_bitorder", "[", "1", "]", ",", "view", ",", "out", "=", "view", ")", "return", "data", "except", "AttributeError", ":", "return", "data", ".", "translate", "(", "_bitorder", "[", "0", "]", ")", "except", "ValueError", ":", "raise", "NotImplementedError", "(", "'slices of arrays not supported'", ")", "return", "None" ]
Based on timezone and start date the GMT card is updated
def _update_gmt ( self ) : if self . simulation_start is not None : # NOTE: Because of daylight savings time, # offset result depends on time of the year offset_string = str ( self . simulation_start . replace ( tzinfo = self . tz ) . utcoffset ( ) . total_seconds ( ) / 3600. ) self . _update_card ( 'GMT' , offset_string )
4,531
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L497-L506
[ "def", "normalize_filename", "(", "filename", ")", ":", "# if the url pointed to a directory then just replace all the special chars", "filename", "=", "re", ".", "sub", "(", "\"/|\\\\|;|:|\\?|=\"", ",", "\"_\"", ",", "filename", ")", "if", "len", "(", "filename", ")", ">", "150", ":", "prefix", "=", "hashlib", ".", "md5", "(", "filename", ")", ".", "hexdigest", "(", ")", "filename", "=", "prefix", "+", "filename", "[", "-", "140", ":", "]", "return", "filename" ]
Prepares HMET data for GSSHA simulation from land surface model data .
def prepare_hmet_lsm ( self , lsm_data_var_map_array , hmet_ascii_output_folder = None , netcdf_file_path = None ) : if self . l2g is None : raise ValueError ( "LSM converter not loaded ..." ) with tmp_chdir ( self . project_manager . project_directory ) : # GSSHA simulation does not work after HMET data is finished self . _update_simulation_end_from_lsm ( ) # HMET CARDS if netcdf_file_path is not None : self . l2g . lsm_data_to_subset_netcdf ( netcdf_file_path , lsm_data_var_map_array ) self . _update_card ( "HMET_NETCDF" , netcdf_file_path , True ) self . project_manager . deleteCard ( 'HMET_ASCII' , self . db_session ) else : if "{0}" in hmet_ascii_output_folder and "{1}" in hmet_ascii_output_folder : hmet_ascii_output_folder = hmet_ascii_output_folder . format ( self . simulation_start . strftime ( "%Y%m%d%H%M" ) , self . simulation_end . strftime ( "%Y%m%d%H%M" ) ) self . l2g . lsm_data_to_arc_ascii ( lsm_data_var_map_array , main_output_folder = os . path . join ( self . gssha_directory , hmet_ascii_output_folder ) ) self . _update_card ( "HMET_ASCII" , os . path . join ( hmet_ascii_output_folder , 'hmet_file_list.txt' ) , True ) self . project_manager . deleteCard ( 'HMET_NETCDF' , self . db_session ) # UPDATE GMT CARD self . _update_gmt ( )
4,532
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L508-L542
[ "def", "bulk_upsert", "(", "self", ",", "docs", ",", "namespace", ",", "timestamp", ")", ":", "def", "docs_to_upsert", "(", ")", ":", "doc", "=", "None", "for", "doc", "in", "docs", ":", "# Remove metadata and redundant _id", "index", ",", "doc_type", "=", "self", ".", "_index_and_mapping", "(", "namespace", ")", "doc_id", "=", "u", "(", "doc", ".", "pop", "(", "\"_id\"", ")", ")", "document_action", "=", "{", "'_index'", ":", "index", ",", "'_type'", ":", "doc_type", ",", "'_id'", ":", "doc_id", ",", "'_source'", ":", "self", ".", "_formatter", ".", "format_document", "(", "doc", ")", "}", "document_meta", "=", "{", "'_index'", ":", "self", ".", "meta_index_name", ",", "'_type'", ":", "self", ".", "meta_type", ",", "'_id'", ":", "doc_id", ",", "'_source'", ":", "{", "'ns'", ":", "namespace", ",", "'_ts'", ":", "timestamp", "}", "}", "yield", "document_action", "yield", "document_meta", "if", "doc", "is", "None", ":", "raise", "errors", ".", "EmptyDocsError", "(", "\"Cannot upsert an empty sequence of \"", "\"documents into Elastic Search\"", ")", "try", ":", "kw", "=", "{", "}", "if", "self", ".", "chunk_size", ">", "0", ":", "kw", "[", "'chunk_size'", "]", "=", "self", ".", "chunk_size", "responses", "=", "streaming_bulk", "(", "client", "=", "self", ".", "elastic", ",", "actions", "=", "docs_to_upsert", "(", ")", ",", "*", "*", "kw", ")", "for", "ok", ",", "resp", "in", "responses", ":", "if", "not", "ok", ":", "LOG", ".", "error", "(", "\"Could not bulk-upsert document \"", "\"into ElasticSearch: %r\"", "%", "resp", ")", "if", "self", ".", "auto_commit_interval", "==", "0", ":", "self", ".", "commit", "(", ")", "except", "errors", ".", "EmptyDocsError", ":", "# This can happen when mongo-connector starts up, there is no", "# config file, but nothing to dump", "pass" ]
Returns the number of characters available if sample string were to be printed in the terminal .
def get_remaining_width ( sample_string , max_terminal_width = None ) : if max_terminal_width is not None : available_width = min ( terminal_width ( ) , max_terminal_width ) else : available_width = terminal_width ( ) return available_width - len ( sample_string )
4,533
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/misc.py#L134-L150
[ "def", "mangle_volume", "(", "citation_elements", ")", ":", "volume_re", "=", "re", ".", "compile", "(", "ur\"(\\d+)([A-Z])\"", ",", "re", ".", "U", "|", "re", ".", "I", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", ":", "matches", "=", "volume_re", ".", "match", "(", "el", "[", "'volume'", "]", ")", "if", "matches", ":", "el", "[", "'volume'", "]", "=", "matches", ".", "group", "(", "2", ")", "+", "matches", ".", "group", "(", "1", ")", "return", "citation_elements" ]
Defines structs and populates _WindowsCSBI . CSBI .
def _define_csbi ( ) : if _WindowsCSBI . CSBI is not None : return class COORD ( ctypes . Structure ) : """Windows COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119""" _fields_ = [ ( 'X' , ctypes . c_short ) , ( 'Y' , ctypes . c_short ) ] class SmallRECT ( ctypes . Structure ) : """Windows SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311""" _fields_ = [ ( 'Left' , ctypes . c_short ) , ( 'Top' , ctypes . c_short ) , ( 'Right' , ctypes . c_short ) , ( 'Bottom' , ctypes . c_short ) ] class ConsoleScreenBufferInfo ( ctypes . Structure ) : """Windows CONSOLE_SCREEN_BUFFER_INFO structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093 """ _fields_ = [ ( 'dwSize' , COORD ) , ( 'dwCursorPosition' , COORD ) , ( 'wAttributes' , ctypes . wintypes . WORD ) , ( 'srWindow' , SmallRECT ) , ( 'dwMaximumWindowSize' , COORD ) ] _WindowsCSBI . CSBI = ConsoleScreenBufferInfo
4,534
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/misc.py#L39-L65
[ "def", "_next_server", "(", "self", ")", ":", "if", "self", ".", "options", "[", "\"dont_randomize\"", "]", ":", "server", "=", "self", ".", "_server_pool", ".", "pop", "(", "0", ")", "self", ".", "_server_pool", ".", "append", "(", "server", ")", "else", ":", "shuffle", "(", "self", ".", "_server_pool", ")", "s", "=", "None", "for", "server", "in", "self", ".", "_server_pool", ":", "if", "self", ".", "options", "[", "\"max_reconnect_attempts\"", "]", ">", "0", "and", "(", "server", ".", "reconnects", ">", "self", ".", "options", "[", "\"max_reconnect_attempts\"", "]", ")", ":", "continue", "else", ":", "s", "=", "server", "return", "s" ]
Initializes the WINDLL resource and populated the CSBI class variable .
def initialize ( ) : _WindowsCSBI . _define_csbi ( ) _WindowsCSBI . HANDLE_STDERR = _WindowsCSBI . HANDLE_STDERR or _WindowsCSBI . WINDLL . kernel32 . GetStdHandle ( - 12 ) _WindowsCSBI . HANDLE_STDOUT = _WindowsCSBI . HANDLE_STDOUT or _WindowsCSBI . WINDLL . kernel32 . GetStdHandle ( - 11 ) if _WindowsCSBI . WINDLL . kernel32 . GetConsoleScreenBufferInfo . argtypes : return _WindowsCSBI . WINDLL . kernel32 . GetStdHandle . argtypes = [ ctypes . wintypes . DWORD ] _WindowsCSBI . WINDLL . kernel32 . GetStdHandle . restype = ctypes . wintypes . HANDLE _WindowsCSBI . WINDLL . kernel32 . GetConsoleScreenBufferInfo . restype = ctypes . wintypes . BOOL _WindowsCSBI . WINDLL . kernel32 . GetConsoleScreenBufferInfo . argtypes = [ ctypes . wintypes . HANDLE , ctypes . POINTER ( _WindowsCSBI . CSBI ) ]
4,535
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/misc.py#L68-L81
[ "def", "get_robotstxt_parser", "(", "url", ",", "session", "=", "None", ")", ":", "rp", "=", "robotparser", ".", "RobotFileParser", "(", ")", "try", ":", "req", "=", "urlopen", "(", "url", ",", "session", ",", "max_content_bytes", "=", "MaxContentBytes", ",", "raise_for_status", "=", "False", ")", "except", "Exception", ":", "# connect or timeout errors are treated as an absent robots.txt", "rp", ".", "allow_all", "=", "True", "else", ":", "if", "req", ".", "status_code", ">=", "400", ":", "rp", ".", "allow_all", "=", "True", "elif", "req", ".", "status_code", "==", "200", ":", "rp", ".", "parse", "(", "req", ".", "text", ".", "splitlines", "(", ")", ")", "return", "rp" ]
Applying genotype calls to multi - way alignment incidence matrix
def stencil ( * * kwargs ) : alnfile = kwargs . get ( 'alnfile' ) gtypefile = kwargs . get ( 'gtypefile' ) grpfile = kwargs . get ( 'grpfile' ) if grpfile is None : grpfile2chk = os . path . join ( DATA_DIR , 'ref.gene2transcripts.tsv' ) if os . path . exists ( grpfile2chk ) : grpfile = grpfile2chk else : print >> sys . stderr , '[gbrs::stencil] A group file is *not* given. Genotype will be stenciled as is.' # Load alignment incidence matrix ('alnfile' is assumed to be in multiway transcriptome) alnmat = emase . AlignmentPropertyMatrix ( h5file = alnfile , grpfile = grpfile ) # Load genotype calls hid = dict ( zip ( alnmat . hname , np . arange ( alnmat . num_haplotypes ) ) ) gid = dict ( zip ( alnmat . gname , np . arange ( len ( alnmat . gname ) ) ) ) gtmask = np . zeros ( ( alnmat . num_haplotypes , alnmat . num_loci ) ) gtcall_g = dict . fromkeys ( alnmat . gname ) with open ( gtypefile ) as fh : if grpfile is not None : gtcall_t = dict . fromkeys ( alnmat . lname ) for curline in dropwhile ( is_comment , fh ) : item = curline . rstrip ( ) . split ( "\t" ) g , gt = item [ : 2 ] gtcall_g [ g ] = gt hid2set = np . array ( [ hid [ c ] for c in gt ] ) tid2set = np . array ( alnmat . groups [ gid [ g ] ] ) gtmask [ np . meshgrid ( hid2set , tid2set ) ] = 1.0 for t in tid2set : gtcall_t [ alnmat . lname [ t ] ] = gt else : for curline in dropwhile ( is_comment , fh ) : item = curline . rstrip ( ) . split ( "\t" ) g , gt = item [ : 2 ] gtcall_g [ g ] = gt hid2set = np . array ( [ hid [ c ] for c in gt ] ) gtmask [ np . meshgrid ( hid2set , gid [ g ] ) ] = 1.0 alnmat . multiply ( gtmask , axis = 2 ) for h in xrange ( alnmat . num_haplotypes ) : alnmat . data [ h ] . eliminate_zeros ( ) outfile = kwargs . get ( 'outfile' ) if outfile is None : outfile = 'gbrs.stenciled.' + os . path . basename ( alnfile ) alnmat . save ( h5file = outfile )
4,536
https://github.com/churchill-lab/gbrs/blob/0f32d2620e82cb1459e56083af7c6e5c72d6ea88/gbrs/emase_utils.py#L160-L213
[ "def", "exclude", "(", "requestContext", ",", "seriesList", ",", "pattern", ")", ":", "regex", "=", "re", ".", "compile", "(", "pattern", ")", "return", "[", "s", "for", "s", "in", "seriesList", "if", "not", "regex", ".", "search", "(", "s", ".", "name", ")", "]" ]
Bulk register_item .
def register_items ( self , items ) : for item in items : item . set_parent ( self ) self . items . extend ( items )
4,537
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L38-L47
[ "def", "create_logstash", "(", "self", ",", "*", "*", "kwargs", ")", ":", "logstash", "=", "predix", ".", "admin", ".", "logstash", ".", "Logging", "(", "*", "*", "kwargs", ")", "logstash", ".", "create", "(", ")", "logstash", ".", "add_to_manifest", "(", "self", ")", "logging", ".", "info", "(", "'Install Kibana-Me-Logs application by following GitHub instructions'", ")", "logging", ".", "info", "(", "'git clone https://github.com/cloudfoundry-community/kibana-me-logs.git'", ")", "return", "logstash" ]
Get all the endpoints under this node in a tree like structure .
def endpoints ( self ) : children = [ item . endpoints ( ) for item in self . items ] return self . name , self . endpoint , children
4,538
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L74-L86
[ "def", "to_td", "(", "frame", ",", "name", ",", "con", ",", "if_exists", "=", "'fail'", ",", "time_col", "=", "None", ",", "time_index", "=", "None", ",", "index", "=", "True", ",", "index_label", "=", "None", ",", "chunksize", "=", "10000", ",", "date_format", "=", "None", ")", ":", "database", ",", "table", "=", "name", ".", "split", "(", "'.'", ")", "uploader", "=", "StreamingUploader", "(", "con", ".", "client", ",", "database", ",", "table", ",", "show_progress", "=", "True", ",", "clear_progress", "=", "True", ")", "uploader", ".", "message", "(", "'Streaming import into: {0}.{1}'", ".", "format", "(", "database", ",", "table", ")", ")", "# check existence", "if", "if_exists", "==", "'fail'", ":", "try", ":", "con", ".", "client", ".", "table", "(", "database", ",", "table", ")", "except", "tdclient", ".", "api", ".", "NotFoundError", ":", "uploader", ".", "message", "(", "'creating new table...'", ")", "con", ".", "client", ".", "create_log_table", "(", "database", ",", "table", ")", "else", ":", "raise", "RuntimeError", "(", "'table \"%s\" already exists'", "%", "name", ")", "elif", "if_exists", "==", "'replace'", ":", "try", ":", "con", ".", "client", ".", "table", "(", "database", ",", "table", ")", "except", "tdclient", ".", "api", ".", "NotFoundError", ":", "pass", "else", ":", "uploader", ".", "message", "(", "'deleting old table...'", ")", "con", ".", "client", ".", "delete_table", "(", "database", ",", "table", ")", "uploader", ".", "message", "(", "'creating new table...'", ")", "con", ".", "client", ".", "create_log_table", "(", "database", ",", "table", ")", "elif", "if_exists", "==", "'append'", ":", "try", ":", "con", ".", "client", ".", "table", "(", "database", ",", "table", ")", "except", "tdclient", ".", "api", ".", "NotFoundError", ":", "uploader", ".", "message", "(", "'creating new table...'", ")", "con", ".", "client", ".", "create_log_table", "(", "database", ",", "table", ")", "else", ":", "raise", "ValueError", "(", "'invalid value 
for if_exists: %s'", "%", "if_exists", ")", "# \"time_index\" implies \"index=False\"", "if", "time_index", ":", "index", "=", "None", "# convert", "frame", "=", "frame", ".", "copy", "(", ")", "frame", "=", "_convert_time_column", "(", "frame", ",", "time_col", ",", "time_index", ")", "frame", "=", "_convert_index_column", "(", "frame", ",", "index", ",", "index_label", ")", "frame", "=", "_convert_date_format", "(", "frame", ",", "date_format", ")", "# upload", "uploader", ".", "upload_frame", "(", "frame", ",", "chunksize", ")", "uploader", ".", "wait_for_import", "(", "len", "(", "frame", ")", ")" ]
Get the absolute name of self .
def absolute_name ( self ) : if self . is_root ( ) or self . parent . is_root ( ) : return utils . slugify ( self . name ) return ':' . join ( [ self . parent . absolute_name , utils . slugify ( self . name ) ] )
4,539
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L103-L111
[ "def", "union", "(", "self", ",", "*", "dstreams", ")", ":", "if", "not", "dstreams", ":", "raise", "ValueError", "(", "\"should have at least one DStream to union\"", ")", "if", "len", "(", "dstreams", ")", "==", "1", ":", "return", "dstreams", "[", "0", "]", "if", "len", "(", "set", "(", "s", ".", "_jrdd_deserializer", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same serializer\"", ")", "if", "len", "(", "set", "(", "s", ".", "_slideDuration", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same slide duration\"", ")", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "api", ".", "java", ".", "JavaDStream", "jdstreams", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "dstreams", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dstreams", ")", ")", ":", "jdstreams", "[", "i", "]", "=", "dstreams", "[", "i", "]", ".", "_jdstream", "return", "DStream", "(", "self", ".", "_jssc", ".", "union", "(", "jdstreams", ")", ",", "self", ",", "dstreams", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Get the absolute url of self .
def absolute_url ( self ) : if self . is_root ( ) : return utils . concat_urls ( self . url ) return utils . concat_urls ( self . parent . absolute_url , self . url )
4,540
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L114-L122
[ "def", "_read_footer", "(", "file_obj", ")", ":", "footer_size", "=", "_get_footer_size", "(", "file_obj", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "\"Footer size in bytes: %s\"", ",", "footer_size", ")", "file_obj", ".", "seek", "(", "-", "(", "8", "+", "footer_size", ")", ",", "2", ")", "# seek to beginning of footer", "tin", "=", "TFileTransport", "(", "file_obj", ")", "pin", "=", "TCompactProtocolFactory", "(", ")", ".", "get_protocol", "(", "tin", ")", "fmd", "=", "parquet_thrift", ".", "FileMetaData", "(", ")", "fmd", ".", "read", "(", "pin", ")", "return", "fmd" ]
assumes eastward motion
def split_tracks ( lat , lon , * args ) : tracks = [ ] lt , ln = [ lat [ 0 ] ] , [ lon [ 0 ] ] zz = [ [ z [ 0 ] ] for z in args ] for i in range ( 1 , len ( lon ) ) : lt . append ( lat [ i ] ) for z , a in zip ( zz , args ) : z . append ( a [ i ] ) d1 = abs ( lon [ i ] - lon [ i - 1 ] ) d2 = abs ( ( lon [ i - 1 ] + 360 ) - lon [ i ] ) d3 = abs ( lon [ i - 1 ] - ( lon [ i ] + 360 ) ) if d2 < d1 : ln . append ( lon [ i ] - 360 ) tracks . append ( [ np . array ( lt ) , np . array ( ln ) ] + [ np . array ( z ) for z in zz ] ) lt = [ lat [ i - 1 ] , lat [ i ] ] ln = [ lon [ i - 1 ] + 360 , lon [ i ] ] zz = [ [ z [ i - 1 ] ] for z in args ] elif d3 < d1 : ln . append ( lon [ i ] + 360 ) tracks . append ( [ np . array ( lt ) , np . array ( ln ) ] + [ np . array ( z ) for z in zz ] ) lt = [ lat [ i - 1 ] , lat [ i ] ] ln = [ lon [ i - 1 ] - 360 , lon [ i ] ] zz = [ [ z [ i - 1 ] , z [ i ] ] for z in args ] else : ln . append ( lon [ i ] ) if len ( lt ) : tracks . append ( [ np . array ( lt ) , np . array ( ln ) ] + [ np . array ( z ) for z in zz ] ) return tracks
4,541
https://github.com/theodoregoetz/wernher/blob/ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e/wernher/map_view.py#L56-L87
[ "def", "show_wbridges", "(", "self", ")", ":", "grp", "=", "self", ".", "getPseudoBondGroup", "(", "\"Water Bridges-%i\"", "%", "self", ".", "tid", ",", "associateWith", "=", "[", "self", ".", "model", "]", ")", "grp", ".", "lineWidth", "=", "3", "for", "i", ",", "wbridge", "in", "enumerate", "(", "self", ".", "plcomplex", ".", "waterbridges", ")", ":", "c", "=", "grp", ".", "newPseudoBond", "(", "self", ".", "atoms", "[", "wbridge", ".", "water_id", "]", ",", "self", ".", "atoms", "[", "wbridge", ".", "acc_id", "]", ")", "c", ".", "color", "=", "self", ".", "colorbyname", "(", "'cornflower blue'", ")", "self", ".", "water_ids", ".", "append", "(", "wbridge", ".", "water_id", ")", "b", "=", "grp", ".", "newPseudoBond", "(", "self", ".", "atoms", "[", "wbridge", ".", "don_id", "]", ",", "self", ".", "atoms", "[", "wbridge", ".", "water_id", "]", ")", "b", ".", "color", "=", "self", ".", "colorbyname", "(", "'cornflower blue'", ")", "self", ".", "water_ids", ".", "append", "(", "wbridge", ".", "water_id", ")", "if", "wbridge", ".", "protisdon", ":", "self", ".", "bs_res_ids", ".", "append", "(", "wbridge", ".", "don_id", ")", "else", ":", "self", ".", "bs_res_ids", ".", "append", "(", "wbridge", ".", "acc_id", ")" ]
Returns the rate with formatting . If done returns the overall rate instead .
def str_rate ( self ) : # Handle special cases. if not self . _eta . started or self . _eta . stalled or not self . rate : return '--.-KiB/s' unit_rate , unit = UnitByte ( self . _eta . rate_overall if self . done else self . rate ) . auto if unit_rate >= 100 : formatter = '%d' elif unit_rate >= 10 : formatter = '%.1f' else : formatter = '%.2f' return '{0}{1}/s' . format ( locale . format ( formatter , unit_rate , grouping = False ) , unit )
4,542
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/progress.py#L239-L252
[ "def", "get_contacts", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "params", "=", "{", "}", ")", ":", "url", "=", "self", ".", "CONTACTS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "key", "is", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Returns the rate with formatting .
def str_rate ( self ) : # Handle special cases. if not self . _eta . started or self . _eta . stalled or not self . rate : return '--- KiB/s' unit_rate , unit = UnitByte ( self . rate ) . auto_no_thousands if unit_rate >= 10 : formatter = '%d' else : formatter = '%0.1f' return '{0} {1}/s' . format ( locale . format ( formatter , unit_rate , grouping = False ) , unit )
4,543
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/progress.py#L339-L350
[ "def", "buffer_iter", "(", "self", ",", "block_size", "=", "1024", ")", ":", "streams", "=", "(", "self", ".", "vert_data", ",", "self", ".", "idx_data", ",", ")", "# Chain streams seamlessly", "for", "stream", "in", "streams", ":", "stream", ".", "seek", "(", "0", ")", "while", "True", ":", "chunk", "=", "stream", ".", "read", "(", "block_size", ")", "if", "chunk", ":", "yield", "chunk", "else", ":", "break" ]
Initialize database with gsshapy tables
def init_db ( sqlalchemy_url ) : engine = create_engine ( sqlalchemy_url ) start = time . time ( ) metadata . create_all ( engine ) return time . time ( ) - start
4,544
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L35-L42
[ "def", "measure", "(", "*", "qubits", ":", "raw_types", ".", "Qid", ",", "key", ":", "Optional", "[", "str", "]", "=", "None", ",", "invert_mask", ":", "Tuple", "[", "bool", ",", "...", "]", "=", "(", ")", ")", "->", "gate_operation", ".", "GateOperation", ":", "for", "qubit", "in", "qubits", ":", "if", "isinstance", "(", "qubit", ",", "np", ".", "ndarray", ")", ":", "raise", "ValueError", "(", "'measure() was called a numpy ndarray. Perhaps you meant '", "'to call measure_state_vector on numpy array?'", ")", "elif", "not", "isinstance", "(", "qubit", ",", "raw_types", ".", "Qid", ")", ":", "raise", "ValueError", "(", "'measure() was called with type different than Qid.'", ")", "if", "key", "is", "None", ":", "key", "=", "_default_measurement_key", "(", "qubits", ")", "return", "MeasurementGate", "(", "len", "(", "qubits", ")", ",", "key", ",", "invert_mask", ")", ".", "on", "(", "*", "qubits", ")" ]
Create session with database to work in
def get_sessionmaker ( sqlalchemy_url , engine = None ) : if engine is None : engine = create_engine ( sqlalchemy_url ) return sessionmaker ( bind = engine )
4,545
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L234-L240
[ "def", "_parse", "(", "self", ")", ":", "cur_ver", "=", "None", "cur_line", "=", "None", "for", "line", "in", "self", ".", "content", ":", "m", "=", "re", ".", "match", "(", "'[^ ]+ \\(([0-9]+\\.[0-9]+\\.[0-9]+)-[0-9]+\\) [^ ]+; urgency=[^ ]+'", ",", "line", ")", "if", "m", ":", "cur_ver", "=", "m", ".", "group", "(", "1", ")", "self", ".", "versions", ".", "append", "(", "cur_ver", ")", "self", ".", "entries", "[", "cur_ver", "]", "=", "[", "]", "cur_entry", "=", "self", ".", "entries", "[", "cur_ver", "]", "if", "self", ".", "latest_version", "is", "None", "or", "StrictVersion", "(", "cur_ver", ")", ">", "StrictVersion", "(", "self", ".", "latest_version", ")", ":", "self", ".", "latest_version", "=", "m", ".", "group", "(", "1", ")", "elif", "cur_ver", ":", "m", "=", "re", ".", "match", "(", "' \\* (.*)'", ",", "line", ")", "if", "m", ":", "cur_entry", ".", "append", "(", "m", ".", "group", "(", "1", ")", ".", "strip", "(", ")", ")", "elif", "not", "re", ".", "match", "(", "'$'", ",", "line", ")", "and", "re", ".", "match", "(", "' *[^$]+'", ",", "line", ")", ":", "cur_entry", "[", "-", "1", "]", "+=", "\" \"", "+", "line", ".", "strip", "(", ")" ]
Load project manager and in memory sqlite db sessionmaker for GSSHA project
def get_project_session ( project_name , project_directory , map_type = None ) : sqlalchemy_url , sql_engine = init_sqlite_memory ( ) gdb_sessionmaker = get_sessionmaker ( sqlalchemy_url , sql_engine ) project_manager = ProjectFile ( name = project_name , project_directory = project_directory , map_type = map_type ) return project_manager , gdb_sessionmaker
4,546
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L243-L252
[ "def", "hold", "(", "name", ",", "seconds", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "start", "=", "time", ".", "time", "(", ")", "if", "'timer'", "not", "in", "__context__", ":", "__context__", "[", "'timer'", "]", "=", "{", "}", "if", "name", "not", "in", "__context__", "[", "'timer'", "]", ":", "__context__", "[", "'timer'", "]", "[", "name", "]", "=", "start", "if", "(", "start", "-", "__context__", "[", "'timer'", "]", "[", "name", "]", ")", ">", "seconds", ":", "ret", "[", "'result'", "]", "=", "True", "__context__", "[", "'timer'", "]", "[", "name", "]", "=", "start", "return", "ret" ]
Load the settings from a named section .
def get_settings ( config_uri , section = None , defaults = None ) : loader = get_loader ( config_uri ) return loader . get_settings ( section , defaults )
4,547
https://github.com/Pylons/plaster/blob/e70e55c182a8300d7ccf67e54d47740c72e72cd8/src/plaster/loaders.py#L33-L60
[ "def", "_verify", "(", "vm_", ")", ":", "log", ".", "info", "(", "'Verifying credentials for %s'", ",", "vm_", "[", "'name'", "]", ")", "win_installer", "=", "config", ".", "get_cloud_config_value", "(", "'win_installer'", ",", "vm_", ",", "__opts__", ")", "if", "win_installer", ":", "log", ".", "debug", "(", "'Testing Windows authentication method for %s'", ",", "vm_", "[", "'name'", "]", ")", "if", "not", "HAS_IMPACKET", ":", "log", ".", "error", "(", "'Impacket library not found'", ")", "return", "False", "# Test Windows connection", "kwargs", "=", "{", "'host'", ":", "vm_", "[", "'ssh_host'", "]", ",", "'username'", ":", "config", ".", "get_cloud_config_value", "(", "'win_username'", ",", "vm_", ",", "__opts__", ",", "default", "=", "'Administrator'", ")", ",", "'password'", ":", "config", ".", "get_cloud_config_value", "(", "'win_password'", ",", "vm_", ",", "__opts__", ",", "default", "=", "''", ")", "}", "# Test SMB connection", "try", ":", "log", ".", "debug", "(", "'Testing SMB protocol for %s'", ",", "vm_", "[", "'name'", "]", ")", "if", "__utils__", "[", "'smb.get_conn'", "]", "(", "*", "*", "kwargs", ")", "is", "False", ":", "return", "False", "except", "(", "smbSessionError", ",", "smb3SessionError", ")", "as", "exc", ":", "log", ".", "error", "(", "'Exception: %s'", ",", "exc", ")", "return", "False", "# Test WinRM connection", "use_winrm", "=", "config", ".", "get_cloud_config_value", "(", "'use_winrm'", ",", "vm_", ",", "__opts__", ",", "default", "=", "False", ")", "if", "use_winrm", ":", "log", ".", "debug", "(", "'WinRM protocol requested for %s'", ",", "vm_", "[", "'name'", "]", ")", "if", "not", "HAS_WINRM", ":", "log", ".", "error", "(", "'WinRM library not found'", ")", "return", "False", "kwargs", "[", "'port'", "]", "=", "config", ".", "get_cloud_config_value", "(", "'winrm_port'", ",", "vm_", ",", "__opts__", ",", "default", "=", "5986", ")", "kwargs", "[", "'timeout'", "]", "=", "10", "try", ":", "log", ".", "debug", "(", "'Testing 
WinRM protocol for %s'", ",", "vm_", "[", "'name'", "]", ")", "return", "__utils__", "[", "'cloud.wait_for_winrm'", "]", "(", "*", "*", "kwargs", ")", "is", "not", "None", "except", "(", "ConnectionError", ",", "ConnectTimeout", ",", "ReadTimeout", ",", "SSLError", ",", "ProxyError", ",", "RetryError", ",", "InvalidSchema", ",", "WinRMTransportError", ")", "as", "exc", ":", "log", ".", "error", "(", "'Exception: %s'", ",", "exc", ")", "return", "False", "return", "True", "else", ":", "log", ".", "debug", "(", "'Testing SSH authentication method for %s'", ",", "vm_", "[", "'name'", "]", ")", "# Test SSH connection", "kwargs", "=", "{", "'host'", ":", "vm_", "[", "'ssh_host'", "]", ",", "'port'", ":", "config", ".", "get_cloud_config_value", "(", "'ssh_port'", ",", "vm_", ",", "__opts__", ",", "default", "=", "22", ")", ",", "'username'", ":", "config", ".", "get_cloud_config_value", "(", "'ssh_username'", ",", "vm_", ",", "__opts__", ",", "default", "=", "'root'", ")", ",", "'password'", ":", "config", ".", "get_cloud_config_value", "(", "'password'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", ",", "'key_filename'", ":", "config", ".", "get_cloud_config_value", "(", "'key_filename'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "config", ".", "get_cloud_config_value", "(", "'ssh_keyfile'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "None", ")", ")", ",", "'gateway'", ":", "vm_", ".", "get", "(", "'gateway'", ",", "None", ")", ",", "'maxtries'", ":", "1", "}", "log", ".", "debug", "(", "'Testing SSH protocol for %s'", ",", "vm_", "[", "'name'", "]", ")", "try", ":", "return", "__utils__", "[", "'cloud.wait_for_passwd'", "]", "(", "*", "*", "kwargs", ")", "is", "True", "except", "SaltCloudException", "as", "exc", ":", "log", ".", "error", "(", "'Exception: %s'", ",", "exc", ")", "return", "False" ]
Find all loaders that match the requested scheme and protocols .
def find_loaders ( scheme , protocols = None ) : # build a list of all required entry points matching_groups = [ 'plaster.loader_factory' ] if protocols : matching_groups += [ 'plaster.{0}_loader_factory' . format ( proto ) for proto in protocols ] scheme = scheme . lower ( ) # if a distribution is specified then it overrides the default search parts = scheme . split ( '+' , 1 ) if len ( parts ) == 2 : try : distro = pkg_resources . get_distribution ( parts [ 0 ] ) except pkg_resources . DistributionNotFound : pass else : ep = _find_ep_in_dist ( distro , parts [ 1 ] , matching_groups ) # if we got one or more loaders from a specific distribution # then they override everything else so we'll just return them if ep : return [ EntryPointLoaderInfo ( ep , protocols ) ] # find any distributions supporting the default loader protocol possible_entry_points = [ ep for ep in pkg_resources . iter_entry_points ( 'plaster.loader_factory' ) if scheme is None or scheme == ep . name . lower ( ) ] distros = { ep . dist for ep in possible_entry_points } matched_entry_points = list ( filter ( None , [ _find_ep_in_dist ( distro , scheme , matching_groups ) for distro in distros ] ) ) return [ EntryPointLoaderInfo ( ep , protocols = protocols ) for ep in matched_entry_points ]
4,548
https://github.com/Pylons/plaster/blob/e70e55c182a8300d7ccf67e54d47740c72e72cd8/src/plaster/loaders.py#L120-L173
[ "def", "setup_standalone_signals", "(", "instance", ")", ":", "window", "=", "instance", ".", "get_widget", "(", "'config-window'", ")", "window", ".", "connect", "(", "'delete-event'", ",", "Gtk", ".", "main_quit", ")", "# We need to block the execution of the already associated", "# callback before connecting the new handler.", "button", "=", "instance", ".", "get_widget", "(", "'button1'", ")", "button", ".", "handler_block_by_func", "(", "instance", ".", "gtk_widget_destroy", ")", "button", ".", "connect", "(", "'clicked'", ",", "Gtk", ".", "main_quit", ")", "return", "instance" ]
Combines multiple dicts in one .
def combine_dicts ( * dicts , copy = False , base = None ) : if len ( dicts ) == 1 and base is None : # Only one input dict. cd = dicts [ 0 ] . copy ( ) else : cd = { } if base is None else base # Initialize empty dict. for d in dicts : # Combine dicts. if d : # noinspection PyTypeChecker cd . update ( d ) # Return combined dict. return { k : _copy . deepcopy ( v ) for k , v in cd . items ( ) } if copy else cd
4,549
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L34-L71
[ "def", "multivariate_neg_logposterior", "(", "self", ",", "beta", ")", ":", "post", "=", "self", ".", "neg_loglik", "(", "beta", ")", "for", "k", "in", "range", "(", "0", ",", "self", ".", "z_no", ")", ":", "if", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "covariance_prior", "is", "True", ":", "post", "+=", "-", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "logpdf", "(", "self", ".", "custom_covariance", "(", "beta", ")", ")", "break", "else", ":", "post", "+=", "-", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "logpdf", "(", "beta", "[", "k", "]", ")", "return", "post" ]
Merges and defines dictionaries with values identical to keys .
def kk_dict ( * kk , * * adict ) : for k in kk : if isinstance ( k , dict ) : if not set ( k ) . isdisjoint ( adict ) : raise ValueError ( 'keyword argument repeated' ) adict . update ( k ) elif k in adict : raise ValueError ( 'keyword argument repeated' ) else : adict [ k ] = k return adict
4,550
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L74-L121
[ "async", "def", "_sem_crawl", "(", "self", ",", "sem", ",", "res", ")", ":", "async", "with", "sem", ":", "st_", "=", "await", "self", ".", "crawl_raw", "(", "res", ")", "if", "st_", ":", "self", ".", "result", "[", "'ok'", "]", "+=", "1", "else", ":", "self", ".", "result", "[", "'fail'", "]", "+=", "1", "# take a little gap", "await", "asyncio", ".", "sleep", "(", "random", ".", "randint", "(", "0", ",", "1", ")", ")" ]
Returns the same arguments .
def bypass ( * inputs , copy = False ) : if len ( inputs ) == 1 : inputs = inputs [ 0 ] # Same inputs. return _copy . deepcopy ( inputs ) if copy else inputs
4,551
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L124-L151
[ "def", "_remove_vm", "(", "name", ",", "datacenter", ",", "service_instance", ",", "placement", "=", "None", ",", "power_off", "=", "None", ")", ":", "results", "=", "{", "}", "if", "placement", ":", "(", "resourcepool_object", ",", "placement_object", ")", "=", "salt", ".", "utils", ".", "vmware", ".", "get_placement", "(", "service_instance", ",", "datacenter", ",", "placement", ")", "else", ":", "placement_object", "=", "salt", ".", "utils", ".", "vmware", ".", "get_datacenter", "(", "service_instance", ",", "datacenter", ")", "if", "power_off", ":", "power_off_vm", "(", "name", ",", "datacenter", ",", "service_instance", ")", "results", "[", "'powered_off'", "]", "=", "True", "vm_ref", "=", "salt", ".", "utils", ".", "vmware", ".", "get_mor_by_property", "(", "service_instance", ",", "vim", ".", "VirtualMachine", ",", "name", ",", "property_name", "=", "'name'", ",", "container_ref", "=", "placement_object", ")", "if", "not", "vm_ref", ":", "raise", "salt", ".", "exceptions", ".", "VMwareObjectRetrievalError", "(", "'The virtual machine object {0} in datacenter '", "'{1} was not found'", ".", "format", "(", "name", ",", "datacenter", ")", ")", "return", "results", ",", "vm_ref" ]
Returns a dict with new key values .
def map_dict ( key_map , * dicts , copy = False , base = None ) : it = combine_dicts ( * dicts ) . items ( ) # Combine dicts. get = key_map . get # Namespace shortcut. # Return mapped dict. return combine_dicts ( { get ( k , k ) : v for k , v in it } , copy = copy , base = base )
4,552
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L176-L212
[ "def", "setOverlayTextureColorSpace", "(", "self", ",", "ulOverlayHandle", ",", "eTextureColorSpace", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTextureColorSpace", "result", "=", "fn", "(", "ulOverlayHandle", ",", "eTextureColorSpace", ")", "return", "result" ]
Returns a new dict .
def map_list ( key_map , * inputs , copy = False , base = None ) : d = { } if base is None else base # Initialize empty dict. for m , v in zip ( key_map , inputs ) : if isinstance ( m , dict ) : map_dict ( m , v , base = d ) # Apply a map dict. elif isinstance ( m , list ) : map_list ( m , * v , base = d ) # Apply a map list. else : d [ m ] = v # Apply map. return combine_dicts ( copy = copy , base = d )
4,553
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L215-L272
[ "def", "cmd_tracker_calpress", "(", "self", ",", "args", ")", ":", "connection", "=", "self", ".", "find_connection", "(", ")", "if", "not", "connection", ":", "print", "(", "\"No antenna tracker found\"", ")", "return", "connection", ".", "calibrate_pressure", "(", ")" ]
Selects the chosen dictionary keys from the given dictionary .
def selector ( keys , dictionary , copy = False , output_type = 'dict' , allow_miss = False ) : if not allow_miss : # noinspection PyUnusedLocal def check ( key ) : return True else : def check ( key ) : return key in dictionary if output_type == 'list' : # Select as list. res = [ dictionary [ k ] for k in keys if check ( k ) ] return _copy . deepcopy ( res ) if copy else res elif output_type == 'values' : return bypass ( * [ dictionary [ k ] for k in keys if check ( k ) ] , copy = copy ) # Select as dict. return bypass ( { k : dictionary [ k ] for k in keys if check ( k ) } , copy = copy )
4,554
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L275-L334
[ "def", "log_prior", "(", "self", ")", ":", "for", "p", ",", "b", "in", "zip", "(", "self", ".", "parameter_vector", ",", "self", ".", "parameter_bounds", ")", ":", "if", "b", "[", "0", "]", "is", "not", "None", "and", "p", "<", "b", "[", "0", "]", ":", "return", "-", "np", ".", "inf", "if", "b", "[", "1", "]", "is", "not", "None", "and", "p", ">", "b", "[", "1", "]", ":", "return", "-", "np", ".", "inf", "return", "0.0" ]
Replicates n times the input value .
def replicate_value ( value , n = 2 , copy = True ) : return bypass ( * [ value ] * n , copy = copy )
4,555
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L337-L365
[ "def", "is_removable", "(", "self", ",", "device", ")", ":", "if", "not", "self", ".", "is_handleable", "(", "device", ")", ":", "return", "False", "if", "device", ".", "is_filesystem", ":", "return", "device", ".", "is_mounted", "if", "device", ".", "is_crypto", ":", "return", "device", ".", "is_unlocked", "if", "device", ".", "is_partition_table", "or", "device", ".", "is_drive", ":", "return", "any", "(", "self", ".", "is_removable", "(", "dev", ")", "for", "dev", "in", "self", ".", "get_all_handleable", "(", ")", "if", "_is_parent_of", "(", "device", ",", "dev", ")", ")", "return", "False" ]
Stacks the keys of nested - dictionaries into tuples and yields a list of k - v pairs .
def stack_nested_keys ( nested_dict , key = ( ) , depth = - 1 ) : if depth != 0 and hasattr ( nested_dict , 'items' ) : for k , v in nested_dict . items ( ) : yield from stack_nested_keys ( v , key = key + ( k , ) , depth = depth - 1 ) else : yield key , nested_dict
4,556
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L475-L501
[ "def", "predict_median", "(", "self", ",", "X", ",", "ancillary_X", "=", "None", ")", ":", "return", "self", ".", "predict_percentile", "(", "X", ",", "p", "=", "0.5", ",", "ancillary_X", "=", "ancillary_X", ")" ]
Nested keys are inside of nested - dictionaries .
def are_in_nested_dicts ( nested_dict , * keys ) : if keys : # noinspection PyBroadException try : return are_in_nested_dicts ( nested_dict [ keys [ 0 ] ] , * keys [ 1 : ] ) except Exception : # Key error or not a dict. return False return True
4,557
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L541-L564
[ "def", "WriteFD", "(", "self", ",", "Channel", ",", "MessageBuffer", ")", ":", "try", ":", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_WriteFD", "(", "Channel", ",", "byref", "(", "MessageBuffer", ")", ")", "return", "TPCANStatus", "(", "res", ")", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.WriteFD\"", ")", "raise" ]
Merge nested - dictionaries .
def combine_nested_dicts ( * nested_dicts , depth = - 1 , base = None ) : if base is None : base = { } for nested_dict in nested_dicts : for k , v in stack_nested_keys ( nested_dict , depth = depth ) : while k : # noinspection PyBroadException try : get_nested_dicts ( base , * k [ : - 1 ] ) [ k [ - 1 ] ] = v break except Exception : # A branch of the nested_dict is longer than the base. k = k [ : - 1 ] v = get_nested_dicts ( nested_dict , * k ) return base
4,558
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L567-L603
[ "def", "moreland_adjusthue", "(", "msh", ",", "m_unsat", ")", ":", "if", "msh", "[", "M", "]", ">=", "m_unsat", ":", "return", "msh", "[", "H", "]", "# \"Best we can do\"", "hspin", "=", "(", "msh", "[", "S", "]", "*", "np", ".", "sqrt", "(", "m_unsat", "**", "2", "-", "msh", "[", "M", "]", "**", "2", ")", "/", "(", "msh", "[", "M", "]", "*", "np", ".", "sin", "(", "msh", "[", "S", "]", ")", ")", ")", "if", "msh", "[", "H", "]", ">", "-", "np", ".", "pi", "/", "3", ":", "# \"Spin away from purple\"", "return", "msh", "[", "H", "]", "+", "hspin", "return", "msh", "[", "H", "]", "-", "hspin" ]
Decorator to add a function to a dispatcher .
def add_function ( dsp , inputs_kwargs = False , inputs_defaults = False , * * kw ) : def decorator ( f ) : dsp . add_func ( f , inputs_kwargs = inputs_kwargs , inputs_defaults = inputs_defaults , * * kw ) return f return decorator
4,559
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L1242-L1296
[ "def", "wp_caption", "(", "self", ",", "post", ")", ":", "for", "match", "in", "re", ".", "finditer", "(", "r\"\\[caption (.*?)\\](.*?)\\[/caption\\]\"", ",", "post", ")", ":", "meta", "=", "'<div '", "caption", "=", "''", "for", "imatch", "in", "re", ".", "finditer", "(", "r'(\\w+)=\"(.*?)\"'", ",", "match", ".", "group", "(", "1", ")", ")", ":", "if", "imatch", ".", "group", "(", "1", ")", "==", "'id'", ":", "meta", "+=", "'id=\"%s\" '", "%", "imatch", ".", "group", "(", "2", ")", "if", "imatch", ".", "group", "(", "1", ")", "==", "'align'", ":", "meta", "+=", "'class=\"wp-caption %s\" '", "%", "imatch", ".", "group", "(", "2", ")", "if", "imatch", ".", "group", "(", "1", ")", "==", "'width'", ":", "width", "=", "int", "(", "imatch", ".", "group", "(", "2", ")", ")", "+", "10", "meta", "+=", "'style=\"width: %spx;\" '", "%", "width", "if", "imatch", ".", "group", "(", "1", ")", "==", "'caption'", ":", "caption", "=", "imatch", ".", "group", "(", "2", ")", "parts", "=", "(", "match", ".", "group", "(", "2", ")", ",", "caption", ")", "meta", "+=", "'>%s<p class=\"wp-caption-text\">%s</p></div>'", "%", "parts", "post", "=", "post", ".", "replace", "(", "match", ".", "group", "(", "0", ")", ",", "meta", ")", "return", "post" ]
Constructs a Blueprint out of the current object .
def blue ( self , memo = None ) : memo = { } if memo is None else memo if self not in memo : import inspect from . blue import Blueprint , _parent_blue keys = tuple ( inspect . signature ( self . __init__ ) . parameters ) memo [ self ] = Blueprint ( * * { k : _parent_blue ( v , memo ) for k , v in self . __dict__ . items ( ) if k in keys } ) . _set_cls ( self . __class__ ) return memo [ self ]
4,560
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L741-L762
[ "def", "encrypt", "(", "self", ",", "txt", ",", "key", ")", ":", "# log.debug(\"encrypt(txt='%s', key='%s')\", txt, key)", "assert", "isinstance", "(", "txt", ",", "six", ".", "text_type", ")", ",", "\"txt: %s is not text type!\"", "%", "repr", "(", "txt", ")", "assert", "isinstance", "(", "key", ",", "six", ".", "text_type", ")", ",", "\"key: %s is not text type!\"", "%", "repr", "(", "key", ")", "if", "len", "(", "txt", ")", "!=", "len", "(", "key", ")", ":", "raise", "SecureJSLoginError", "(", "\"encrypt error: %s and '%s' must have the same length!\"", "%", "(", "txt", ",", "key", ")", ")", "pbkdf2_hash", "=", "PBKDF2SHA1Hasher1", "(", ")", ".", "get_salt_hash", "(", "txt", ")", "txt", "=", "force_bytes", "(", "txt", ")", "key", "=", "force_bytes", "(", "key", ")", "crypted", "=", "self", ".", "xor", "(", "txt", ",", "key", ")", "crypted", "=", "binascii", ".", "hexlify", "(", "crypted", ")", "crypted", "=", "six", ".", "text_type", "(", "crypted", ",", "\"ascii\"", ")", "return", "\"%s$%s\"", "%", "(", "pbkdf2_hash", ",", "crypted", ")" ]
Given a dictionary of data and this widget s name returns the value of this widget . Returns None if it s not provided .
def value_from_datadict ( self , data , files , name ) : value = super ( FileSizeWidget , self ) . value_from_datadict ( data , files , name ) if value not in EMPTY_VALUES : try : return parse_size ( value ) except ValueError : pass return value
4,561
https://github.com/leplatrem/django-sizefield/blob/6a273a43a2e8d157ee438811c0824eae534bcdb2/sizefield/widgets.py#L17-L28
[ "def", "log_likelihood", "(", "C", ",", "T", ")", ":", "C", "=", "C", ".", "tocsr", "(", ")", "T", "=", "T", ".", "tocsr", "(", ")", "ind", "=", "scipy", ".", "nonzero", "(", "C", ")", "relT", "=", "np", ".", "array", "(", "T", "[", "ind", "]", ")", "[", "0", ",", ":", "]", "relT", "=", "np", ".", "log", "(", "relT", ")", "relC", "=", "np", ".", "array", "(", "C", "[", "ind", "]", ")", "[", "0", ",", ":", "]", "return", "relT", ".", "dot", "(", "relC", ")" ]
A managed SSH session . When the session is ready we ll invoke the ssh_cb callback .
def connect_ssh_with_cb ( ssh_cb , user , host , auth_cb , allow_new = True , verbosity = 0 ) : with connect_ssh ( user , host , auth_cb , allow_new = True , verbosity = 0 ) as ssh : ssh_cb ( ssh )
4,562
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L20-L27
[ "def", "set_dimensions", "(", "self", ",", "variables", ",", "unlimited_dims", "=", "None", ")", ":", "if", "unlimited_dims", "is", "None", ":", "unlimited_dims", "=", "set", "(", ")", "existing_dims", "=", "self", ".", "get_dimensions", "(", ")", "dims", "=", "OrderedDict", "(", ")", "for", "v", "in", "unlimited_dims", ":", "# put unlimited_dims first", "dims", "[", "v", "]", "=", "None", "for", "v", "in", "variables", ".", "values", "(", ")", ":", "dims", ".", "update", "(", "dict", "(", "zip", "(", "v", ".", "dims", ",", "v", ".", "shape", ")", ")", ")", "for", "dim", ",", "length", "in", "dims", ".", "items", "(", ")", ":", "if", "dim", "in", "existing_dims", "and", "length", "!=", "existing_dims", "[", "dim", "]", ":", "raise", "ValueError", "(", "\"Unable to update size for existing dimension\"", "\"%r (%d != %d)\"", "%", "(", "dim", ",", "length", ",", "existing_dims", "[", "dim", "]", ")", ")", "elif", "dim", "not", "in", "existing_dims", ":", "is_unlimited", "=", "dim", "in", "unlimited_dims", "self", ".", "set_dimension", "(", "dim", ",", "length", ",", "is_unlimited", ")" ]
A managed SFTP session . When the SSH session and an additional SFTP session are ready invoke the sftp_cb callback .
def connect_sftp_with_cb ( sftp_cb , * args , * * kwargs ) : with _connect_sftp ( * args , * * kwargs ) as ( ssh , sftp ) : sftp_cb ( ssh , sftp )
4,563
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L39-L45
[ "def", "index_search_document", "(", "self", ",", "*", ",", "index", ")", ":", "cache_key", "=", "self", ".", "search_document_cache_key", "new_doc", "=", "self", ".", "as_search_document", "(", "index", "=", "index", ")", "cached_doc", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "new_doc", "==", "cached_doc", ":", "logger", ".", "debug", "(", "\"Search document for %r is unchanged, ignoring update.\"", ",", "self", ")", "return", "[", "]", "cache", ".", "set", "(", "cache_key", ",", "new_doc", ",", "timeout", "=", "get_setting", "(", "\"cache_expiry\"", ",", "60", ")", ")", "get_client", "(", ")", ".", "index", "(", "index", "=", "index", ",", "doc_type", "=", "self", ".", "search_doc_type", ",", "body", "=", "new_doc", ",", "id", "=", "self", ".", "pk", ")" ]
This is just a convenience function for key - based login .
def get_key_auth_cb ( key_filepath ) : def auth_cb ( ssh ) : key = ssh_pki_import_privkey_file ( key_filepath ) ssh . userauth_publickey ( key ) return auth_cb
4,564
https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L47-L54
[ "def", "setOverlayTextureColorSpace", "(", "self", ",", "ulOverlayHandle", ",", "eTextureColorSpace", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTextureColorSpace", "result", "=", "fn", "(", "ulOverlayHandle", ",", "eTextureColorSpace", ")", "return", "result" ]
Returns a function that adds an edge to the graph checking only the out node .
def add_edge_fun ( graph ) : # Namespace shortcut for speed. succ , pred , node = graph . _succ , graph . _pred , graph . _node def add_edge ( u , v , * * attr ) : if v not in succ : # Add nodes. succ [ v ] , pred [ v ] , node [ v ] = { } , { } , { } succ [ u ] [ v ] = pred [ v ] [ u ] = attr # Add the edge. return add_edge
4,565
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L22-L45
[ "def", "wave_infochunk", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "file", ":", "if", "file", ".", "read", "(", "4", ")", "!=", "b\"RIFF\"", ":", "return", "None", "data_size", "=", "file", ".", "read", "(", "4", ")", "# container size", "if", "file", ".", "read", "(", "4", ")", "!=", "b\"WAVE\"", ":", "return", "None", "while", "True", ":", "chunkid", "=", "file", ".", "read", "(", "4", ")", "sizebuf", "=", "file", ".", "read", "(", "4", ")", "if", "len", "(", "sizebuf", ")", "<", "4", "or", "len", "(", "chunkid", ")", "<", "4", ":", "return", "None", "size", "=", "struct", ".", "unpack", "(", "b'<L'", ",", "sizebuf", ")", "[", "0", "]", "if", "chunkid", "[", "0", ":", "3", "]", "!=", "b\"fmt\"", ":", "if", "size", "%", "2", "==", "1", ":", "seek", "=", "size", "+", "1", "else", ":", "seek", "=", "size", "file", ".", "seek", "(", "size", ",", "1", ")", "else", ":", "return", "bytearray", "(", "b\"RIFF\"", "+", "data_size", "+", "b\"WAVE\"", "+", "chunkid", "+", "sizebuf", "+", "file", ".", "read", "(", "size", ")", ")" ]
Returns a function that removes an edge from the graph .
def remove_edge_fun ( graph ) : # Namespace shortcut for speed. rm_edge , rm_node = graph . remove_edge , graph . remove_node from networkx import is_isolate def remove_edge ( u , v ) : rm_edge ( u , v ) # Remove the edge. if is_isolate ( graph , v ) : # Check if v is isolate. rm_node ( v ) # Remove the isolate out node. return remove_edge
4,566
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L48-L72
[ "def", "read_data", "(", "self", ",", "blocksize", "=", "4096", ")", ":", "frames", "=", "ctypes", ".", "c_uint", "(", "blocksize", "//", "self", ".", "_client_fmt", ".", "mBytesPerFrame", ")", "buf", "=", "ctypes", ".", "create_string_buffer", "(", "blocksize", ")", "buflist", "=", "AudioBufferList", "(", ")", "buflist", ".", "mNumberBuffers", "=", "1", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mNumberChannels", "=", "self", ".", "_client_fmt", ".", "mChannelsPerFrame", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "=", "blocksize", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", "=", "ctypes", ".", "cast", "(", "buf", ",", "ctypes", ".", "c_void_p", ")", "while", "True", ":", "check", "(", "_coreaudio", ".", "ExtAudioFileRead", "(", "self", ".", "_obj", ",", "ctypes", ".", "byref", "(", "frames", ")", ",", "ctypes", ".", "byref", "(", "buflist", ")", ")", ")", "assert", "buflist", ".", "mNumberBuffers", "==", "1", "size", "=", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "if", "not", "size", ":", "break", "data", "=", "ctypes", ".", "cast", "(", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", ")", ")", "blob", "=", "data", "[", ":", "size", "]", "yield", "blob" ]
Finds an unused node id in graph .
def get_unused_node_id ( graph , initial_guess = 'unknown' , _format = '{}<%d>' ) : has_node = graph . has_node # Namespace shortcut for speed. n = counter ( ) # Counter. node_id_format = _format . format ( initial_guess ) # Node id format. node_id = initial_guess # Initial guess. while has_node ( node_id ) : # Check if node id is used. node_id = node_id_format % n ( ) # Guess. return node_id
4,567
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L75-L105
[ "def", "wet_bulb_temperature", "(", "pressure", ",", "temperature", ",", "dewpoint", ")", ":", "if", "not", "hasattr", "(", "pressure", ",", "'shape'", ")", ":", "pressure", "=", "atleast_1d", "(", "pressure", ")", "temperature", "=", "atleast_1d", "(", "temperature", ")", "dewpoint", "=", "atleast_1d", "(", "dewpoint", ")", "it", "=", "np", ".", "nditer", "(", "[", "pressure", ",", "temperature", ",", "dewpoint", ",", "None", "]", ",", "op_dtypes", "=", "[", "'float'", ",", "'float'", ",", "'float'", ",", "'float'", "]", ",", "flags", "=", "[", "'buffered'", "]", ")", "for", "press", ",", "temp", ",", "dewp", ",", "ret", "in", "it", ":", "press", "=", "press", "*", "pressure", ".", "units", "temp", "=", "temp", "*", "temperature", ".", "units", "dewp", "=", "dewp", "*", "dewpoint", ".", "units", "lcl_pressure", ",", "lcl_temperature", "=", "lcl", "(", "press", ",", "temp", ",", "dewp", ")", "moist_adiabat_temperatures", "=", "moist_lapse", "(", "concatenate", "(", "[", "lcl_pressure", ",", "press", "]", ")", ",", "lcl_temperature", ")", "ret", "[", "...", "]", "=", "moist_adiabat_temperatures", "[", "-", "1", "]", "# If we started with a scalar, return a scalar", "if", "it", ".", "operands", "[", "3", "]", ".", "size", "==", "1", ":", "return", "it", ".", "operands", "[", "3", "]", "[", "0", "]", "*", "moist_adiabat_temperatures", ".", "units", "return", "it", ".", "operands", "[", "3", "]", "*", "moist_adiabat_temperatures", ".", "units" ]
Adds function node edges .
def add_func_edges ( dsp , fun_id , nodes_bunch , edge_weights = None , input = True , data_nodes = None ) : # Namespace shortcut for speed. add_edge = _add_edge_dmap_fun ( dsp . dmap , edge_weights ) node , add_data = dsp . dmap . nodes , dsp . add_data remove_nodes = dsp . dmap . remove_nodes_from # Define an error message. msg = 'Invalid %sput id: {} is not a data node' % [ 'out' , 'in' ] [ input ] i , j = ( 'i' , 'o' ) if input else ( 'o' , 'i' ) data_nodes = data_nodes or [ ] # Update data nodes. for u in nodes_bunch : # Iterate nodes. try : if node [ u ] [ 'type' ] != 'data' : # The node is not a data node. data_nodes . append ( fun_id ) # Add function id to be removed. remove_nodes ( data_nodes ) # Remove function and new data nodes. raise ValueError ( msg . format ( u ) ) # Raise error. except KeyError : data_nodes . append ( add_data ( data_id = u ) ) # Add new data node. add_edge ( * * { i : u , j : fun_id , 'w' : u } ) # Add edge. return data_nodes
4,568
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L108-L166
[ "def", "get_file_object", "(", "username", ",", "password", ",", "utc_start", "=", "None", ",", "utc_stop", "=", "None", ")", ":", "if", "not", "utc_start", ":", "utc_start", "=", "datetime", ".", "now", "(", ")", "if", "not", "utc_stop", ":", "utc_stop", "=", "utc_start", "+", "timedelta", "(", "days", "=", "1", ")", "logging", ".", "info", "(", "\"Downloading schedules for username [%s] in range [%s] to \"", "\"[%s].\"", "%", "(", "username", ",", "utc_start", ",", "utc_stop", ")", ")", "replacements", "=", "{", "'start_time'", ":", "utc_start", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")", ",", "'stop_time'", ":", "utc_stop", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%SZ'", ")", "}", "soap_message_xml", "=", "(", "soap_message_xml_template", "%", "replacements", ")", "authinfo", "=", "urllib2", ".", "HTTPDigestAuthHandler", "(", ")", "authinfo", ".", "add_password", "(", "realm", ",", "url", ",", "username", ",", "password", ")", "try", ":", "request", "=", "urllib2", ".", "Request", "(", "url", ",", "soap_message_xml", ",", "request_headers", ")", "response", "=", "urllib2", ".", "build_opener", "(", "authinfo", ")", ".", "open", "(", "request", ")", "if", "response", ".", "headers", "[", "'Content-Encoding'", "]", "==", "'gzip'", ":", "response", "=", "GzipStream", "(", "response", ")", "except", ":", "logging", ".", "exception", "(", "\"Could not acquire connection to Schedules Direct.\"", ")", "raise", "return", "response" ]
Adds edge to the dispatcher map .
def _add_edge_dmap_fun ( graph , edges_weights = None ) : add = graph . add_edge # Namespace shortcut for speed. if edges_weights is not None : def add_edge ( i , o , w ) : if w in edges_weights : add ( i , o , weight = edges_weights [ w ] ) # Weighted edge. else : add ( i , o ) # Normal edge. else : # noinspection PyUnusedLocal def add_edge ( i , o , w ) : add ( i , o ) # Normal edge. return add_edge
4,569
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L169-L199
[ "def", "load", "(", "self", ",", "filename", "=", "None", ")", ":", "fields", "=", "[", "]", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "format_data", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "lines", "=", "format_data", ".", "split", "(", "'\\n'", ")", "self", ".", "_sql_version", "=", "lines", ".", "pop", "(", "0", ")", "self", ".", "_num_fields", "=", "int", "(", "lines", ".", "pop", "(", "0", ")", ")", "for", "line", "in", "lines", ":", "# Get rid of mulitple spaces", "line", "=", "re", ".", "sub", "(", "' +'", ",", "' '", ",", "line", ".", "strip", "(", ")", ")", "row_format", "=", "BCPFormatRow", "(", "line", ".", "split", "(", "' '", ")", ")", "fields", ".", "append", "(", "row_format", ")", "self", ".", "fields", "=", "fields", "self", ".", "filename", "=", "filename" ]
Returns a dispatcher node that match the given node id .
def _get_node ( nodes , node_id , fuzzy = True ) : try : return node_id , nodes [ node_id ] # Return dispatcher node and its id. except KeyError as ex : if fuzzy : it = sorted ( nodes . items ( ) ) n = next ( ( ( k , v ) for k , v in it if node_id in k ) , EMPTY ) if n is not EMPTY : return n raise ex
4,570
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L202-L227
[ "def", "_bqschema_to_nullsafe_dtypes", "(", "schema_fields", ")", ":", "# If you update this mapping, also update the table at", "# `docs/source/reading.rst`.", "dtype_map", "=", "{", "\"FLOAT\"", ":", "np", ".", "dtype", "(", "float", ")", ",", "# pandas doesn't support timezone-aware dtype in DataFrame/Series", "# constructors. It's more idiomatic to localize after construction.", "# https://github.com/pandas-dev/pandas/issues/25843", "\"TIMESTAMP\"", ":", "\"datetime64[ns]\"", ",", "\"TIME\"", ":", "\"datetime64[ns]\"", ",", "\"DATE\"", ":", "\"datetime64[ns]\"", ",", "\"DATETIME\"", ":", "\"datetime64[ns]\"", ",", "}", "dtypes", "=", "{", "}", "for", "field", "in", "schema_fields", ":", "name", "=", "str", "(", "field", "[", "\"name\"", "]", ")", "if", "field", "[", "\"mode\"", "]", ".", "upper", "(", ")", "==", "\"REPEATED\"", ":", "continue", "dtype", "=", "dtype_map", ".", "get", "(", "field", "[", "\"type\"", "]", ".", "upper", "(", ")", ")", "if", "dtype", ":", "dtypes", "[", "name", "]", "=", "dtype", "return", "dtypes" ]
Returns the full pipe of a dispatch run .
def get_full_pipe ( sol , base = ( ) ) : pipe , i = DspPipe ( ) , len ( base ) for p in sol . _pipe : n , s = p [ - 1 ] d = s . dsp p = { 'task' : p } if n in s . _errors : p [ 'error' ] = s . _errors [ n ] node_id = s . full_name + ( n , ) assert base == node_id [ : i ] , '%s != %s' % ( node_id [ : i ] , base ) n_id = node_id [ i : ] n , path = d . get_node ( n , node_attr = None ) if n [ 'type' ] == 'function' and 'function' in n : try : sub_sol = s . workflow . node [ path [ - 1 ] ] [ 'solution' ] sp = get_full_pipe ( sub_sol , base = node_id ) if sp : p [ 'sub_pipe' ] = sp except KeyError : pass pipe [ bypass ( * n_id ) ] = p return pipe
4,571
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L426-L471
[ "def", "scale_rc", "(", "self", ",", "manifest_filename", ",", "namespace", "=", "\"default\"", ",", "num_replicas", "=", "0", ")", ":", "rc_manifest", ",", "rc_manifest_json", "=", "util", ".", "load_yaml", "(", "filename", "=", "manifest_filename", ")", "logging", ".", "debug", "(", "\"%s\"", "%", "(", "rc_manifest_json", ")", ")", "rc_path", "=", "\"\"", ".", "join", "(", "[", "\"/api/v1/namespaces/\"", ",", "namespace", ",", "\"/replicationcontrollers/\"", ",", "rc_manifest", "[", "\"metadata\"", "]", "[", "\"name\"", "]", "]", ")", "rc_manifest", "[", "\"spec\"", "]", "[", "\"replicas\"", "]", "=", "num_replicas", "res", "=", "self", ".", "execute_operation", "(", "method", "=", "\"PUT\"", ",", "ops_path", "=", "rc_path", ",", "payload", "=", "util", ".", "serialize_tojson", "(", "rc_manifest", ")", ")", "try", ":", "rc_url", "=", "res", ".", "json", "(", ")", "[", "\"metadata\"", "]", "[", "\"selfLink\"", "]", "except", "KeyError", ":", "raise", "ResourceCRUDException", "(", "\"\"", ".", "join", "(", "[", "\"Sorry, can not scale the RC: \"", ",", "rc_manifest", "[", "\"metadata\"", "]", "[", "\"name\"", "]", "]", ")", ")", "logging", ".", "info", "(", "\"I scaled the RC %s at %s to %d replicas\"", "%", "(", "rc_manifest", "[", "\"metadata\"", "]", "[", "\"name\"", "]", ",", "rc_url", ",", "num_replicas", ")", ")", "return", "(", "res", ",", "rc_url", ")" ]
Parse Storm Pipe CONNECT Chunk Method
def connectChunk ( key , chunk ) : schunk = chunk [ 0 ] . strip ( ) . split ( ) result = { 'slinkNumber' : schunk [ 1 ] , 'upSjunc' : schunk [ 2 ] , 'downSjunc' : schunk [ 3 ] } return result
4,572
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/spn_chunk.py#L15-L25
[ "def", "read_yaml_config", "(", "filename", ",", "check", "=", "True", ",", "osreplace", "=", "True", ",", "exit", "=", "True", ")", ":", "location", "=", "filename", "if", "location", "is", "not", "None", ":", "location", "=", "path_expand", "(", "location", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "location", ")", "and", "not", "check", ":", "return", "None", "if", "check", "and", "os", ".", "path", ".", "exists", "(", "location", ")", ":", "# test for tab in yaml file", "if", "check_file_for_tabs", "(", "location", ")", ":", "log", ".", "error", "(", "\"The file {0} contains tabs. yaml \"", "\"Files are not allowed to contain tabs\"", ".", "format", "(", "location", ")", ")", "sys", ".", "exit", "(", ")", "result", "=", "None", "try", ":", "if", "osreplace", ":", "result", "=", "open", "(", "location", ",", "'r'", ")", ".", "read", "(", ")", "t", "=", "Template", "(", "result", ")", "result", "=", "t", ".", "substitute", "(", "os", ".", "environ", ")", "# data = yaml.safe_load(result)", "data", "=", "ordered_load", "(", "result", ",", "yaml", ".", "SafeLoader", ")", "else", ":", "f", "=", "open", "(", "location", ",", "\"r\"", ")", "# data = yaml.safe_load(f)", "data", "=", "ordered_load", "(", "result", ",", "yaml", ".", "SafeLoader", ")", "f", ".", "close", "(", ")", "return", "data", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"The file {0} fails with a yaml read error\"", ".", "format", "(", "filename", ")", ")", "Error", ".", "traceback", "(", "e", ")", "sys", ".", "exit", "(", ")", "else", ":", "log", ".", "error", "(", "\"The file {0} does not exist.\"", ".", "format", "(", "filename", ")", ")", "if", "exit", ":", "sys", ".", "exit", "(", ")", "return", "None" ]
Fetch database for items matching .
def get_items ( self , page = 1 , order_by = None , filters = None ) : start = ( page - 1 ) * self . per_page query = self . get_query ( ) if order_by is not None : query = query . order_by ( self . _get_field ( order_by ) ) if filters is not None : query = self . _filter ( query , filters ) return query . offset ( start ) . limit ( self . per_page ) , self . count ( query )
4,573
https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/ext/sqlalchemy.py#L160-L184
[ "def", "external_metadata", "(", "self", ",", "datasource_type", "=", "None", ",", "datasource_id", "=", "None", ")", ":", "if", "datasource_type", "==", "'druid'", ":", "datasource", "=", "ConnectorRegistry", ".", "get_datasource", "(", "datasource_type", ",", "datasource_id", ",", "db", ".", "session", ")", "elif", "datasource_type", "==", "'table'", ":", "database", "=", "(", "db", ".", "session", ".", "query", "(", "Database", ")", ".", "filter_by", "(", "id", "=", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ")", ".", "one", "(", ")", ")", "Table", "=", "ConnectorRegistry", ".", "sources", "[", "'table'", "]", "datasource", "=", "Table", "(", "database", "=", "database", ",", "table_name", "=", "request", ".", "args", ".", "get", "(", "'table_name'", ")", ",", "schema", "=", "request", ".", "args", ".", "get", "(", "'schema'", ")", "or", "None", ",", ")", "external_metadata", "=", "datasource", ".", "external_metadata", "(", ")", "return", "self", ".", "json_response", "(", "external_metadata", ")" ]
Generic File Read from File Method
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Persist name and extension of file self . name = name self . fileExtension = extension # Open file and parse into a data structure with open ( path , 'r' ) as f : self . text = f . read ( )
4,574
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/generic.py#L55-L65
[ "def", "syzygyJD", "(", "jd", ")", ":", "sun", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "SUN", ",", "jd", ")", "moon", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "MOON", ",", "jd", ")", "dist", "=", "angle", ".", "distance", "(", "sun", ",", "moon", ")", "# Offset represents the Syzygy type. ", "# Zero is conjunction and 180 is opposition.", "offset", "=", "180", "if", "(", "dist", ">=", "180", ")", "else", "0", "while", "abs", "(", "dist", ")", ">", "MAX_ERROR", ":", "jd", "=", "jd", "-", "dist", "/", "13.1833", "# Moon mean daily motion", "sun", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "SUN", ",", "jd", ")", "moon", "=", "swe", ".", "sweObjectLon", "(", "const", ".", "MOON", ",", "jd", ")", "dist", "=", "angle", ".", "closestdistance", "(", "sun", "-", "offset", ",", "moon", ")", "return", "jd" ]
r Return True if the set has no elements in common with other .
def isdisjoint ( self , other ) : if isinstance ( other , _sequence_types + ( BaseMultiset , ) ) : pass elif not isinstance ( other , Container ) : other = self . _as_multiset ( other ) return all ( element not in other for element in self . _elements . keys ( ) )
4,575
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L148-L167
[ "def", "lstm_posterior_builder", "(", "getter", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "del", "args", "parameter_shapes", "=", "tfp", ".", "distributions", ".", "Normal", ".", "param_static_shapes", "(", "kwargs", "[", "\"shape\"", "]", ")", "# The standard deviation of the scale mixture prior.", "prior_stddev", "=", "np", ".", "sqrt", "(", "FLAGS", ".", "prior_pi", "*", "np", ".", "square", "(", "FLAGS", ".", "prior_sigma1", ")", "+", "(", "1", "-", "FLAGS", ".", "prior_pi", ")", "*", "np", ".", "square", "(", "FLAGS", ".", "prior_sigma2", ")", ")", "loc_var", "=", "getter", "(", "\"{}/posterior_loc\"", ".", "format", "(", "name", ")", ",", "shape", "=", "parameter_shapes", "[", "\"loc\"", "]", ",", "initializer", "=", "kwargs", ".", "get", "(", "\"initializer\"", ")", ",", "dtype", "=", "tf", ".", "float32", ")", "scale_var", "=", "getter", "(", "\"{}/posterior_scale\"", ".", "format", "(", "name", ")", ",", "initializer", "=", "tf", ".", "random_uniform", "(", "minval", "=", "np", ".", "log", "(", "np", ".", "exp", "(", "prior_stddev", "/", "4.0", ")", "-", "1.0", ")", ",", "maxval", "=", "np", ".", "log", "(", "np", ".", "exp", "(", "prior_stddev", "/", "2.0", ")", "-", "1.0", ")", ",", "dtype", "=", "tf", ".", "float32", ",", "shape", "=", "parameter_shapes", "[", "\"scale\"", "]", ")", ")", "return", "tfp", ".", "distributions", ".", "Normal", "(", "loc", "=", "loc_var", ",", "scale", "=", "tf", ".", "nn", ".", "softplus", "(", "scale_var", ")", "+", "1e-5", ",", "name", "=", "\"{}/posterior_dist\"", ".", "format", "(", "name", ")", ")" ]
r Return a new multiset with all elements from the others removed .
def difference ( self , * others ) : result = self . __copy__ ( ) _elements = result . _elements _total = result . _total for other in map ( self . _as_multiset , others ) : for element , multiplicity in other . items ( ) : if element in _elements : old_multiplicity = _elements [ element ] new_multiplicity = old_multiplicity - multiplicity if new_multiplicity > 0 : _elements [ element ] = new_multiplicity _total -= multiplicity else : del _elements [ element ] _total -= old_multiplicity result . _total = _total return result
4,576
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L169-L208
[ "def", "harvest_fundref", "(", "source", "=", "None", ")", ":", "loader", "=", "LocalFundRefLoader", "(", "source", "=", "source", ")", "if", "source", "else", "RemoteFundRefLoader", "(", ")", "for", "funder_json", "in", "loader", ".", "iter_funders", "(", ")", ":", "register_funder", ".", "delay", "(", "funder_json", ")" ]
r Return a new multiset with all elements from the multiset and the others with maximal multiplicities .
def union ( self , * others ) : result = self . __copy__ ( ) _elements = result . _elements _total = result . _total for other in map ( self . _as_mapping , others ) : for element , multiplicity in other . items ( ) : old_multiplicity = _elements . get ( element , 0 ) if multiplicity > old_multiplicity : _elements [ element ] = multiplicity _total += multiplicity - old_multiplicity result . _total = _total return result
4,577
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L222-L256
[ "def", "isTrackedDeviceConnected", "(", "self", ",", "unDeviceIndex", ")", ":", "fn", "=", "self", ".", "function_table", ".", "isTrackedDeviceConnected", "result", "=", "fn", "(", "unDeviceIndex", ")", "return", "result" ]
r Return a new multiset with elements common to the multiset and all others .
def intersection ( self , * others ) : result = self . __copy__ ( ) _elements = result . _elements _total = result . _total for other in map ( self . _as_mapping , others ) : for element , multiplicity in list ( _elements . items ( ) ) : new_multiplicity = other . get ( element , 0 ) if new_multiplicity < multiplicity : if new_multiplicity > 0 : _elements [ element ] = new_multiplicity _total -= multiplicity - new_multiplicity else : del _elements [ element ] _total -= multiplicity result . _total = _total return result
4,578
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L316-L354
[ "def", "get_expr_summ_id", "(", "self", ",", "experiment_id", ",", "time_slide_id", ",", "veto_def_name", ",", "datatype", ",", "sim_proc_id", "=", "None", ")", ":", "# look for the ID", "for", "row", "in", "self", ":", "if", "(", "row", ".", "experiment_id", ",", "row", ".", "time_slide_id", ",", "row", ".", "veto_def_name", ",", "row", ".", "datatype", ",", "row", ".", "sim_proc_id", ")", "==", "(", "experiment_id", ",", "time_slide_id", ",", "veto_def_name", ",", "datatype", ",", "sim_proc_id", ")", ":", "# found it", "return", "row", ".", "experiment_summ_id", "# if get to here, experiment not found in table", "return", "None" ]
r Return a new set with elements in either the set or other but not both .
def symmetric_difference(self, other):
    """Return a new multiset whose elements occur in either this multiset
    or *other*, but not in both.

    Each element's multiplicity is the absolute difference of its two
    multiplicities.
    """
    other = self._as_multiset(other)
    result = self.__class__()
    size = 0
    res_counts = result._elements
    mine = self._elements
    theirs = other._elements
    # Consider every element that occurs on either side.
    for item in set(mine.keys()) | set(theirs.keys()):
        a = mine.get(item, 0)
        b = theirs.get(item, 0)
        diff = a - b if a > b else b - a
        size += diff
        if diff > 0:
            res_counts[item] = diff
    result._total = size
    return result
4,579
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L365-L405
[ "def", "calibrate_percentile_ranks", "(", "allele", ",", "predictor", ",", "peptides", "=", "None", ")", ":", "global", "GLOBAL_DATA", "if", "peptides", "is", "None", ":", "peptides", "=", "GLOBAL_DATA", "[", "\"calibration_peptides\"", "]", "predictor", ".", "calibrate_percentile_ranks", "(", "peptides", "=", "peptides", ",", "alleles", "=", "[", "allele", "]", ")", "return", "{", "allele", ":", "predictor", ".", "allele_to_percent_rank_transform", "[", "allele", "]", ",", "}" ]
Return a new set with each element's multiplicity multiplied by the given scalar factor.
def times(self, factor):
    """Return a new multiset with every element's multiplicity multiplied
    by the non-negative integer *factor*.

    A factor of zero yields an empty multiset.

    Raises:
        ValueError: If *factor* is negative.
    """
    if factor == 0:
        return self.__class__()
    if factor < 0:
        # Fixed typo in the message ("must no be") and made it consistent
        # with times_update's wording.
        raise ValueError('The factor must not be negative.')
    scaled = self.__copy__()
    counts = scaled._elements
    for item in counts:
        counts[item] *= factor
    scaled._total *= factor
    return scaled
4,580
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L416-L443
[ "def", "_should_ignore", "(", "self", ",", "name", ")", ":", "_name", "=", "name", ".", "lower", "(", ")", "return", "(", "_name", ".", "startswith", "(", "\"deprecated\"", ")", "or", "_name", ".", "startswith", "(", "\"_\"", ")", "or", "_name", "in", "(", "\"remote\"", ",", "\"reserved\"", ",", "\"dialogs_py\"", ",", "\"dialogs_ipy\"", ",", "\"dialogs_jy\"", ")", ")" ]
Update the multiset, adding elements from all others using the maximum multiplicity.
def union_update(self, *others):
    """Update this multiset in place so each element's multiplicity becomes
    the maximum of its current multiplicity and that in any of *others*.
    """
    counts = self._elements
    size = self._total
    for mapping in map(self._as_mapping, others):
        for item, count in mapping.items():
            current = counts.get(item, 0)
            if count > current:
                counts[item] = count
                size += count - current
    self._total = size
4,581
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L719-L750
[ "def", "_generate_examples_validation", "(", "self", ",", "archive", ",", "labels", ")", ":", "# Get the current random seeds.", "numpy_st0", "=", "np", ".", "random", ".", "get_state", "(", ")", "# Set new random seeds.", "np", ".", "random", ".", "seed", "(", "135", ")", "logging", ".", "warning", "(", "'Overwriting cv2 RNG seed.'", ")", "tfds", ".", "core", ".", "lazy_imports", ".", "cv2", ".", "setRNGSeed", "(", "357", ")", "for", "example", "in", "super", "(", "Imagenet2012Corrupted", ",", "self", ")", ".", "_generate_examples_validation", "(", "archive", ",", "labels", ")", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "tf_img", "=", "tf", ".", "image", ".", "decode_jpeg", "(", "example", "[", "'image'", "]", ".", "read", "(", ")", ",", "channels", "=", "3", ")", "image_np", "=", "tfds", ".", "as_numpy", "(", "tf_img", ")", "example", "[", "'image'", "]", "=", "self", ".", "_get_corrupted_example", "(", "image_np", ")", "yield", "example", "# Reset the seeds back to their original values.", "np", ".", "random", ".", "set_state", "(", "numpy_st0", ")" ]
Update the multiset, keeping only elements found in it and all others.
def intersection_update(self, *others):
    """Update this multiset in place, keeping for each element the minimum
    multiplicity found in this multiset and all *others*.
    """
    for mapping in map(self._as_mapping, others):
        # Snapshot the items: the assignment below may mutate the multiset.
        for item, current in list(self.items()):
            limit = mapping.get(item, 0)
            if limit < current:
                self[item] = limit
4,582
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L760-L787
[ "def", "start_service", "(", "name", ",", "argv", "=", "None", ")", ":", "with", "win32", ".", "OpenSCManager", "(", "dwDesiredAccess", "=", "win32", ".", "SC_MANAGER_CONNECT", ")", "as", "hSCManager", ":", "with", "win32", ".", "OpenService", "(", "hSCManager", ",", "name", ",", "dwDesiredAccess", "=", "win32", ".", "SERVICE_START", ")", "as", "hService", ":", "win32", ".", "StartService", "(", "hService", ")" ]
Remove all elements contained in the others from this multiset.
def difference_update(self, *others):
    """Remove from this multiset, element by element, every occurrence
    found in any of the *others*.
    """
    for multiset in map(self._as_multiset, others):
        for item, count in multiset.items():
            self.discard(item, count)
4,583
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L797-L822
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Update the multiset to contain only elements in either this multiset or the other, but not both.
def symmetric_difference_update(self, other):
    """Update this multiset in place so each element's multiplicity is the
    absolute difference between its multiplicity here and in *other*.
    """
    other = self._as_multiset(other)
    seen = set(self.distinct_elements()) | set(other.distinct_elements())
    for item in seen:
        mine = self[item]
        theirs = other[item]
        self[item] = mine - theirs if mine > theirs else theirs - mine
4,584
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L832-L860
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Update this multiset by multiplying each element's multiplicity by the given scalar factor.
def times_update(self, factor):
    """Scale every element's multiplicity in place by *factor*.

    A factor of zero empties the multiset.

    Raises:
        ValueError: If *factor* is negative.
    """
    if factor < 0:
        raise ValueError("The factor must not be negative.")
    if factor == 0:
        self.clear()
        return
    counts = self._elements
    for item in counts:
        counts[item] *= factor
    self._total *= factor
4,585
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L870-L899
[ "def", "_should_ignore", "(", "self", ",", "name", ")", ":", "_name", "=", "name", ".", "lower", "(", ")", "return", "(", "_name", ".", "startswith", "(", "\"deprecated\"", ")", "or", "_name", ".", "startswith", "(", "\"_\"", ")", "or", "_name", "in", "(", "\"remote\"", ",", "\"reserved\"", ",", "\"dialogs_py\"", ",", "\"dialogs_ipy\"", ",", "\"dialogs_jy\"", ")", ")" ]
Adds an element to the multiset .
def add(self, element, multiplicity=1):
    """Add *element* to the multiset *multiplicity* times (default once).

    Raises:
        ValueError: If *multiplicity* is less than one.
    """
    if multiplicity < 1:
        raise ValueError("Multiplicity must be positive")
    self._elements[element] += multiplicity
    self._total += multiplicity
4,586
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L907-L932
[ "def", "getShocks", "(", "self", ")", ":", "PersistentShockConsumerType", ".", "getShocks", "(", "self", ")", "# Get permanent and transitory income shocks", "MedShkNow", "=", "np", ".", "zeros", "(", "self", ".", "AgentCount", ")", "# Initialize medical shock array", "MedPriceNow", "=", "np", ".", "zeros", "(", "self", ".", "AgentCount", ")", "# Initialize relative price array", "for", "t", "in", "range", "(", "self", ".", "T_cycle", ")", ":", "these", "=", "t", "==", "self", ".", "t_cycle", "N", "=", "np", ".", "sum", "(", "these", ")", "if", "N", ">", "0", ":", "MedShkAvg", "=", "self", ".", "MedShkAvg", "[", "t", "]", "MedShkStd", "=", "self", ".", "MedShkStd", "[", "t", "]", "MedPrice", "=", "self", ".", "MedPrice", "[", "t", "]", "MedShkNow", "[", "these", "]", "=", "self", ".", "RNG", ".", "permutation", "(", "approxLognormal", "(", "N", ",", "mu", "=", "np", ".", "log", "(", "MedShkAvg", ")", "-", "0.5", "*", "MedShkStd", "**", "2", ",", "sigma", "=", "MedShkStd", ")", "[", "1", "]", ")", "MedPriceNow", "[", "these", "]", "=", "MedPrice", "self", ".", "MedShkNow", "=", "MedShkNow", "self", ".", "MedPriceNow", "=", "MedPriceNow" ]
Removes an element from the multiset .
def remove(self, element, multiplicity=None):
    """Remove *element* from the multiset.

    If *multiplicity* is ``None`` (or at least the element's current
    multiplicity), the element is removed completely; otherwise its
    multiplicity is reduced by *multiplicity*.

    Args:
        element: The element to remove.
        multiplicity: How many occurrences to remove, or ``None`` for all.

    Returns:
        The multiplicity the element had before removal.

    Raises:
        KeyError: If the element is not in the multiset.
        ValueError: If *multiplicity* is negative.
    """
    _elements = self._elements
    if element not in _elements:
        # Include the missing key, mirroring dict deletion semantics
        # (the original raised a bare KeyError with no argument).
    # The element is known to be present, so index directly instead of
    # the original redundant .get(element, 0).
        raise KeyError(element)
    old_multiplicity = _elements[element]
    if multiplicity is None or multiplicity >= old_multiplicity:
        del _elements[element]
        self._total -= old_multiplicity
    elif multiplicity < 0:
        # Fixed grammar ("must be not be") and made the wording consistent
        # with discard's message.
        raise ValueError("Multiplicity must not be negative")
    elif multiplicity > 0:
        _elements[element] -= multiplicity
        self._total -= multiplicity
    return old_multiplicity
4,587
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L934-L987
[ "def", "getMetastable", "(", "rates", ",", "ver", ":", "np", ".", "ndarray", ",", "lamb", ",", "br", ",", "reactfn", ":", "Path", ")", ":", "with", "h5py", ".", "File", "(", "reactfn", ",", "'r'", ")", "as", "f", ":", "A", "=", "f", "[", "'/metastable/A'", "]", "[", ":", "]", "lambnew", "=", "f", "[", "'/metastable/lambda'", "]", ".", "value", ".", "ravel", "(", "order", "=", "'F'", ")", "# some are not 1-D!", "vnew", "=", "np", ".", "concatenate", "(", "(", "A", "[", ":", "2", "]", "*", "rates", ".", "loc", "[", "...", ",", "'no1s'", "]", ".", "values", "[", ":", ",", "None", "]", ",", "A", "[", "2", ":", "4", "]", "*", "rates", ".", "loc", "[", "...", ",", "'no1d'", "]", ".", "values", "[", ":", ",", "None", "]", ",", "A", "[", "4", ":", "]", "*", "rates", ".", "loc", "[", "...", ",", "'noii2p'", "]", ".", "values", "[", ":", ",", "None", "]", ")", ",", "axis", "=", "-", "1", ")", "assert", "vnew", ".", "shape", "==", "(", "rates", ".", "shape", "[", "0", "]", ",", "A", ".", "size", ")", "return", "catvl", "(", "rates", ".", "alt_km", ",", "ver", ",", "vnew", ",", "lamb", ",", "lambnew", ",", "br", ")" ]
Removes the element from the multiset .
def discard(self, element, multiplicity=None):
    """Remove *element* from the multiset if it is present.

    Unlike ``remove``, a missing element is not an error.

    Returns:
        The multiplicity the element had before removal (0 if absent).

    Raises:
        ValueError: If *multiplicity* is negative.
    """
    counts = self._elements
    if element not in counts:
        return 0
    before = counts[element]
    if multiplicity is None or multiplicity >= before:
        del counts[element]
        self._total -= before
    elif multiplicity < 0:
        raise ValueError("Multiplicity must not be negative")
    elif multiplicity > 0:
        counts[element] -= multiplicity
        self._total -= multiplicity
    return before
4,588
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L989-L1048
[ "def", "getMetastable", "(", "rates", ",", "ver", ":", "np", ".", "ndarray", ",", "lamb", ",", "br", ",", "reactfn", ":", "Path", ")", ":", "with", "h5py", ".", "File", "(", "reactfn", ",", "'r'", ")", "as", "f", ":", "A", "=", "f", "[", "'/metastable/A'", "]", "[", ":", "]", "lambnew", "=", "f", "[", "'/metastable/lambda'", "]", ".", "value", ".", "ravel", "(", "order", "=", "'F'", ")", "# some are not 1-D!", "vnew", "=", "np", ".", "concatenate", "(", "(", "A", "[", ":", "2", "]", "*", "rates", ".", "loc", "[", "...", ",", "'no1s'", "]", ".", "values", "[", ":", ",", "None", "]", ",", "A", "[", "2", ":", "4", "]", "*", "rates", ".", "loc", "[", "...", ",", "'no1d'", "]", ".", "values", "[", ":", ",", "None", "]", ",", "A", "[", "4", ":", "]", "*", "rates", ".", "loc", "[", "...", ",", "'noii2p'", "]", ".", "values", "[", ":", ",", "None", "]", ")", ",", "axis", "=", "-", "1", ")", "assert", "vnew", ".", "shape", "==", "(", "rates", ".", "shape", "[", "0", "]", ",", "A", ".", "size", ")", "return", "catvl", "(", "rates", ".", "alt_km", ",", "ver", ",", "vnew", ",", "lamb", ",", "lambnew", ",", "br", ")" ]
Clean - up the resources of all initialized executors .
def shutdown_executors(wait=True):
    """Shut down every initialized executor and release its resources.

    Args:
        wait: Forwarded to each executor's shutdown call.

    Returns:
        A dict mapping each executor name to its shutdown result.
    """
    results = {}
    # Snapshot the keys: shutting down mutates the registry.
    for name in list(_EXECUTORS.keys()):
        results[name] = shutdown_executor(name, wait)
    return results
4,589
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/asy.py#L88-L102
[ "def", "get_or_generate_txt_vocab", "(", "data_dir", ",", "vocab_filename", ",", "vocab_size", ",", "filepatterns", ")", ":", "if", "isinstance", "(", "filepatterns", ",", "str", ")", ":", "filepatterns", "=", "[", "filepatterns", "]", "def", "generate", "(", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Generating vocab from %s\"", ",", "filepatterns", ")", "for", "filepattern", "in", "filepatterns", ":", "for", "filename", "in", "tf", ".", "gfile", ".", "Glob", "(", "filepattern", ")", ":", "with", "tf", ".", "gfile", ".", "GFile", "(", "filename", ",", "mode", "=", "\"r\"", ")", "as", "source_file", ":", "for", "line", "in", "source_file", ":", "yield", "line", ".", "strip", "(", ")", "return", "get_or_generate_vocab_inner", "(", "data_dir", ",", "vocab_filename", ",", "vocab_size", ",", "generate", "(", ")", ")" ]
Execute sol . _evaluate_node in an asynchronous thread .
def async_thread(sol, args, node_attr, node_id, *a, **kw):
    """Evaluate a dispatcher node in an asynchronous thread.

    Falls back to a synchronous ``sol._evaluate_node`` call when no
    executor is configured.  Otherwise the evaluation is submitted to the
    executor once every ``Future`` among the inputs has resolved.
    """
    executor = _get_executor(_executor_name(kw.get('executor', False), sol.dsp))
    if not executor:
        # No executor configured: evaluate synchronously.
        return sol._evaluate_node(args, node_attr, node_id, *a, **kw)

    candidates = args
    if node_attr['type'] == 'data' and (
            node_attr['wait_inputs'] or 'function' in node_attr):
        candidates = args[0].values()

    from concurrent.futures import Future
    pending = {v for v in candidates if isinstance(v, Future)}

    def _dispatch():
        # Hand the actual evaluation over to the executor's thread pool.
        return executor.thread(
            _async_eval, sol, args, node_attr, node_id, *a, **kw)

    if pending:
        # Some inputs are still running: chain the evaluation after them.
        result = Future()

        def _propagate(fut):
            try:
                result.set_result(fut.result())
            except BaseException as ex:
                result.set_exception(ex)

        def _on_input_done(fut=None):
            pending.discard(fut)
            if not pending:
                # All inputs resolved: submit the evaluation now.
                _dispatch().add_done_callback(_propagate)

        for fut in list(pending):
            fut.add_done_callback(_on_input_done)
    else:
        result = _dispatch()

    timeout = node_attr.get('await_result', False)
    if timeout is not False:
        # Caller asked to block until the result is available.
        return _await_result(result, timeout, sol, node_id)

    n = len(node_attr.get('outputs', []))
    return AsyncList(future=result, n=n) if n > 1 else result
4,590
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/asy.py#L206-L277
[ "def", "delete_classifier", "(", "self", ",", "classifier_id", ",", "*", "*", "kwargs", ")", ":", "if", "classifier_id", "is", "None", ":", "raise", "ValueError", "(", "'classifier_id must be provided'", ")", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'watson_vision_combined'", ",", "'V3'", ",", "'delete_classifier'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", "}", "url", "=", "'/v3/classifiers/{0}'", ".", "format", "(", "*", "self", ".", "_encode_path_vars", "(", "classifier_id", ")", ")", "response", "=", "self", ".", "request", "(", "method", "=", "'DELETE'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "accept_json", "=", "True", ")", "return", "response" ]
Return the result of a Future object .
def await_result(obj, timeout=None):
    """Return *obj* itself, or its result if it is a ``Future``.

    Args:
        obj: A plain value or a ``concurrent.futures.Future``.
        timeout: Seconds to wait for a ``Future`` result; ``None`` waits
            indefinitely.

    Returns:
        The plain value, or the value the ``Future`` resolves to.
    """
    from concurrent.futures import Future
    if isinstance(obj, Future):
        return obj.result(timeout)
    return obj
4,591
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/asy.py#L440-L466
[ "def", "_validate_version", "(", "connection", ",", "dsn", ")", ":", "try", ":", "version", "=", "get_stored_version", "(", "connection", ")", "except", "VersionIsNotStored", ":", "logger", ".", "debug", "(", "'Version not stored in the db: assuming new database creation.'", ")", "version", "=", "SCHEMA_VERSION", "_update_version", "(", "connection", ",", "version", ")", "assert", "isinstance", "(", "version", ",", "int", ")", "if", "version", ">", "10", "and", "version", "<", "100", ":", "raise", "DatabaseError", "(", "'You are trying to open an old SQLite database.'", ")", "if", "_migration_required", "(", "connection", ")", ":", "migrate", "(", "connection", ",", "dsn", ")" ]
Creates a cross - tab or pivot table from a normalised input table . Use this function to denormalize a table of normalized records .
def pivot(table, left, top, value):
    """Build a cross-tab (pivot table) from a normalised table of records.

    Args:
        table: Iterable of dict rows.
        left: Column names whose values form the row axis.
        top: Column names whose values form the column axis.
        value: Name of the numeric column to accumulate.

    Returns:
        A list of dicts, one per distinct *left* tuple, keyed by the
        *left* column names plus each distinct *top* tuple.  Cells with
        no data are filled with ''.
    """
    sums = {}
    row_order = []
    col_order = []
    for record in table:
        ykey = tuple(record[c] for c in left)   # e.g. ('Simon',)
        if ykey not in row_order:
            row_order.append(ykey)
        xkey = tuple(record[c] for c in top)    # e.g. ('2004',)
        if xkey not in col_order:
            col_order.append(xkey)
        cells = sums.setdefault(ykey, {})
        cells[xkey] = cells.get(xkey, 0) + record[value]
    # Fill in missing cells, e.g. a row with a value in 2004 but not 2005.
    for cells in sums.values():
        if len(cells) != len(col_order):
            for xkey in col_order:
                if xkey not in cells:
                    cells[xkey] = ''
    headings = list(left)
    headings.extend(col_order)
    # Sort each row's cells by column key so data is transposed correctly
    # even when the input rows arrive unordered.
    out = []
    for ykey in row_order:
        row = list(ykey)
        ordered_cols = sorted(sums[ykey].keys())
        row.extend(sums[ykey][k] for k in ordered_cols)
        out.append(dict(zip(headings, row)))
    return out
4,592
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/pivot.py#L14-L94
[ "def", "copy", "(", "self", ")", ":", "p", "=", "Project", "(", ")", "p", ".", "name", "=", "self", ".", "name", "p", ".", "path", "=", "self", ".", "path", "p", ".", "_plugin", "=", "self", ".", "_plugin", "p", ".", "stage", "=", "self", ".", "stage", ".", "copy", "(", ")", "p", ".", "stage", ".", "project", "=", "p", "for", "sprite", "in", "self", ".", "sprites", ":", "s", "=", "sprite", ".", "copy", "(", ")", "s", ".", "project", "=", "p", "p", ".", "sprites", ".", "append", "(", "s", ")", "for", "actor", "in", "self", ".", "actors", ":", "if", "isinstance", "(", "actor", ",", "Sprite", ")", ":", "p", ".", "actors", ".", "append", "(", "p", ".", "get_sprite", "(", "actor", ".", "name", ")", ")", "else", ":", "a", "=", "actor", ".", "copy", "(", ")", "if", "isinstance", "(", "a", ",", "Watcher", ")", ":", "if", "isinstance", "(", "a", ".", "target", ",", "Project", ")", ":", "a", ".", "target", "=", "p", "elif", "isinstance", "(", "a", ".", "target", ",", "Stage", ")", ":", "a", ".", "target", "=", "p", ".", "stage", "else", ":", "a", ".", "target", "=", "p", ".", "get_sprite", "(", "a", ".", "target", ".", "name", ")", "p", ".", "actors", ".", "append", "(", "a", ")", "p", ".", "variables", "=", "dict", "(", "(", "n", ",", "v", ".", "copy", "(", ")", ")", "for", "(", "n", ",", "v", ")", "in", "self", ".", "variables", ".", "items", "(", ")", ")", "p", ".", "lists", "=", "dict", "(", "(", "n", ",", "l", ".", "copy", "(", ")", ")", "for", "(", "n", ",", "l", ")", "in", "self", ".", "lists", ".", "items", "(", ")", ")", "p", ".", "thumbnail", "=", "self", ".", "thumbnail", "p", ".", "tempo", "=", "self", ".", "tempo", "p", ".", "notes", "=", "self", ".", "notes", "p", ".", "author", "=", "self", ".", "author", "return", "p" ]
Function to download HRRR data for GSSHA
def download_hrrr_for_gssha(main_directory,
                            forecast_start_date_string,  # EX. '20160913'
                            forecast_start_hour_string,  # EX. '00' to '23'
                            leftlon=-180, rightlon=180,
                            toplat=90, bottomlat=-90):
    """Download HRRR forecast data for GSSHA.

    Fetches the 00-18 hour surface forecast GRIB2 subsets from the NOMADS
    filter service into ``main_directory/forecast_start_date_string``.

    Returns:
        The list of downloaded file paths; empty if any download failed
        (partial downloads are cleaned up first).
    """
    out_directory = path.join(main_directory, forecast_start_date_string)
    try:
        mkdir(out_directory)
    except OSError:
        # Directory already exists.
        pass

    # Forecast lead times 00-18 hours, zero padded.
    forecast_timestep_hour_string_array = \
        ['{0:02d}'.format(i) for i in range(19)]

    downloaded_file_list = []
    for forecast_timestep_hour_string in forecast_timestep_hour_string_array:
        file_name = 'hrrr.t{0}z.wrfsfcf{1}.grib2'.format(
            forecast_start_hour_string, forecast_timestep_hour_string)
        payload = {
            'file': file_name,
            'lev_10_m_above_ground': 'on',
            'lev_2_m_above_ground': 'on',
            'lev_entire_atmosphere': 'on',
            'lev_surface': 'on',
            'var_DSWRF': 'on',
            'var_PRATE': 'on',
            'var_PRES': 'on',
            'var_RH': 'on',
            'var_TMP': 'on',
            'var_UGRD': 'on',
            'var_VGRD': 'on',
            'var_TCDC': 'on',
            'subregion': '',
            'leftlon': str(leftlon),
            'rightlon': str(rightlon),
            'toplat': str(toplat),
            'bottomlat': str(bottomlat),
            'dir': '/hrrr.{0}'.format(forecast_start_date_string),
        }
        r = requests.get(
            'http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl',
            params=payload, stream=True)
        if r.status_code == requests.codes.ok:
            out_file = path.join(out_directory, file_name)
            downloaded_file_list.append(out_file)
            with open(out_file, 'wb') as fd:
                for chunk in r.iter_content(chunk_size=1024):
                    fd.write(chunk)
        else:
            log.error("Problem downloading {0}".format(file_name))
            # Clean up partial results so callers never see a half set.
            for filename in downloaded_file_list:
                try:
                    remove(filename)
                except OSError:
                    pass
            downloaded_file_list = []
            break
    return downloaded_file_list
4,593
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/hrrr_to_gssha.py#L24-L114
[ "def", "comment", "(", "self", ",", "text", ",", "comment_prefix", "=", "'#'", ")", ":", "comment", "=", "Comment", "(", "self", ".", "_container", ")", "if", "not", "text", ".", "startswith", "(", "comment_prefix", ")", ":", "text", "=", "\"{} {}\"", ".", "format", "(", "comment_prefix", ",", "text", ")", "if", "not", "text", ".", "endswith", "(", "'\\n'", ")", ":", "text", "=", "\"{}{}\"", ".", "format", "(", "text", ",", "'\\n'", ")", "comment", ".", "add_line", "(", "text", ")", "self", ".", "_container", ".", "structure", ".", "insert", "(", "self", ".", "_idx", ",", "comment", ")", "self", ".", "_idx", "+=", "1", "return", "self" ]
Patch the current RAML ResourceNode with the resource matching the given method, if it exists.
def _patch_resource ( self , method ) : resource = self . client . get_resource ( "" , self . resource . path , method ) if not resource : raise UnsupportedResourceMethodError ( self . resource . path , method ) self . resource = resource
4,594
https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/core.py#L61-L77
[ "def", "_CompressHistogram", "(", "self", ",", "histo_ev", ")", ":", "return", "CompressedHistogramEvent", "(", "histo_ev", ".", "wall_time", ",", "histo_ev", ".", "step", ",", "compressor", ".", "compress_histogram_proto", "(", "histo_ev", ".", "histogram_value", ",", "self", ".", "_compression_bps", ")", ")" ]
Parse RAML file
def parse_raml(self):
    """Load and parse the RAML file, storing the result on ``self.raml``.

    The RAML source may be a URL (downloaded via ``utils``) or a local
    file path (read as UTF-8).
    """
    if utils.is_url(self.ramlfile):
        raml_source = utils.download_file(self.ramlfile)
    else:
        with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f:
            raml_source = raml_f.read()
    loader = ramlfications.loads(raml_source)
    config = ramlfications.setup_config(self.ramlconfig)
    self.raml = ramlfications.parse_raml(loader, config)
4,595
https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/core.py#L112-L124
[ "def", "recv", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", ":", "try", ":", "testsock", "=", "self", ".", "_zmq", ".", "select", "(", "[", "self", ".", "socket", "]", ",", "[", "]", ",", "[", "]", ",", "timeout", ")", "[", "0", "]", "except", "zmq", ".", "ZMQError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EINTR", ":", "testsock", "=", "None", "else", ":", "raise", "if", "not", "testsock", ":", "return", "rv", "=", "self", ".", "socket", ".", "recv", "(", "self", ".", "_zmq", ".", "NOBLOCK", ")", "return", "LogRecord", ".", "from_dict", "(", "json", ".", "loads", "(", "rv", ")", ")", "else", ":", "return", "super", "(", "ZeroMQPullSubscriber", ",", "self", ")", ".", "recv", "(", "timeout", ")" ]
Gets a resource by it s path and optional by it s method
def get_resource(self, base_resource_path, resource_path, method=None):
    """Find a resource node by its path, optionally filtered by method.

    A resource may match literally (with or without a trailing slash) or
    dynamically via a URI parameter, in which case a ``NodeParameter``
    wrapper is returned.

    Returns:
        The matching resource, a ``NodeParameter`` for dynamic matches,
        or ``None`` if nothing matches.
    """
    literal_path = base_resource_path + resource_path
    parameter_path = base_resource_path + "{" + resource_path + "}"
    for node in self.raml.resources:
        method_ok = method is None or node.method == method
        if not method_ok:
            continue
        if node.path in (literal_path, literal_path + '/'):
            return node
        if node.path == parameter_path:
            return NodeParameter(resource=node, parameter=resource_path)
    return None
4,596
https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/core.py#L140-L163
[ "def", "team_stats", "(", "game_id", ")", ":", "# get data from data module", "box_score", "=", "mlbgame", ".", "data", ".", "get_box_score", "(", "game_id", ")", "raw_box_score", "=", "mlbgame", ".", "data", ".", "get_raw_box_score", "(", "game_id", ")", "# parse XML", "box_score_tree", "=", "etree", ".", "parse", "(", "box_score", ")", ".", "getroot", "(", ")", "raw_box_score_tree", "=", "etree", ".", "parse", "(", "raw_box_score", ")", ".", "getroot", "(", ")", "# get pitching and batting ingo", "pitching", "=", "box_score_tree", ".", "findall", "(", "'pitching'", ")", "batting", "=", "box_score_tree", ".", "findall", "(", "'batting'", ")", "# dictionary for output", "output", "=", "{", "}", "output", "=", "__team_stats_info", "(", "pitching", ",", "output", ",", "'pitching'", ")", "output", "=", "__team_stats_info", "(", "batting", ",", "output", ",", "'batting'", ")", "output", "=", "__raw_team_stats_info", "(", "raw_box_score_tree", ",", "output", ")", "return", "output" ]
Like self . auto but calculates the next unit if > 999 . 99 .
def auto_no_thousands(self):
    """Pick a unit like ``self.auto``, but step up a unit above 999.99.

    Returns:
        Tuple of (converted value, unit label).
    """
    value = self._value
    if value >= 1000000000000:
        return self.TiB, 'TiB'
    if value >= 1000000000:
        return self.GiB, 'GiB'
    if value >= 1000000:
        return self.MiB, 'MiB'
    if value >= 1000:
        return self.KiB, 'KiB'
    return self.B, 'B'
4,597
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/units.py#L96-L107
[ "def", "remove_organization", "(", "self", ",", "service_desk_id", ",", "organization_id", ")", ":", "log", ".", "warning", "(", "'Removing organization...'", ")", "url", "=", "'rest/servicedeskapi/servicedesk/{}/organization'", ".", "format", "(", "service_desk_id", ")", "data", "=", "{", "'organizationId'", ":", "organization_id", "}", "return", "self", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
Prints an error message to stderr and exits with a status of 1 by default .
def error(message, code=1):
    """Print *message* to stderr and exit with *code* (default 1).

    A falsy message prints a blank line instead of the ERROR prefix.
    """
    if message:
        print('ERROR: {0}'.format(message), file=sys.stderr)
    else:
        print(file=sys.stderr)
    sys.exit(code)
4,598
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/example_colors.py#L30-L36
[ "def", "prepare_blobs", "(", "self", ")", ":", "self", ".", "raw_header", "=", "self", ".", "extract_header", "(", ")", "if", "self", ".", "cache_enabled", ":", "self", ".", "_cache_offsets", "(", ")" ]
This function updates the paths in the HMET card file to point to the new location of the HMET data. This is necessary because the file paths are absolute and need to be updated if the data is moved.
def update_hmet_card_file(hmet_card_file_path, new_hmet_data_path):
    """Rewrite the HMET card file so each entry points at *new_hmet_data_path*.

    The card file stores absolute paths, so it must be regenerated when
    the HMET data files are moved.  The rewrite goes through a temporary
    ``*_tmp`` file which then replaces the original.
    """
    temp_path = "{0}_tmp".format(hmet_card_file_path)
    try:
        remove(temp_path)
    except OSError:
        pass
    copy(hmet_card_file_path, temp_path)
    # Write with CRLF line endings, matching the original file format.
    with io_open(temp_path, 'w', newline='\r\n') as new_card_file:
        with open(hmet_card_file_path) as old_card_file:
            for date_path in old_card_file:
                new_card_file.write(u"{0}\n".format(
                    path.join(new_hmet_data_path,
                              path.basename(date_path))))
    try:
        remove(hmet_card_file_path)
    except OSError:
        pass
    rename(temp_path, hmet_card_file_path)
4,599
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L32-L67
[ "def", "wait_until", "(", "predicate", ",", "timeout", "=", "30", ")", ":", "not_expired", "=", "Timeout", "(", "timeout", ")", "while", "not_expired", "(", ")", ":", "r", "=", "predicate", "(", ")", "if", "r", ":", "break" ]