id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
22,100
ArangoDB-Community/pyArango
pyArango/connection.py
Connection.createDatabase
def createDatabase(self, name, **dbArgs) : "use dbArgs for arguments other than name. for a full list of arguments please have a look at arangoDB's doc" dbArgs['name'] = name payload = json.dumps(dbArgs, default=str) url = self.URL + "/database" r = self.session.post(url, data = payload) data = r.json() if r.status_code == 201 and not data["error"] : db = Database(self, name) self.databases[name] = db return self.databases[name] else : raise CreationError(data["errorMessage"], r.content)
python
def createDatabase(self, name, **dbArgs) :
    """Create a database on the server and register it locally.

    use dbArgs for arguments other than name. for a full list of
    arguments please have a look at arangoDB's doc
    """
    dbArgs['name'] = name
    response = self.session.post(self.URL + "/database",
                                 data=json.dumps(dbArgs, default=str))
    body = response.json()
    # Anything other than a clean 201 is surfaced as a CreationError.
    if response.status_code != 201 or body["error"]:
        raise CreationError(body["errorMessage"], response.content)
    self.databases[name] = Database(self, name)
    return self.databases[name]
[ "def", "createDatabase", "(", "self", ",", "name", ",", "*", "*", "dbArgs", ")", ":", "dbArgs", "[", "'name'", "]", "=", "name", "payload", "=", "json", ".", "dumps", "(", "dbArgs", ",", "default", "=", "str", ")", "url", "=", "self", ".", "URL", ...
use dbArgs for arguments other than name. for a full list of arguments please have a look at arangoDB's doc
[ "use", "dbArgs", "for", "arguments", "other", "than", "name", ".", "for", "a", "full", "list", "of", "arguments", "please", "have", "a", "look", "at", "arangoDB", "s", "doc" ]
dd72e5f6c540e5e148943d615ddf7553bb78ce0b
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/connection.py#L151-L163
22,101
sensu-plugins/sensu-plugin-python
sensu_plugin/plugin.py
SensuPlugin.output
def output(self, args): ''' Print the output message. ''' print("SensuPlugin: {}".format(' '.join(str(a) for a in args)))
python
def output(self, args):
    '''
    Print the output message.

    ``args`` is an iterable of message parts; each part is
    stringified and joined with spaces. ``None`` (or an empty
    iterable) produces an empty message.
    '''
    # BUG FIX: __make_dynamic passes args=None when the exit method
    # received no arguments; the original raised TypeError trying to
    # iterate None. Treat None as "no message".
    message = '' if args is None else ' '.join(str(a) for a in args)
    print("SensuPlugin: {}".format(message))
[ "def", "output", "(", "self", ",", "args", ")", ":", "print", "(", "\"SensuPlugin: {}\"", ".", "format", "(", "' '", ".", "join", "(", "str", "(", "a", ")", "for", "a", "in", "args", ")", ")", ")" ]
Print the output message.
[ "Print", "the", "output", "message", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/plugin.py#L51-L55
22,102
sensu-plugins/sensu-plugin-python
sensu_plugin/plugin.py
SensuPlugin.__make_dynamic
def __make_dynamic(self, method): ''' Create a method for each of the exit codes. ''' def dynamic(*args): self.plugin_info['status'] = method if not args: args = None self.output(args) sys.exit(getattr(self.exit_code, method)) method_lc = method.lower() dynamic.__doc__ = "%s method" % method_lc dynamic.__name__ = method_lc setattr(self, dynamic.__name__, dynamic)
python
def __make_dynamic(self, method):
    '''
    Create an instance method for the given exit-code name.

    The generated method (named ``method.lower()``) records the
    status in plugin_info, prints its arguments via self.output and
    exits the process with the matching exit code.
    '''
    def dynamic(*args):
        self.plugin_info['status'] = method
        # No positional arguments is signalled to output() as None.
        self.output(args if args else None)
        sys.exit(getattr(self.exit_code, method))

    lowered = method.lower()
    dynamic.__name__ = lowered
    dynamic.__doc__ = "%s method" % lowered
    setattr(self, lowered, dynamic)
[ "def", "__make_dynamic", "(", "self", ",", "method", ")", ":", "def", "dynamic", "(", "*", "args", ")", ":", "self", ".", "plugin_info", "[", "'status'", "]", "=", "method", "if", "not", "args", ":", "args", "=", "None", "self", ".", "output", "(", ...
Create a method for each of the exit codes.
[ "Create", "a", "method", "for", "each", "of", "the", "exit", "codes", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/plugin.py#L57-L71
22,103
sensu-plugins/sensu-plugin-python
sensu_plugin/plugin.py
SensuPlugin.__exitfunction
def __exitfunction(self): ''' Method called by exit hook, ensures that both an exit code and output is supplied, also catches errors. ''' if self._hook.exit_code is None and self._hook.exception is None: print("Check did not exit! You should call an exit code method.") sys.stdout.flush() os._exit(1) elif self._hook.exception: print("Check failed to run: %s, %s" % (sys.last_type, traceback.format_tb(sys.last_traceback))) sys.stdout.flush() os._exit(2)
python
def __exitfunction(self):
    '''
    Called by the exit hook: verify that an exit-code method was
    invoked and that no exception escaped, hard-exiting with a
    non-zero status otherwise.
    '''
    missing_exit_code = self._hook.exit_code is None
    missing_exception = self._hook.exception is None
    if missing_exit_code and missing_exception:
        print("Check did not exit! You should call an exit code method.")
        sys.stdout.flush()
        os._exit(1)
    elif self._hook.exception:
        print("Check failed to run: %s, %s" %
              (sys.last_type, traceback.format_tb(sys.last_traceback)))
        sys.stdout.flush()
        os._exit(2)
[ "def", "__exitfunction", "(", "self", ")", ":", "if", "self", ".", "_hook", ".", "exit_code", "is", "None", "and", "self", ".", "_hook", ".", "exception", "is", "None", ":", "print", "(", "\"Check did not exit! You should call an exit code method.\"", ")", "sys"...
Method called by exit hook, ensures that both an exit code and output is supplied, also catches errors.
[ "Method", "called", "by", "exit", "hook", "ensures", "that", "both", "an", "exit", "code", "and", "output", "is", "supplied", "also", "catches", "errors", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/plugin.py#L79-L92
22,104
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.run
def run(self): ''' Set up the event object, global settings and command line arguments. ''' # Parse the stdin into a global event object stdin = self.read_stdin() self.event = self.read_event(stdin) # Prepare global settings self.settings = get_settings() self.api_settings = self.get_api_settings() # Prepare command line arguments and self.parser = argparse.ArgumentParser() # set up the 2.x to 1.x event mapping argument self.parser.add_argument("--map-v2-event-into-v1", action="store_true", default=False, dest="v2event") if hasattr(self, 'setup'): self.setup() (self.options, self.remain) = self.parser.parse_known_args() # map the event if required if (self.options.v2event or os.environ.get("SENSU_MAP_V2_EVENT_INTO_V1")): self.event = map_v2_event_into_v1(self.event) # Filter (deprecated) and handle self.filter() self.handle()
python
def run(self):
    '''
    Set up the event object, global settings and command line arguments.
    '''
    # Parse the stdin into a global event object
    self.event = self.read_event(self.read_stdin())

    # Prepare global settings
    self.settings = get_settings()
    self.api_settings = self.get_api_settings()

    # Prepare command line arguments and
    self.parser = argparse.ArgumentParser()

    # set up the 2.x to 1.x event mapping argument
    self.parser.add_argument("--map-v2-event-into-v1",
                             action="store_true",
                             default=False,
                             dest="v2event")

    if hasattr(self, 'setup'):
        self.setup()
    self.options, self.remain = self.parser.parse_known_args()

    # map the event if required
    wants_mapping = (self.options.v2event or
                     os.environ.get("SENSU_MAP_V2_EVENT_INTO_V1"))
    if wants_mapping:
        self.event = map_v2_event_into_v1(self.event)

    # Filter (deprecated) and handle
    self.filter()
    self.handle()
[ "def", "run", "(", "self", ")", ":", "# Parse the stdin into a global event object", "stdin", "=", "self", ".", "read_stdin", "(", ")", "self", ".", "event", "=", "self", ".", "read_event", "(", "stdin", ")", "# Prepare global settings", "self", ".", "settings",...
Set up the event object, global settings and command line arguments.
[ "Set", "up", "the", "event", "object", "global", "settings", "and", "command", "line", "arguments", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L31-L65
22,105
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.filter
def filter(self): ''' Filters exit the proccess if the event should not be handled. Filtering events is deprecated and will be removed in a future release. ''' if self.deprecated_filtering_enabled(): print('warning: event filtering in sensu-plugin is deprecated,' + 'see http://bit.ly/sensu-plugin') self.filter_disabled() self.filter_silenced() self.filter_dependencies() if self.deprecated_occurrence_filtering(): print('warning: occurrence filtering in sensu-plugin is' + 'deprecated, see http://bit.ly/sensu-plugin') self.filter_repeated()
python
def filter(self):
    '''
    Filters exit the process if the event should not be handled.

    Filtering events is deprecated and will be removed in a future
    release.
    '''
    if self.deprecated_filtering_enabled():
        # BUG FIX: original concatenated 'deprecated,' + 'see ...'
        # producing "deprecated,see" with no space.
        print('warning: event filtering in sensu-plugin is deprecated, '
              'see http://bit.ly/sensu-plugin')
    self.filter_disabled()
    self.filter_silenced()
    self.filter_dependencies()
    if self.deprecated_occurrence_filtering():
        # BUG FIX: original produced "isdeprecated" with no space.
        print('warning: occurrence filtering in sensu-plugin is '
              'deprecated, see http://bit.ly/sensu-plugin')
    self.filter_repeated()
[ "def", "filter", "(", "self", ")", ":", "if", "self", ".", "deprecated_filtering_enabled", "(", ")", ":", "print", "(", "'warning: event filtering in sensu-plugin is deprecated,'", "+", "'see http://bit.ly/sensu-plugin'", ")", "self", ".", "filter_disabled", "(", ")", ...
Filters exit the process if the event should not be handled. Filtering events is deprecated and will be removed in a future release.
[ "Filters", "exit", "the", "proccess", "if", "the", "event", "should", "not", "be", "handled", ".", "Filtering", "events", "is", "deprecated", "and", "will", "be", "removed", "in", "a", "future", "release", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L95-L111
22,106
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.bail
def bail(self, msg): ''' Gracefully terminate with message ''' client_name = self.event['client'].get('name', 'error:no-client-name') check_name = self.event['check'].get('name', 'error:no-check-name') print('{}: {}/{}'.format(msg, client_name, check_name)) sys.exit(0)
python
def bail(self, msg):
    '''
    Gracefully terminate with message, including the client and
    check names for context.
    '''
    client = self.event['client'].get('name', 'error:no-client-name')
    check = self.event['check'].get('name', 'error:no-check-name')
    print('{}: {}/{}'.format(msg, client, check))
    sys.exit(0)
[ "def", "bail", "(", "self", ",", "msg", ")", ":", "client_name", "=", "self", ".", "event", "[", "'client'", "]", ".", "get", "(", "'name'", ",", "'error:no-client-name'", ")", "check_name", "=", "self", ".", "event", "[", "'check'", "]", ".", "get", ...
Gracefully terminate with message
[ "Gracefully", "terminate", "with", "message" ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L135-L142
22,107
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.api_request
def api_request(self, method, path): ''' Query Sensu api for information. ''' if not hasattr(self, 'api_settings'): ValueError('api.json settings not found') if method.lower() == 'get': _request = requests.get elif method.lower() == 'post': _request = requests.post domain = self.api_settings['host'] uri = '{}:{}/{}'.format(domain, self.api_settings['port'], path) if self.api_settings.get('user') and self.api_settings.get('password'): auth = (self.api_settings['user'], self.api_settings['password']) else: auth = () req = _request(uri, auth=auth) return req
python
def api_request(self, method, path):
    '''
    Query Sensu api for information.

    method -- HTTP method, 'get' or 'post' (case-insensitive).
    path   -- request path appended to the configured host:port.

    Returns the requests response object. Raises ValueError when the
    api settings are missing or the method is unsupported.
    '''
    if not hasattr(self, 'api_settings'):
        # BUG FIX: the original constructed this ValueError but never
        # raised it, so execution fell through to an AttributeError.
        raise ValueError('api.json settings not found')

    verb = method.lower()
    if verb == 'get':
        _request = requests.get
    elif verb == 'post':
        _request = requests.post
    else:
        # BUG FIX: the original left _request unbound here, causing
        # an UnboundLocalError; fail with a clear message instead.
        raise ValueError('unsupported method: {}'.format(method))

    domain = self.api_settings['host']
    uri = '{}:{}/{}'.format(domain, self.api_settings['port'], path)
    if self.api_settings.get('user') and self.api_settings.get('password'):
        auth = (self.api_settings['user'], self.api_settings['password'])
    else:
        auth = ()
    return _request(uri, auth=auth)
[ "def", "api_request", "(", "self", ",", "method", ",", "path", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'api_settings'", ")", ":", "ValueError", "(", "'api.json settings not found'", ")", "if", "method", ".", "lower", "(", ")", "==", "'get'", ...
Query Sensu api for information.
[ "Query", "Sensu", "api", "for", "information", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L172-L191
22,108
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.event_exists
def event_exists(self, client, check): ''' Query Sensu API for event. ''' return self.api_request( 'get', 'events/{}/{}'.format(client, check) ).status_code == 200
python
def event_exists(self, client, check):
    '''
    Query the Sensu API and report whether an event is currently
    registered for the given client/check pair.
    '''
    response = self.api_request('get',
                                'events/{}/{}'.format(client, check))
    return response.status_code == 200
[ "def", "event_exists", "(", "self", ",", "client", ",", "check", ")", ":", "return", "self", ".", "api_request", "(", "'get'", ",", "'events/{}/{}'", ".", "format", "(", "client", ",", "check", ")", ")", ".", "status_code", "==", "200" ]
Query Sensu API for event.
[ "Query", "Sensu", "API", "for", "event", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L199-L206
22,109
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.filter_silenced
def filter_silenced(self): ''' Determine whether a check is silenced and shouldn't handle. ''' stashes = [ ('client', '/silence/{}'.format(self.event['client']['name'])), ('check', '/silence/{}/{}'.format( self.event['client']['name'], self.event['check']['name'])), ('check', '/silence/all/{}'.format(self.event['check']['name'])) ] for scope, path in stashes: if self.stash_exists(path): self.bail(scope + ' alerts silenced')
python
def filter_silenced(self):
    '''
    Bail out when alerts for this client or check have been silenced
    via a stash.
    '''
    client_name = self.event['client']['name']
    check_name = self.event['check']['name']
    stashes = (
        ('client', '/silence/{}'.format(client_name)),
        ('check', '/silence/{}/{}'.format(client_name, check_name)),
        ('check', '/silence/all/{}'.format(check_name)),
    )
    for scope, path in stashes:
        if self.stash_exists(path):
            self.bail(scope + ' alerts silenced')
[ "def", "filter_silenced", "(", "self", ")", ":", "stashes", "=", "[", "(", "'client'", ",", "'/silence/{}'", ".", "format", "(", "self", ".", "event", "[", "'client'", "]", "[", "'name'", "]", ")", ")", ",", "(", "'check'", ",", "'/silence/{}/{}'", "."...
Determine whether a check is silenced and shouldn't handle.
[ "Determine", "whether", "a", "check", "is", "silenced", "and", "shouldn", "t", "handle", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L216-L229
22,110
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.filter_dependencies
def filter_dependencies(self): ''' Determine whether a check has dependencies. ''' dependencies = self.event['check'].get('dependencies', None) if dependencies is None or not isinstance(dependencies, list): return for dependency in self.event['check']['dependencies']: if not str(dependency): continue dependency_split = tuple(dependency.split('/')) # If there's a dependency on a check from another client, then use # that client name, otherwise assume same client. if len(dependency_split) == 2: client, check = dependency_split else: client = self.event['client']['name'] check = dependency_split[0] if self.event_exists(client, check): self.bail('check dependency event exists')
python
def filter_dependencies(self):
    '''
    Bail out when an event already exists for any check this check
    depends on.
    '''
    dependencies = self.event['check'].get('dependencies', None)
    if dependencies is None or not isinstance(dependencies, list):
        return
    for dependency in dependencies:
        if not str(dependency):
            continue
        parts = tuple(dependency.split('/'))
        # A "client/check" dependency names another client; a bare
        # check name implies the current client.
        if len(parts) == 2:
            client, check = parts
        else:
            client = self.event['client']['name']
            check = parts[0]
        if self.event_exists(client, check):
            self.bail('check dependency event exists')
[ "def", "filter_dependencies", "(", "self", ")", ":", "dependencies", "=", "self", ".", "event", "[", "'check'", "]", ".", "get", "(", "'dependencies'", ",", "None", ")", "if", "dependencies", "is", "None", "or", "not", "isinstance", "(", "dependencies", ",...
Determine whether a check has dependencies.
[ "Determine", "whether", "a", "check", "has", "dependencies", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L231-L250
22,111
sensu-plugins/sensu-plugin-python
sensu_plugin/handler.py
SensuHandler.filter_repeated
def filter_repeated(self): ''' Determine whether a check is repeating. ''' defaults = { 'occurrences': 1, 'interval': 30, 'refresh': 1800 } # Override defaults with anything defined in the settings if isinstance(self.settings['sensu_plugin'], dict): defaults.update(self.settings['sensu_plugin']) occurrences = int(self.event['check'].get( 'occurrences', defaults['occurrences'])) interval = int(self.event['check'].get( 'interval', defaults['interval'])) refresh = int(self.event['check'].get( 'refresh', defaults['refresh'])) if self.event['occurrences'] < occurrences: self.bail('not enough occurrences') if (self.event['occurrences'] > occurrences and self.event['action'] == 'create'): return number = int(refresh / interval) if (number == 0 or (self.event['occurrences'] - occurrences) % number == 0): return self.bail('only handling every ' + str(number) + ' occurrences')
python
def filter_repeated(self): ''' Determine whether a check is repeating. ''' defaults = { 'occurrences': 1, 'interval': 30, 'refresh': 1800 } # Override defaults with anything defined in the settings if isinstance(self.settings['sensu_plugin'], dict): defaults.update(self.settings['sensu_plugin']) occurrences = int(self.event['check'].get( 'occurrences', defaults['occurrences'])) interval = int(self.event['check'].get( 'interval', defaults['interval'])) refresh = int(self.event['check'].get( 'refresh', defaults['refresh'])) if self.event['occurrences'] < occurrences: self.bail('not enough occurrences') if (self.event['occurrences'] > occurrences and self.event['action'] == 'create'): return number = int(refresh / interval) if (number == 0 or (self.event['occurrences'] - occurrences) % number == 0): return self.bail('only handling every ' + str(number) + ' occurrences')
[ "def", "filter_repeated", "(", "self", ")", ":", "defaults", "=", "{", "'occurrences'", ":", "1", ",", "'interval'", ":", "30", ",", "'refresh'", ":", "1800", "}", "# Override defaults with anything defined in the settings", "if", "isinstance", "(", "self", ".", ...
Determine whether a check is repeating.
[ "Determine", "whether", "a", "check", "is", "repeating", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L252-L285
22,112
sensu-plugins/sensu-plugin-python
sensu_plugin/utils.py
config_files
def config_files(): ''' Get list of currently used config files. ''' sensu_loaded_tempfile = os.environ.get('SENSU_LOADED_TEMPFILE') sensu_config_files = os.environ.get('SENSU_CONFIG_FILES') sensu_v1_config = '/etc/sensu/config.json' sensu_v1_confd = '/etc/sensu/conf.d' if sensu_loaded_tempfile and os.path.isfile(sensu_loaded_tempfile): with open(sensu_loaded_tempfile, 'r') as tempfile: contents = tempfile.read() return contents.split(':') elif sensu_config_files: return sensu_config_files.split(':') else: files = [] filenames = [] if os.path.isfile(sensu_v1_config): files = [sensu_v1_config] if os.path.isdir(sensu_v1_confd): filenames = [f for f in os.listdir(sensu_v1_confd) if os.path.splitext(f)[1] == '.json'] for filename in filenames: files.append('{}/{}'.format(sensu_v1_confd, filename)) return files
python
def config_files():
    '''
    Return the list of currently used config files.

    Honours the SENSU_LOADED_TEMPFILE and SENSU_CONFIG_FILES
    environment variables before falling back to the default Sensu
    v1 locations under /etc/sensu.
    '''
    loaded_tempfile = os.environ.get('SENSU_LOADED_TEMPFILE')
    env_files = os.environ.get('SENSU_CONFIG_FILES')

    if loaded_tempfile and os.path.isfile(loaded_tempfile):
        with open(loaded_tempfile, 'r') as tempfile:
            return tempfile.read().split(':')

    if env_files:
        return env_files.split(':')

    found = []
    if os.path.isfile('/etc/sensu/config.json'):
        found.append('/etc/sensu/config.json')
    confd = '/etc/sensu/conf.d'
    if os.path.isdir(confd):
        for entry in os.listdir(confd):
            if os.path.splitext(entry)[1] == '.json':
                found.append('{}/{}'.format(confd, entry))
    return found
[ "def", "config_files", "(", ")", ":", "sensu_loaded_tempfile", "=", "os", ".", "environ", ".", "get", "(", "'SENSU_LOADED_TEMPFILE'", ")", "sensu_config_files", "=", "os", ".", "environ", ".", "get", "(", "'SENSU_CONFIG_FILES'", ")", "sensu_v1_config", "=", "'/e...
Get list of currently used config files.
[ "Get", "list", "of", "currently", "used", "config", "files", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L10-L34
22,113
sensu-plugins/sensu-plugin-python
sensu_plugin/utils.py
get_settings
def get_settings(): ''' Get all currently loaded settings. ''' settings = {} for config_file in config_files(): config_contents = load_config(config_file) if config_contents is not None: settings = deep_merge(settings, config_contents) return settings
python
def get_settings():
    '''
    Load and deep-merge every currently used config file into a
    single settings dict.
    '''
    merged = {}
    for config_file in config_files():
        contents = load_config(config_file)
        if contents is not None:
            merged = deep_merge(merged, contents)
    return merged
[ "def", "get_settings", "(", ")", ":", "settings", "=", "{", "}", "for", "config_file", "in", "config_files", "(", ")", ":", "config_contents", "=", "load_config", "(", "config_file", ")", "if", "config_contents", "is", "not", "None", ":", "settings", "=", ...
Get all currently loaded settings.
[ "Get", "all", "currently", "loaded", "settings", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L37-L46
22,114
sensu-plugins/sensu-plugin-python
sensu_plugin/utils.py
load_config
def load_config(filename): ''' Read contents of config file. ''' try: with open(filename, 'r') as config_file: return json.loads(config_file.read()) except IOError: pass
python
def load_config(filename):
    '''
    Read and parse a JSON config file.

    Returns None when the file cannot be opened — missing config
    files are a normal, ignorable case.
    '''
    try:
        with open(filename, 'r') as config_file:
            return json.load(config_file)
    except IOError:
        return None
[ "def", "load_config", "(", "filename", ")", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "config_file", ":", "return", "json", ".", "loads", "(", "config_file", ".", "read", "(", ")", ")", "except", "IOError", ":", "pass" ]
Read contents of config file.
[ "Read", "contents", "of", "config", "file", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L49-L57
22,115
sensu-plugins/sensu-plugin-python
sensu_plugin/utils.py
deep_merge
def deep_merge(dict_one, dict_two): ''' Deep merge two dicts. ''' merged = dict_one.copy() for key, value in dict_two.items(): # value is equivalent to dict_two[key] if (key in dict_one and isinstance(dict_one[key], dict) and isinstance(value, dict)): merged[key] = deep_merge(dict_one[key], value) elif (key in dict_one and isinstance(dict_one[key], list) and isinstance(value, list)): merged[key] = list(set(dict_one[key] + value)) else: merged[key] = value return merged
python
def deep_merge(dict_one, dict_two):
    '''
    Deep merge two dicts and return the result.

    Nested dicts are merged key-by-key; lists are unioned (order and
    duplicates are not preserved); any other clash is resolved in
    favour of dict_two.
    '''
    merged = dict_one.copy()
    for key, new_value in dict_two.items():
        old_value = dict_one.get(key)
        both_dicts = (key in dict_one and isinstance(old_value, dict)
                      and isinstance(new_value, dict))
        both_lists = (key in dict_one and isinstance(old_value, list)
                      and isinstance(new_value, list))
        if both_dicts:
            merged[key] = deep_merge(old_value, new_value)
        elif both_lists:
            merged[key] = list(set(old_value + new_value))
        else:
            merged[key] = new_value
    return merged
[ "def", "deep_merge", "(", "dict_one", ",", "dict_two", ")", ":", "merged", "=", "dict_one", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "dict_two", ".", "items", "(", ")", ":", "# value is equivalent to dict_two[key]", "if", "(", "key", "in", ...
Deep merge two dicts.
[ "Deep", "merge", "two", "dicts", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L60-L77
22,116
sensu-plugins/sensu-plugin-python
sensu_plugin/utils.py
map_v2_event_into_v1
def map_v2_event_into_v1(event): ''' Helper method to convert Sensu 2.x event into Sensu 1.x event. ''' # return the event if it has already been mapped if "v2_event_mapped_into_v1" in event: return event # Trigger mapping code if enity exists and client does not if not bool(event.get('client')) and "entity" in event: event['client'] = event['entity'] # Fill in missing client attributes if "name" not in event['client']: event['client']['name'] = event['entity']['id'] if "subscribers" not in event['client']: event['client']['subscribers'] = event['entity']['subscriptions'] # Fill in renamed check attributes expected in 1.4 event if "subscribers" not in event['check']: event['check']['subscribers'] = event['check']['subscriptions'] if "source" not in event['check']: event['check']['source'] = event['check']['proxy_entity_id'] # Mimic 1.4 event action based on 2.0 event state # action used in logs and fluentd plugins handlers action_state_mapping = {'flapping': 'flapping', 'passing': 'resolve', 'failing': 'create'} if "state" in event['check']: state = event['check']['state'] else: state = "unknown::2.0_event" if "action" not in event and state.lower() in action_state_mapping: event['action'] = action_state_mapping[state.lower()] else: event['action'] = state # Mimic 1.4 event history based on 2.0 event history if "history" in event['check']: # save the original history event['check']['history_v2'] = deepcopy(event['check']['history']) legacy_history = [] for history in event['check']['history']: if isinstance(history['status'], int): legacy_history.append(str(history['status'])) else: legacy_history.append("3") event['check']['history'] = legacy_history # Setting flag indicating this function has already been called event['v2_event_mapped_into_v1'] = True # return the updated event return event
python
def map_v2_event_into_v1(event):
    '''
    Helper method to convert a Sensu 2.x event into a Sensu 1.x event.

    The translation mutates and returns the same dict; a marker key
    prevents the event from being mapped twice.
    '''
    # Already translated? Hand it straight back.
    if "v2_event_mapped_into_v1" in event:
        return event

    # Trigger mapping code if entity exists and client does not.
    if not bool(event.get('client')) and "entity" in event:
        event['client'] = event['entity']

    # Fill in missing client attributes.
    if "name" not in event['client']:
        event['client']['name'] = event['entity']['id']
    if "subscribers" not in event['client']:
        event['client']['subscribers'] = event['entity']['subscriptions']

    # Fill in renamed check attributes expected in a 1.4 event.
    if "subscribers" not in event['check']:
        event['check']['subscribers'] = event['check']['subscriptions']
    if "source" not in event['check']:
        event['check']['source'] = event['check']['proxy_entity_id']

    # Mimic the 1.4 event action based on the 2.0 event state;
    # action is used in logs and fluentd plugin handlers.
    action_state_mapping = {'flapping': 'flapping',
                            'passing': 'resolve',
                            'failing': 'create'}
    state = event['check'].get('state', "unknown::2.0_event")
    if "action" not in event and state.lower() in action_state_mapping:
        event['action'] = action_state_mapping[state.lower()]
    else:
        event['action'] = state

    # Mimic 1.4 event history based on the 2.0 history, keeping the
    # original around under history_v2. Non-integer statuses map to "3".
    if "history" in event['check']:
        event['check']['history_v2'] = deepcopy(event['check']['history'])
        legacy_history = []
        for entry in event['check']['history']:
            if isinstance(entry['status'], int):
                legacy_history.append(str(entry['status']))
            else:
                legacy_history.append("3")
        event['check']['history'] = legacy_history

    # Flag the event so this function is not applied twice.
    event['v2_event_mapped_into_v1'] = True
    return event
[ "def", "map_v2_event_into_v1", "(", "event", ")", ":", "# return the event if it has already been mapped", "if", "\"v2_event_mapped_into_v1\"", "in", "event", ":", "return", "event", "# Trigger mapping code if enity exists and client does not", "if", "not", "bool", "(", "event"...
Helper method to convert Sensu 2.x event into Sensu 1.x event.
[ "Helper", "method", "to", "convert", "Sensu", "2", ".", "x", "event", "into", "Sensu", "1", ".", "x", "event", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L80-L139
22,117
sensu-plugins/sensu-plugin-python
sensu_plugin/check.py
SensuPluginCheck.check_name
def check_name(self, name=None): ''' Checks the plugin name and sets it accordingly. Uses name if specified, class name if not set. ''' if name: self.plugin_info['check_name'] = name if self.plugin_info['check_name'] is not None: return self.plugin_info['check_name'] return self.__class__.__name__
python
def check_name(self, name=None):
    '''
    Set and/or report the plugin's check name.

    An explicit ``name`` is stored in plugin_info; otherwise the
    previously stored name is returned, falling back to the class
    name when none was ever set.
    '''
    if name:
        self.plugin_info['check_name'] = name

    stored = self.plugin_info['check_name']
    if stored is not None:
        return stored
    return self.__class__.__name__
[ "def", "check_name", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", ":", "self", ".", "plugin_info", "[", "'check_name'", "]", "=", "name", "if", "self", ".", "plugin_info", "[", "'check_name'", "]", "is", "not", "None", ":", "return", ...
Checks the plugin name and sets it accordingly. Uses name if specified, class name if not set.
[ "Checks", "the", "plugin", "name", "and", "sets", "it", "accordingly", ".", "Uses", "name", "if", "specified", "class", "name", "if", "not", "set", "." ]
bd43a5ea4d191e5e63494c8679aab02ac072d9ed
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/check.py#L11-L22
22,118
chainer/chainerui
chainerui/models/result.py
Result.sampled_logs
def sampled_logs(self, logs_limit=-1): """Return up to `logs_limit` logs. If `logs_limit` is -1, this function will return all logs that belong to the result. """ logs_count = len(self.logs) if logs_limit == -1 or logs_count <= logs_limit: return self.logs elif logs_limit == 0: return [] elif logs_limit == 1: return [self.logs[-1]] else: def get_sampled_log(idx): # always include the first and last element of `self.logs` return self.logs[idx * (logs_count - 1) // (logs_limit - 1)] return [get_sampled_log(i) for i in range(logs_limit)]
python
def sampled_logs(self, logs_limit=-1): logs_count = len(self.logs) if logs_limit == -1 or logs_count <= logs_limit: return self.logs elif logs_limit == 0: return [] elif logs_limit == 1: return [self.logs[-1]] else: def get_sampled_log(idx): # always include the first and last element of `self.logs` return self.logs[idx * (logs_count - 1) // (logs_limit - 1)] return [get_sampled_log(i) for i in range(logs_limit)]
[ "def", "sampled_logs", "(", "self", ",", "logs_limit", "=", "-", "1", ")", ":", "logs_count", "=", "len", "(", "self", ".", "logs", ")", "if", "logs_limit", "==", "-", "1", "or", "logs_count", "<=", "logs_limit", ":", "return", "self", ".", "logs", "...
Return up to `logs_limit` logs. If `logs_limit` is -1, this function will return all logs that belong to the result.
[ "Return", "up", "to", "logs_limit", "logs", "." ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/result.py#L60-L77
22,119
chainer/chainerui
chainerui/models/result.py
Result.serialize_with_sampled_logs
def serialize_with_sampled_logs(self, logs_limit=-1): """serialize a result with up to `logs_limit` logs. If `logs_limit` is -1, this function will return a result with all its logs. """ return { 'id': self.id, 'pathName': self.path_name, 'name': self.name, 'isUnregistered': self.is_unregistered, 'logs': [log.serialize for log in self.sampled_logs(logs_limit)], 'args': self.args.serialize if self.args is not None else [], 'commands': [cmd.serialize for cmd in self.commands], 'snapshots': [cmd.serialize for cmd in self.snapshots], 'logModifiedAt': self.log_modified_at.isoformat() }
python
def serialize_with_sampled_logs(self, logs_limit=-1): return { 'id': self.id, 'pathName': self.path_name, 'name': self.name, 'isUnregistered': self.is_unregistered, 'logs': [log.serialize for log in self.sampled_logs(logs_limit)], 'args': self.args.serialize if self.args is not None else [], 'commands': [cmd.serialize for cmd in self.commands], 'snapshots': [cmd.serialize for cmd in self.snapshots], 'logModifiedAt': self.log_modified_at.isoformat() }
[ "def", "serialize_with_sampled_logs", "(", "self", ",", "logs_limit", "=", "-", "1", ")", ":", "return", "{", "'id'", ":", "self", ".", "id", ",", "'pathName'", ":", "self", ".", "path_name", ",", "'name'", ":", "self", ".", "name", ",", "'isUnregistered...
serialize a result with up to `logs_limit` logs. If `logs_limit` is -1, this function will return a result with all its logs.
[ "serialize", "a", "result", "with", "up", "to", "logs_limit", "logs", "." ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/result.py#L79-L96
22,120
chainer/chainerui
chainerui/summary.py
reporter
def reporter(prefix=None, out=None, subdir='', timeout=5, **kwargs): """Summary media assets to visualize. ``reporter`` function collects media assets by the ``with`` statement and aggregates in same row to visualize. This function returns an object which provides the following methods. * :meth:`~chainerui.summary._Reporter.image`: collect images. almost same \ as :func:`~chainerui.summary.image` * :meth:`~chainerui.summary._Reporter.audio`: collect audio. almost same \ as :func:`~chainerui.summary.audio` Example of how to set several assets:: >>> from chainerui.summary import reporter >>> summary.set_out('/path/to/output') # same as 'log' file directory >>> >>> with reporter(epoch=1, iteration=10) as r: >>> r.image(image_array1) >>> r.image(image_array2) >>> r.audio(audio_array, 44100) >>> # image_array1 and image_array2 are visualized on a browser >>> # audio_array can be listened on a browser Args: prefix (str): prefix of column name. out (str): directory path of output. subdir (str): sub-directory path of output. **kwargs (dict): key-value pair to show as description. regardless of empty or not, timestamp is added. """ report = _Reporter(prefix, out, subdir, **kwargs) yield report report.save(timeout)
python
def reporter(prefix=None, out=None, subdir='', timeout=5, **kwargs): report = _Reporter(prefix, out, subdir, **kwargs) yield report report.save(timeout)
[ "def", "reporter", "(", "prefix", "=", "None", ",", "out", "=", "None", ",", "subdir", "=", "''", ",", "timeout", "=", "5", ",", "*", "*", "kwargs", ")", ":", "report", "=", "_Reporter", "(", "prefix", ",", "out", ",", "subdir", ",", "*", "*", ...
Summary media assets to visualize. ``reporter`` function collects media assets by the ``with`` statement and aggregates in same row to visualize. This function returns an object which provides the following methods. * :meth:`~chainerui.summary._Reporter.image`: collect images. almost same \ as :func:`~chainerui.summary.image` * :meth:`~chainerui.summary._Reporter.audio`: collect audio. almost same \ as :func:`~chainerui.summary.audio` Example of how to set several assets:: >>> from chainerui.summary import reporter >>> summary.set_out('/path/to/output') # same as 'log' file directory >>> >>> with reporter(epoch=1, iteration=10) as r: >>> r.image(image_array1) >>> r.image(image_array2) >>> r.audio(audio_array, 44100) >>> # image_array1 and image_array2 are visualized on a browser >>> # audio_array can be listened on a browser Args: prefix (str): prefix of column name. out (str): directory path of output. subdir (str): sub-directory path of output. **kwargs (dict): key-value pair to show as description. regardless of empty or not, timestamp is added.
[ "Summary", "media", "assets", "to", "visualize", "." ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/summary.py#L174-L208
22,121
chainer/chainerui
chainerui/summary.py
audio
def audio(audio, sample_rate, name=None, out=None, subdir='', timeout=5, **kwargs): """summary audio files to listen on a browser. An sampled array is converted as WAV audio file, saved to output directory, and reported to the ChainerUI server. The audio file is saved every called this function. The audio file will be listened on `assets` endpoint vertically. If need to aggregate audio files in row, use :func:`~chainerui.summary.reporter`. Example of how to set arguments:: >>> from chainerui import summary >>> summary.set_out('/path/to/output') >>> rate = 44100 >>> >>> summary.audio(sampled_array, rate, name='test') >>> # sampled_array can be listened on a browser. Add description about the audio file:: >>> summary.image( >>> sampled_array, rate, name='test', epoch=1, iteration=100) >>> # 'epoch' and 'iteration' column will be shown. Args: audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \ :class:`chainer.Variable`): sampled wave array. sample_rate (int): sampling rate. name (str): name of image. set as column name. when not setting, assigned ``'audio'``. out (str): directory path of output. subdir (str): sub-directory path of output. **kwargs (dict): key-value pair to show as description. regardless of empty or not, timestamp on created the image is added. """ from chainerui.report.audio_report import check_available if not check_available(): return from chainerui.report.audio_report import report as _audio out_root = _chainerui_asset_observer.get_outpath(out) out_path = os.path.join(out_root, subdir) if not os.path.isdir(out_path): os.makedirs(out_path) col_name = name if col_name is None: col_name = 'audio' filename, created_at = _audio(audio, sample_rate, out_path, col_name) value = kwargs value['timestamp'] = created_at.isoformat() value['audios'] = {col_name: os.path.join(subdir, filename)} _chainerui_asset_observer.add(value) _chainerui_asset_observer.save(out_root, timeout)
python
def audio(audio, sample_rate, name=None, out=None, subdir='', timeout=5, **kwargs): from chainerui.report.audio_report import check_available if not check_available(): return from chainerui.report.audio_report import report as _audio out_root = _chainerui_asset_observer.get_outpath(out) out_path = os.path.join(out_root, subdir) if not os.path.isdir(out_path): os.makedirs(out_path) col_name = name if col_name is None: col_name = 'audio' filename, created_at = _audio(audio, sample_rate, out_path, col_name) value = kwargs value['timestamp'] = created_at.isoformat() value['audios'] = {col_name: os.path.join(subdir, filename)} _chainerui_asset_observer.add(value) _chainerui_asset_observer.save(out_root, timeout)
[ "def", "audio", "(", "audio", ",", "sample_rate", ",", "name", "=", "None", ",", "out", "=", "None", ",", "subdir", "=", "''", ",", "timeout", "=", "5", ",", "*", "*", "kwargs", ")", ":", "from", "chainerui", ".", "report", ".", "audio_report", "im...
summary audio files to listen on a browser. An sampled array is converted as WAV audio file, saved to output directory, and reported to the ChainerUI server. The audio file is saved every called this function. The audio file will be listened on `assets` endpoint vertically. If need to aggregate audio files in row, use :func:`~chainerui.summary.reporter`. Example of how to set arguments:: >>> from chainerui import summary >>> summary.set_out('/path/to/output') >>> rate = 44100 >>> >>> summary.audio(sampled_array, rate, name='test') >>> # sampled_array can be listened on a browser. Add description about the audio file:: >>> summary.image( >>> sampled_array, rate, name='test', epoch=1, iteration=100) >>> # 'epoch' and 'iteration' column will be shown. Args: audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \ :class:`chainer.Variable`): sampled wave array. sample_rate (int): sampling rate. name (str): name of image. set as column name. when not setting, assigned ``'audio'``. out (str): directory path of output. subdir (str): sub-directory path of output. **kwargs (dict): key-value pair to show as description. regardless of empty or not, timestamp on created the image is added.
[ "summary", "audio", "files", "to", "listen", "on", "a", "browser", "." ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/summary.py#L304-L359
22,122
chainer/chainerui
chainerui/summary.py
_Reporter.audio
def audio(self, audio, sample_rate, name=None, subdir=''): """Summary audio to listen on web browser. Args: audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \ :class:`chainer.Variable`): sampled wave array. sample_rate (int): sampling rate. name (str): name of image. set as column name. when not setting, assigned ``'audio'`` + sequential number. subdir (str): sub-directory path of output. """ from chainerui.report.audio_report import check_available if not check_available(): return from chainerui.report.audio_report import report as _audio col_name = self.get_col_name(name, 'audio') out_dir, rel_out_dir = self.get_subdir(subdir) filename, _ = _audio(audio, sample_rate, out_dir, col_name) self.audios[col_name] = os.path.join(rel_out_dir, filename) self.count += 1
python
def audio(self, audio, sample_rate, name=None, subdir=''): from chainerui.report.audio_report import check_available if not check_available(): return from chainerui.report.audio_report import report as _audio col_name = self.get_col_name(name, 'audio') out_dir, rel_out_dir = self.get_subdir(subdir) filename, _ = _audio(audio, sample_rate, out_dir, col_name) self.audios[col_name] = os.path.join(rel_out_dir, filename) self.count += 1
[ "def", "audio", "(", "self", ",", "audio", ",", "sample_rate", ",", "name", "=", "None", ",", "subdir", "=", "''", ")", ":", "from", "chainerui", ".", "report", ".", "audio_report", "import", "check_available", "if", "not", "check_available", "(", ")", "...
Summary audio to listen on web browser. Args: audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \ :class:`chainer.Variable`): sampled wave array. sample_rate (int): sampling rate. name (str): name of image. set as column name. when not setting, assigned ``'audio'`` + sequential number. subdir (str): sub-directory path of output.
[ "Summary", "audio", "to", "listen", "on", "web", "browser", "." ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/summary.py#L106-L128
22,123
chainer/chainerui
chainerui/models/project.py
Project.create
def create(cls, path_name=None, name=None, crawlable=True): """initialize an instance and save it to db.""" project = cls(path_name, name, crawlable) db.session.add(project) db.session.commit() return collect_results(project, force=True)
python
def create(cls, path_name=None, name=None, crawlable=True): project = cls(path_name, name, crawlable) db.session.add(project) db.session.commit() return collect_results(project, force=True)
[ "def", "create", "(", "cls", ",", "path_name", "=", "None", ",", "name", "=", "None", ",", "crawlable", "=", "True", ")", ":", "project", "=", "cls", "(", "path_name", ",", "name", ",", "crawlable", ")", "db", ".", "session", ".", "add", "(", "proj...
initialize an instance and save it to db.
[ "initialize", "an", "instance", "and", "save", "it", "to", "db", "." ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/project.py#L36-L44
22,124
chainer/chainerui
chainerui/tasks/collect_assets.py
collect_assets
def collect_assets(result, force=False): """collect assets from meta file Collecting assets only when the metafile is updated. If number of assets are decreased, assets are reset and re-collect the assets. """ path_name = result.path_name info_path = os.path.join(path_name, summary.CHAINERUI_ASSETS_METAFILE_NAME) if not os.path.isfile(info_path): return start_idx = len(result.assets) file_modified_at = datetime.datetime.fromtimestamp(os.path.getmtime( info_path)) if start_idx > 0: if result.assets[-1].file_modified_at == file_modified_at: return with open(info_path, 'r') as f: info_list = json.load(f, object_pairs_hook=OrderedDict) if len(info_list) < start_idx: start_idx = 0 result.assets = [] for base_info in info_list[start_idx:]: asset_path = base_info.pop('images', {}) asset_path.update(base_info.pop('audios', {})) asset = Asset.create( result_id=result.id, summary=base_info, file_modified_at=file_modified_at) for key, path in asset_path.items(): with open(os.path.join(path_name, path), 'rb') as f: data = f.read() content = Bindata( asset_id=asset.id, name=path, tag=key, content=data) asset.content_list.append(content) result.assets.append(asset) db.session.commit()
python
def collect_assets(result, force=False): path_name = result.path_name info_path = os.path.join(path_name, summary.CHAINERUI_ASSETS_METAFILE_NAME) if not os.path.isfile(info_path): return start_idx = len(result.assets) file_modified_at = datetime.datetime.fromtimestamp(os.path.getmtime( info_path)) if start_idx > 0: if result.assets[-1].file_modified_at == file_modified_at: return with open(info_path, 'r') as f: info_list = json.load(f, object_pairs_hook=OrderedDict) if len(info_list) < start_idx: start_idx = 0 result.assets = [] for base_info in info_list[start_idx:]: asset_path = base_info.pop('images', {}) asset_path.update(base_info.pop('audios', {})) asset = Asset.create( result_id=result.id, summary=base_info, file_modified_at=file_modified_at) for key, path in asset_path.items(): with open(os.path.join(path_name, path), 'rb') as f: data = f.read() content = Bindata( asset_id=asset.id, name=path, tag=key, content=data) asset.content_list.append(content) result.assets.append(asset) db.session.commit()
[ "def", "collect_assets", "(", "result", ",", "force", "=", "False", ")", ":", "path_name", "=", "result", ".", "path_name", "info_path", "=", "os", ".", "path", ".", "join", "(", "path_name", ",", "summary", ".", "CHAINERUI_ASSETS_METAFILE_NAME", ")", "if", ...
collect assets from meta file Collecting assets only when the metafile is updated. If number of assets are decreased, assets are reset and re-collect the assets.
[ "collect", "assets", "from", "meta", "file" ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/tasks/collect_assets.py#L12-L50
22,125
chainer/chainerui
chainerui/utils/save_args.py
save_args
def save_args(conditions, out_path): """A util function to save experiment condition for job table. Args: conditions (:class:`argparse.Namespace` or dict): Experiment conditions to show on a job table. Keys are show as table header and values are show at a job row. out_path (str): Output directory name to save conditions. """ if isinstance(conditions, argparse.Namespace): args = vars(conditions) else: args = conditions try: os.makedirs(out_path) except OSError: pass with tempdir(prefix='args', dir=out_path) as tempd: path = os.path.join(tempd, 'args.json') with open(path, 'w') as f: json.dump(args, f, indent=4) new_path = os.path.join(out_path, 'args') shutil.move(path, new_path)
python
def save_args(conditions, out_path): if isinstance(conditions, argparse.Namespace): args = vars(conditions) else: args = conditions try: os.makedirs(out_path) except OSError: pass with tempdir(prefix='args', dir=out_path) as tempd: path = os.path.join(tempd, 'args.json') with open(path, 'w') as f: json.dump(args, f, indent=4) new_path = os.path.join(out_path, 'args') shutil.move(path, new_path)
[ "def", "save_args", "(", "conditions", ",", "out_path", ")", ":", "if", "isinstance", "(", "conditions", ",", "argparse", ".", "Namespace", ")", ":", "args", "=", "vars", "(", "conditions", ")", "else", ":", "args", "=", "conditions", "try", ":", "os", ...
A util function to save experiment condition for job table. Args: conditions (:class:`argparse.Namespace` or dict): Experiment conditions to show on a job table. Keys are show as table header and values are show at a job row. out_path (str): Output directory name to save conditions.
[ "A", "util", "function", "to", "save", "experiment", "condition", "for", "job", "table", "." ]
87ad25e875bc332bfdad20197fd3d0cb81a078e8
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/utils/save_args.py#L9-L36
22,126
sunt05/SuPy
src/supy/supy_misc.py
_path_insensitive
def _path_insensitive(path): """ Recursive part of path_insensitive to do the work. """ path = str(path) if path == '' or os.path.exists(path): return path base = os.path.basename(path) # may be a directory or a file dirname = os.path.dirname(path) suffix = '' if not base: # dir ends with a slash? if len(dirname) < len(path): suffix = path[:len(path) - len(dirname)] base = os.path.basename(dirname) dirname = os.path.dirname(dirname) if not os.path.exists(dirname): dirname = _path_insensitive(dirname) if not dirname: return # at this point, the directory exists but not the file try: # we are expecting dirname to be a directory, but it could be a file files = os.listdir(dirname) except OSError: return baselow = base.lower() try: basefinal = next(fl for fl in files if fl.lower() == baselow) except StopIteration: return if basefinal: return os.path.join(dirname, basefinal) + suffix else: return
python
def _path_insensitive(path): path = str(path) if path == '' or os.path.exists(path): return path base = os.path.basename(path) # may be a directory or a file dirname = os.path.dirname(path) suffix = '' if not base: # dir ends with a slash? if len(dirname) < len(path): suffix = path[:len(path) - len(dirname)] base = os.path.basename(dirname) dirname = os.path.dirname(dirname) if not os.path.exists(dirname): dirname = _path_insensitive(dirname) if not dirname: return # at this point, the directory exists but not the file try: # we are expecting dirname to be a directory, but it could be a file files = os.listdir(dirname) except OSError: return baselow = base.lower() try: basefinal = next(fl for fl in files if fl.lower() == baselow) except StopIteration: return if basefinal: return os.path.join(dirname, basefinal) + suffix else: return
[ "def", "_path_insensitive", "(", "path", ")", ":", "path", "=", "str", "(", "path", ")", "if", "path", "==", "''", "or", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "path", "base", "=", "os", ".", "path", ".", "basename", "...
Recursive part of path_insensitive to do the work.
[ "Recursive", "part", "of", "path_insensitive", "to", "do", "the", "work", "." ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_misc.py#L34-L74
22,127
sunt05/SuPy
docs/source/proc_var_info/nml_rst_proc.py
form_option
def form_option(str_opt): '''generate option name based suffix for URL :param str_opt: opt name :type str_opt: str :return: URL suffix for the specified option :rtype: str ''' str_base = '#cmdoption-arg-' str_opt_x = str_base+str_opt.lower()\ .replace('_', '-')\ .replace('(', '-')\ .replace(')', '') return str_opt_x
python
def form_option(str_opt): '''generate option name based suffix for URL :param str_opt: opt name :type str_opt: str :return: URL suffix for the specified option :rtype: str ''' str_base = '#cmdoption-arg-' str_opt_x = str_base+str_opt.lower()\ .replace('_', '-')\ .replace('(', '-')\ .replace(')', '') return str_opt_x
[ "def", "form_option", "(", "str_opt", ")", ":", "str_base", "=", "'#cmdoption-arg-'", "str_opt_x", "=", "str_base", "+", "str_opt", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "'-'", ")", ".", "replace", "(", "'('", ",", "'-'", ")", ".", ...
generate option name based suffix for URL :param str_opt: opt name :type str_opt: str :return: URL suffix for the specified option :rtype: str
[ "generate", "option", "name", "based", "suffix", "for", "URL" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/nml_rst_proc.py#L83-L97
22,128
sunt05/SuPy
docs/source/proc_var_info/nml_rst_proc.py
gen_url_option
def gen_url_option( str_opt, set_site=set_site, set_runcontrol=set_runcontrol, set_initcond=set_initcond, source='docs'): '''construct a URL for option based on source :param str_opt: option name, defaults to '' :param str_opt: str, optional :param source: URL source: 'docs' for readthedocs.org; 'github' for github repo, defaults to 'docs' :param source: str, optional :return: a valid URL pointing to the option related resources :rtype: urlpath.URL ''' dict_base = { 'docs': URL('https://suews-docs.readthedocs.io/en/latest/input_files/'), 'github': URL('https://github.com/Urban-Meteorology-Reading/SUEWS-Docs/raw/master/docs/source/input_files/'), } url_base = dict_base[source] url_page = choose_page( str_opt, set_site, set_runcontrol, set_initcond, source=source) # print('str_opt', str_opt, url_base, url_page) str_opt_x = form_option(str_opt) url_opt = url_base/(url_page+str_opt_x) return url_opt
python
def gen_url_option( str_opt, set_site=set_site, set_runcontrol=set_runcontrol, set_initcond=set_initcond, source='docs'): '''construct a URL for option based on source :param str_opt: option name, defaults to '' :param str_opt: str, optional :param source: URL source: 'docs' for readthedocs.org; 'github' for github repo, defaults to 'docs' :param source: str, optional :return: a valid URL pointing to the option related resources :rtype: urlpath.URL ''' dict_base = { 'docs': URL('https://suews-docs.readthedocs.io/en/latest/input_files/'), 'github': URL('https://github.com/Urban-Meteorology-Reading/SUEWS-Docs/raw/master/docs/source/input_files/'), } url_base = dict_base[source] url_page = choose_page( str_opt, set_site, set_runcontrol, set_initcond, source=source) # print('str_opt', str_opt, url_base, url_page) str_opt_x = form_option(str_opt) url_opt = url_base/(url_page+str_opt_x) return url_opt
[ "def", "gen_url_option", "(", "str_opt", ",", "set_site", "=", "set_site", ",", "set_runcontrol", "=", "set_runcontrol", ",", "set_initcond", "=", "set_initcond", ",", "source", "=", "'docs'", ")", ":", "dict_base", "=", "{", "'docs'", ":", "URL", "(", "'htt...
construct a URL for option based on source :param str_opt: option name, defaults to '' :param str_opt: str, optional :param source: URL source: 'docs' for readthedocs.org; 'github' for github repo, defaults to 'docs' :param source: str, optional :return: a valid URL pointing to the option related resources :rtype: urlpath.URL
[ "construct", "a", "URL", "for", "option", "based", "on", "source" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/nml_rst_proc.py#L154-L180
22,129
sunt05/SuPy
docs/source/proc_var_info/gen_df_forcing_output_csv.py
gen_df_forcing
def gen_df_forcing( path_csv_in='SSss_YYYY_data_tt.csv', url_base=url_repo_input,)->pd.DataFrame: '''Generate description info of supy forcing data into a dataframe Parameters ---------- path_csv_in : str, optional path to the input csv file relative to url_base (the default is '/input_files/SSss_YYYY_data_tt.csv']) url_base : urlpath.URL, optional URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file) Returns ------- pd.DataFrame Description info of supy forcing data ''' try: # load info from SUEWS docs repo # this is regarded as the official source urlpath_table = url_base/path_csv_in df_var_info = pd.read_csv(urlpath_table) except: print(f'{urlpath_table} not existing!') else: # clean info dataframe df_var_forcing = df_var_info.drop(['No.', 'Use'], axis=1) # set index with `Column name` df_var_forcing = df_var_forcing.set_index('Column Name') df_var_forcing.index = df_var_forcing.index\ .map(lambda x: x.replace('`', ''))\ .rename('variable') # add `Second` info df_var_forcing.loc['isec'] = 'Second [S]' return df_var_forcing
python
def gen_df_forcing( path_csv_in='SSss_YYYY_data_tt.csv', url_base=url_repo_input,)->pd.DataFrame: '''Generate description info of supy forcing data into a dataframe Parameters ---------- path_csv_in : str, optional path to the input csv file relative to url_base (the default is '/input_files/SSss_YYYY_data_tt.csv']) url_base : urlpath.URL, optional URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file) Returns ------- pd.DataFrame Description info of supy forcing data ''' try: # load info from SUEWS docs repo # this is regarded as the official source urlpath_table = url_base/path_csv_in df_var_info = pd.read_csv(urlpath_table) except: print(f'{urlpath_table} not existing!') else: # clean info dataframe df_var_forcing = df_var_info.drop(['No.', 'Use'], axis=1) # set index with `Column name` df_var_forcing = df_var_forcing.set_index('Column Name') df_var_forcing.index = df_var_forcing.index\ .map(lambda x: x.replace('`', ''))\ .rename('variable') # add `Second` info df_var_forcing.loc['isec'] = 'Second [S]' return df_var_forcing
[ "def", "gen_df_forcing", "(", "path_csv_in", "=", "'SSss_YYYY_data_tt.csv'", ",", "url_base", "=", "url_repo_input", ",", ")", "->", "pd", ".", "DataFrame", ":", "try", ":", "# load info from SUEWS docs repo", "# this is regarded as the official source", "urlpath_table", ...
Generate description info of supy forcing data into a dataframe Parameters ---------- path_csv_in : str, optional path to the input csv file relative to url_base (the default is '/input_files/SSss_YYYY_data_tt.csv']) url_base : urlpath.URL, optional URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file) Returns ------- pd.DataFrame Description info of supy forcing data
[ "Generate", "description", "info", "of", "supy", "forcing", "data", "into", "a", "dataframe" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_forcing_output_csv.py#L38-L76
22,130
sunt05/SuPy
docs/source/proc_var_info/gen_df_forcing_output_csv.py
gen_df_output
def gen_df_output( list_csv_in=[ 'SSss_YYYY_SUEWS_TT.csv', 'SSss_DailyState.csv', 'SSss_YYYY_snow_TT.csv', ], url_base=url_repo_output)->Path: '''Generate description info of supy output results into dataframe Parameters ---------- list_csv_in : list, optional list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv','SSss_DailyState.csv','SSss_YYYY_snow_TT.csv',], which [default_description]) url_base : [type], optional URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file) Returns ------- pd.DataFrame Description info of supy output results ''' # list of URLs list_url_table = [ url_base/table for table in list_csv_in ] try: df_var_info = pd.concat( [pd.read_csv(f) for f in list_url_table], sort=False) except: for url in list_url_table: if not url.get().ok: print(f'{url} not existing!') else: # clean meta info df_var_info_x = df_var_info\ .set_index('Name')\ .loc[:, ['Description']]\ .drop_duplicates() df_var_output = df_var_info_x\ .copy()\ .assign(lower=df_var_info_x.index.str.lower())\ .reset_index()\ .set_index('lower') df_var_group = df_output_sample.columns.to_frame() df_var_group.index = df_var_group.index.droplevel(0).rename('Name') # wrap into a dataframe df_var_output = df_var_group\ .merge( df_var_output.set_index('Name'), left_on='Name', right_on='Name')\ .rename(columns={ 'var': 'variable', 'group': 'Group', })\ .set_index('variable')\ .drop_duplicates() return df_var_output
python
def gen_df_output( list_csv_in=[ 'SSss_YYYY_SUEWS_TT.csv', 'SSss_DailyState.csv', 'SSss_YYYY_snow_TT.csv', ], url_base=url_repo_output)->Path: '''Generate description info of supy output results into dataframe Parameters ---------- list_csv_in : list, optional list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv','SSss_DailyState.csv','SSss_YYYY_snow_TT.csv',], which [default_description]) url_base : [type], optional URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file) Returns ------- pd.DataFrame Description info of supy output results ''' # list of URLs list_url_table = [ url_base/table for table in list_csv_in ] try: df_var_info = pd.concat( [pd.read_csv(f) for f in list_url_table], sort=False) except: for url in list_url_table: if not url.get().ok: print(f'{url} not existing!') else: # clean meta info df_var_info_x = df_var_info\ .set_index('Name')\ .loc[:, ['Description']]\ .drop_duplicates() df_var_output = df_var_info_x\ .copy()\ .assign(lower=df_var_info_x.index.str.lower())\ .reset_index()\ .set_index('lower') df_var_group = df_output_sample.columns.to_frame() df_var_group.index = df_var_group.index.droplevel(0).rename('Name') # wrap into a dataframe df_var_output = df_var_group\ .merge( df_var_output.set_index('Name'), left_on='Name', right_on='Name')\ .rename(columns={ 'var': 'variable', 'group': 'Group', })\ .set_index('variable')\ .drop_duplicates() return df_var_output
[ "def", "gen_df_output", "(", "list_csv_in", "=", "[", "'SSss_YYYY_SUEWS_TT.csv'", ",", "'SSss_DailyState.csv'", ",", "'SSss_YYYY_snow_TT.csv'", ",", "]", ",", "url_base", "=", "url_repo_output", ")", "->", "Path", ":", "# list of URLs", "list_url_table", "=", "[", "...
Generate description info of supy output results into dataframe Parameters ---------- list_csv_in : list, optional list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv','SSss_DailyState.csv','SSss_YYYY_snow_TT.csv',], which [default_description]) url_base : [type], optional URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file) Returns ------- pd.DataFrame Description info of supy output results
[ "Generate", "description", "info", "of", "supy", "output", "results", "into", "dataframe" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_forcing_output_csv.py#L84-L147
22,131
sunt05/SuPy
docs/source/proc_var_info/gen_rst.py
gen_opt_str
def gen_opt_str(ser_rec: pd.Series)->str: '''generate rst option string Parameters ---------- ser_rec : pd.Series record for specifications Returns ------- str rst string ''' name = ser_rec.name indent = r' ' str_opt = f'.. option:: {name}'+'\n\n' for spec in ser_rec.sort_index().index: str_opt += indent+f':{spec}:'+'\n' spec_content = ser_rec[spec] str_opt += indent+indent+f'{spec_content}'+'\n' return str_opt
python
def gen_opt_str(ser_rec: pd.Series)->str: '''generate rst option string Parameters ---------- ser_rec : pd.Series record for specifications Returns ------- str rst string ''' name = ser_rec.name indent = r' ' str_opt = f'.. option:: {name}'+'\n\n' for spec in ser_rec.sort_index().index: str_opt += indent+f':{spec}:'+'\n' spec_content = ser_rec[spec] str_opt += indent+indent+f'{spec_content}'+'\n' return str_opt
[ "def", "gen_opt_str", "(", "ser_rec", ":", "pd", ".", "Series", ")", "->", "str", ":", "name", "=", "ser_rec", ".", "name", "indent", "=", "r' '", "str_opt", "=", "f'.. option:: {name}'", "+", "'\\n\\n'", "for", "spec", "in", "ser_rec", ".", "sort_index...
generate rst option string Parameters ---------- ser_rec : pd.Series record for specifications Returns ------- str rst string
[ "generate", "rst", "option", "string" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_rst.py#L71-L92
22,132
sunt05/SuPy
src/supy/supy_module.py
init_supy
def init_supy(path_init: str)->pd.DataFrame: '''Initialise supy by loading initial model states. Parameters ---------- path_init : str Path to a file that can initialise SuPy, which can be either of the follows: * SUEWS :ref:`RunControl.nml<suews:RunControl.nml>`: a namelist file for SUEWS configurations * SuPy `df_state.csv`: a CSV file including model states produced by a SuPy run via :py:func:`supy.save_supy` Returns ------- df_state_init: pandas.DataFrame Initial model states. See `df_state_var` for details. Examples -------- 1. Use :ref:`RunControl.nml<suews:RunControl.nml>` to initialise SuPy >>> path_init = "~/SUEWS_sims/RunControl.nml" >>> df_state_init = supy.init_supy(path_init) 2. Use ``df_state.csv`` to initialise SuPy >>> path_init = "~/SuPy_res/df_state_test.csv" >>> df_state_init = supy.init_supy(path_init) ''' try: path_init_x = Path(path_init).expanduser().resolve() except FileNotFoundError: print('{path} does not exists!'.format(path=path_init_x)) else: if path_init_x.suffix == '.nml': # SUEWS `RunControl.nml`: df_state_init = load_InitialCond_grid_df(path_init_x) elif path_init_x.suffix == '.csv': # SuPy `df_state.csv`: df_state_init = load_df_state(path_init_x) else: print('{path} is NOT a valid file to initialise SuPy!'.format( path=path_init_x)) sys.exit() return df_state_init
python
def init_supy(path_init: str)->pd.DataFrame: '''Initialise supy by loading initial model states. Parameters ---------- path_init : str Path to a file that can initialise SuPy, which can be either of the follows: * SUEWS :ref:`RunControl.nml<suews:RunControl.nml>`: a namelist file for SUEWS configurations * SuPy `df_state.csv`: a CSV file including model states produced by a SuPy run via :py:func:`supy.save_supy` Returns ------- df_state_init: pandas.DataFrame Initial model states. See `df_state_var` for details. Examples -------- 1. Use :ref:`RunControl.nml<suews:RunControl.nml>` to initialise SuPy >>> path_init = "~/SUEWS_sims/RunControl.nml" >>> df_state_init = supy.init_supy(path_init) 2. Use ``df_state.csv`` to initialise SuPy >>> path_init = "~/SuPy_res/df_state_test.csv" >>> df_state_init = supy.init_supy(path_init) ''' try: path_init_x = Path(path_init).expanduser().resolve() except FileNotFoundError: print('{path} does not exists!'.format(path=path_init_x)) else: if path_init_x.suffix == '.nml': # SUEWS `RunControl.nml`: df_state_init = load_InitialCond_grid_df(path_init_x) elif path_init_x.suffix == '.csv': # SuPy `df_state.csv`: df_state_init = load_df_state(path_init_x) else: print('{path} is NOT a valid file to initialise SuPy!'.format( path=path_init_x)) sys.exit() return df_state_init
[ "def", "init_supy", "(", "path_init", ":", "str", ")", "->", "pd", ".", "DataFrame", ":", "try", ":", "path_init_x", "=", "Path", "(", "path_init", ")", ".", "expanduser", "(", ")", ".", "resolve", "(", ")", "except", "FileNotFoundError", ":", "print", ...
Initialise supy by loading initial model states. Parameters ---------- path_init : str Path to a file that can initialise SuPy, which can be either of the follows: * SUEWS :ref:`RunControl.nml<suews:RunControl.nml>`: a namelist file for SUEWS configurations * SuPy `df_state.csv`: a CSV file including model states produced by a SuPy run via :py:func:`supy.save_supy` Returns ------- df_state_init: pandas.DataFrame Initial model states. See `df_state_var` for details. Examples -------- 1. Use :ref:`RunControl.nml<suews:RunControl.nml>` to initialise SuPy >>> path_init = "~/SUEWS_sims/RunControl.nml" >>> df_state_init = supy.init_supy(path_init) 2. Use ``df_state.csv`` to initialise SuPy >>> path_init = "~/SuPy_res/df_state_test.csv" >>> df_state_init = supy.init_supy(path_init)
[ "Initialise", "supy", "by", "loading", "initial", "model", "states", "." ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_module.py#L50-L95
22,133
sunt05/SuPy
src/supy/supy_module.py
load_SampleData
def load_SampleData()->Tuple[pandas.DataFrame, pandas.DataFrame]: '''Load sample data for quickly starting a demo run. Returns ------- df_state_init, df_forcing: Tuple[pandas.DataFrame, pandas.DataFrame] - df_state_init: `initial model states <df_state_var>` - df_forcing: `forcing data <df_forcing_var>` Examples -------- >>> df_state_init, df_forcing = supy.load_SampleData() ''' path_SampleData = Path(path_supy_module) / 'sample_run' path_runcontrol = path_SampleData / 'RunControl.nml' df_state_init = init_supy(path_runcontrol) # path_input = path_runcontrol.parent / ser_mod_cfg['fileinputpath'] df_forcing = load_forcing_grid( path_runcontrol, df_state_init.index[0] ) return df_state_init, df_forcing
python
def load_SampleData()->Tuple[pandas.DataFrame, pandas.DataFrame]: '''Load sample data for quickly starting a demo run. Returns ------- df_state_init, df_forcing: Tuple[pandas.DataFrame, pandas.DataFrame] - df_state_init: `initial model states <df_state_var>` - df_forcing: `forcing data <df_forcing_var>` Examples -------- >>> df_state_init, df_forcing = supy.load_SampleData() ''' path_SampleData = Path(path_supy_module) / 'sample_run' path_runcontrol = path_SampleData / 'RunControl.nml' df_state_init = init_supy(path_runcontrol) # path_input = path_runcontrol.parent / ser_mod_cfg['fileinputpath'] df_forcing = load_forcing_grid( path_runcontrol, df_state_init.index[0] ) return df_state_init, df_forcing
[ "def", "load_SampleData", "(", ")", "->", "Tuple", "[", "pandas", ".", "DataFrame", ",", "pandas", ".", "DataFrame", "]", ":", "path_SampleData", "=", "Path", "(", "path_supy_module", ")", "/", "'sample_run'", "path_runcontrol", "=", "path_SampleData", "/", "'...
Load sample data for quickly starting a demo run. Returns ------- df_state_init, df_forcing: Tuple[pandas.DataFrame, pandas.DataFrame] - df_state_init: `initial model states <df_state_var>` - df_forcing: `forcing data <df_forcing_var>` Examples -------- >>> df_state_init, df_forcing = supy.load_SampleData()
[ "Load", "sample", "data", "for", "quickly", "starting", "a", "demo", "run", "." ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_module.py#L218-L242
22,134
sunt05/SuPy
src/supy/supy_module.py
save_supy
def save_supy( df_output: pandas.DataFrame, df_state_final: pandas.DataFrame, freq_s: int = 3600, site: str = '', path_dir_save: str = Path('.'), path_runcontrol: str = None,)->list: '''Save SuPy run results to files Parameters ---------- df_output : pandas.DataFrame DataFrame of output df_state_final : pandas.DataFrame DataFrame of final model states freq_s : int, optional Output frequency in seconds (the default is 3600, which indicates hourly output) site : str, optional Site identifier (the default is '', which indicates site identifier will be left empty) path_dir_save : str, optional Path to directory to saving the files (the default is Path('.'), which indicates the current working directory) path_runcontrol : str, optional Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`, which, if set, will be preferably used to derive `freq_s`, `site` and `path_dir_save`. (the default is None, which is unset) Returns ------- list a list of paths of saved files Examples -------- 1. save results of a supy run to the current working directory with default settings >>> list_path_save = supy.save_supy(df_output, df_state_final) 2. save results according to settings in :ref:`RunControl.nml <suews:RunControl.nml>` >>> list_path_save = supy.save_supy(df_output, df_state_final, path_runcontrol='path/to/RunControl.nml') 3. 
save results of a supy run at resampling frequency of 1800 s (i.e., half-hourly results) under the site code ``Test`` to a customised location 'path/to/some/dir' >>> list_path_save = supy.save_supy(df_output, df_state_final, freq_s=1800, site='Test', path_dir_save='path/to/some/dir') ''' # get necessary information for saving procedure if path_runcontrol is not None: freq_s, path_dir_save, site = get_save_info(path_runcontrol) # save df_output to several files list_path_save = save_df_output(df_output, freq_s, site, path_dir_save) # save df_state path_state_save = save_df_state(df_state_final, site, path_dir_save) # update list_path_save list_path_save.append(path_state_save) return list_path_save
python
def save_supy( df_output: pandas.DataFrame, df_state_final: pandas.DataFrame, freq_s: int = 3600, site: str = '', path_dir_save: str = Path('.'), path_runcontrol: str = None,)->list: '''Save SuPy run results to files Parameters ---------- df_output : pandas.DataFrame DataFrame of output df_state_final : pandas.DataFrame DataFrame of final model states freq_s : int, optional Output frequency in seconds (the default is 3600, which indicates hourly output) site : str, optional Site identifier (the default is '', which indicates site identifier will be left empty) path_dir_save : str, optional Path to directory to saving the files (the default is Path('.'), which indicates the current working directory) path_runcontrol : str, optional Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`, which, if set, will be preferably used to derive `freq_s`, `site` and `path_dir_save`. (the default is None, which is unset) Returns ------- list a list of paths of saved files Examples -------- 1. save results of a supy run to the current working directory with default settings >>> list_path_save = supy.save_supy(df_output, df_state_final) 2. save results according to settings in :ref:`RunControl.nml <suews:RunControl.nml>` >>> list_path_save = supy.save_supy(df_output, df_state_final, path_runcontrol='path/to/RunControl.nml') 3. 
save results of a supy run at resampling frequency of 1800 s (i.e., half-hourly results) under the site code ``Test`` to a customised location 'path/to/some/dir' >>> list_path_save = supy.save_supy(df_output, df_state_final, freq_s=1800, site='Test', path_dir_save='path/to/some/dir') ''' # get necessary information for saving procedure if path_runcontrol is not None: freq_s, path_dir_save, site = get_save_info(path_runcontrol) # save df_output to several files list_path_save = save_df_output(df_output, freq_s, site, path_dir_save) # save df_state path_state_save = save_df_state(df_state_final, site, path_dir_save) # update list_path_save list_path_save.append(path_state_save) return list_path_save
[ "def", "save_supy", "(", "df_output", ":", "pandas", ".", "DataFrame", ",", "df_state_final", ":", "pandas", ".", "DataFrame", ",", "freq_s", ":", "int", "=", "3600", ",", "site", ":", "str", "=", "''", ",", "path_dir_save", ":", "str", "=", "Path", "(...
Save SuPy run results to files Parameters ---------- df_output : pandas.DataFrame DataFrame of output df_state_final : pandas.DataFrame DataFrame of final model states freq_s : int, optional Output frequency in seconds (the default is 3600, which indicates hourly output) site : str, optional Site identifier (the default is '', which indicates site identifier will be left empty) path_dir_save : str, optional Path to directory to saving the files (the default is Path('.'), which indicates the current working directory) path_runcontrol : str, optional Path to SUEWS :ref:`RunControl.nml <suews:RunControl.nml>`, which, if set, will be preferably used to derive `freq_s`, `site` and `path_dir_save`. (the default is None, which is unset) Returns ------- list a list of paths of saved files Examples -------- 1. save results of a supy run to the current working directory with default settings >>> list_path_save = supy.save_supy(df_output, df_state_final) 2. save results according to settings in :ref:`RunControl.nml <suews:RunControl.nml>` >>> list_path_save = supy.save_supy(df_output, df_state_final, path_runcontrol='path/to/RunControl.nml') 3. save results of a supy run at resampling frequency of 1800 s (i.e., half-hourly results) under the site code ``Test`` to a customised location 'path/to/some/dir' >>> list_path_save = supy.save_supy(df_output, df_state_final, freq_s=1800, site='Test', path_dir_save='path/to/some/dir')
[ "Save", "SuPy", "run", "results", "to", "files" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_module.py#L488-L547
22,135
sunt05/SuPy
src/supy/supy_load.py
load_df_state
def load_df_state(path_csv: Path)->pd.DataFrame: '''load `df_state` from `path_csv` Parameters ---------- path_csv : Path path to the csv file that stores `df_state` produced by a supy run Returns ------- pd.DataFrame `df_state` produced by a supy run ''' df_state = pd.read_csv( path_csv, header=[0, 1], index_col=[0, 1], parse_dates=True, infer_datetime_format=True, ) return df_state
python
def load_df_state(path_csv: Path)->pd.DataFrame: '''load `df_state` from `path_csv` Parameters ---------- path_csv : Path path to the csv file that stores `df_state` produced by a supy run Returns ------- pd.DataFrame `df_state` produced by a supy run ''' df_state = pd.read_csv( path_csv, header=[0, 1], index_col=[0, 1], parse_dates=True, infer_datetime_format=True, ) return df_state
[ "def", "load_df_state", "(", "path_csv", ":", "Path", ")", "->", "pd", ".", "DataFrame", ":", "df_state", "=", "pd", ".", "read_csv", "(", "path_csv", ",", "header", "=", "[", "0", ",", "1", "]", ",", "index_col", "=", "[", "0", ",", "1", "]", ",...
load `df_state` from `path_csv` Parameters ---------- path_csv : Path path to the csv file that stores `df_state` produced by a supy run Returns ------- pd.DataFrame `df_state` produced by a supy run
[ "load", "df_state", "from", "path_csv" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_load.py#L1600-L1621
22,136
sunt05/SuPy
docs/source/proc_var_info/gen_df_state_csv.py
extract_var_suews
def extract_var_suews(dict_var_full: dict, var_supy: str)->list: '''extract related SUEWS variables for a supy variable `var_supy` Parameters ---------- dict_var_full : dict dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect) var_supy : str supy variable name Returns ------- list related SUEWS variables for `var_supy` ''' x = sp.supy_load.flatten_list(dict_var_full[var_supy]) x = np.unique(x) x = [ xx for xx in x if xx not in ['base', 'const', '0.0'] + [str(x) for x in range(24)] ] x = [xx for xx in x if 'Code' not in xx] return x
python
def extract_var_suews(dict_var_full: dict, var_supy: str)->list: '''extract related SUEWS variables for a supy variable `var_supy` Parameters ---------- dict_var_full : dict dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect) var_supy : str supy variable name Returns ------- list related SUEWS variables for `var_supy` ''' x = sp.supy_load.flatten_list(dict_var_full[var_supy]) x = np.unique(x) x = [ xx for xx in x if xx not in ['base', 'const', '0.0'] + [str(x) for x in range(24)] ] x = [xx for xx in x if 'Code' not in xx] return x
[ "def", "extract_var_suews", "(", "dict_var_full", ":", "dict", ",", "var_supy", ":", "str", ")", "->", "list", ":", "x", "=", "sp", ".", "supy_load", ".", "flatten_list", "(", "dict_var_full", "[", "var_supy", "]", ")", "x", "=", "np", ".", "unique", "...
extract related SUEWS variables for a supy variable `var_supy` Parameters ---------- dict_var_full : dict dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect) var_supy : str supy variable name Returns ------- list related SUEWS variables for `var_supy`
[ "extract", "related", "SUEWS", "variables", "for", "a", "supy", "variable", "var_supy" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L56-L79
22,137
sunt05/SuPy
docs/source/proc_var_info/gen_df_state_csv.py
gen_df_site
def gen_df_site( list_csv_in=list_table, url_base=url_repo_input_site)->pd.DataFrame: '''Generate description info of supy output results as a dataframe Parameters ---------- path_csv_out : str, optional path to the output csv file (the default is 'df_output.csv') list_csv_in : list, optional list of file names for csv files with meta info (the default is url_repo_input_site, which is defined at the top of this file) url_base : URL, optional URL to the input dir of repo base (the default is url_repo_input, which is defined at the top of this file) Returns ------- pd.DataFrame full path to the output csv file ''' # list of URLs list_url_table = [ url_base/table for table in list_csv_in ] try: df_var_info = pd.concat([pd.read_csv(f) for f in list_url_table]) # df_var_info = pd.concat( # [pd.read_csv(f) for f in list_url_table], # sort=False) except: for url in list_url_table: if not url.get().ok: print(f'{url} not existing!') else: # clean meta info df_var_info_x = df_var_info\ .drop(['No.', 'Use'], axis=1)\ .set_index('Column Name') df_var_info_x.index = df_var_info_x.index.map( lambda x: x.replace('`', '')) # retrieve SUEWS-related variables dict_var_full = sp.supy_load.exp_dict_full( sp.supy_load.dict_var2SiteSelect) dict_var_ref_suews = { k: extract_var_suews(dict_var_full, k) for k in dict_var_full } df_var_ref_suews = pd.DataFrame( {k: ', '.join(dict_var_ref_suews[k]) for k in dict_var_ref_suews}, index=[0]).T.rename({ 0: 'SUEWS-related variables' }, axis=1) # retrive supy variable description dict_var_desc = { k: '\n'.join(df_var_info_x.loc[v].values.flatten()) for k, v in dict_var_ref_suews.items() } df_var_desc = pd.DataFrame(dict_var_desc, index=[0]).T\ .rename(columns={0: 'Description'}) # retrieve variable dimensionality df_var_dim = gen_df_dim(df_init_sample) df_var_site_raw = pd.concat( [df_var_dim, df_var_desc, df_var_ref_suews], axis=1, sort=False) df_var_site = df_var_site_raw.filter(items=set_input, axis=0).dropna() return df_var_site
python
def gen_df_site( list_csv_in=list_table, url_base=url_repo_input_site)->pd.DataFrame: '''Generate description info of supy output results as a dataframe Parameters ---------- path_csv_out : str, optional path to the output csv file (the default is 'df_output.csv') list_csv_in : list, optional list of file names for csv files with meta info (the default is url_repo_input_site, which is defined at the top of this file) url_base : URL, optional URL to the input dir of repo base (the default is url_repo_input, which is defined at the top of this file) Returns ------- pd.DataFrame full path to the output csv file ''' # list of URLs list_url_table = [ url_base/table for table in list_csv_in ] try: df_var_info = pd.concat([pd.read_csv(f) for f in list_url_table]) # df_var_info = pd.concat( # [pd.read_csv(f) for f in list_url_table], # sort=False) except: for url in list_url_table: if not url.get().ok: print(f'{url} not existing!') else: # clean meta info df_var_info_x = df_var_info\ .drop(['No.', 'Use'], axis=1)\ .set_index('Column Name') df_var_info_x.index = df_var_info_x.index.map( lambda x: x.replace('`', '')) # retrieve SUEWS-related variables dict_var_full = sp.supy_load.exp_dict_full( sp.supy_load.dict_var2SiteSelect) dict_var_ref_suews = { k: extract_var_suews(dict_var_full, k) for k in dict_var_full } df_var_ref_suews = pd.DataFrame( {k: ', '.join(dict_var_ref_suews[k]) for k in dict_var_ref_suews}, index=[0]).T.rename({ 0: 'SUEWS-related variables' }, axis=1) # retrive supy variable description dict_var_desc = { k: '\n'.join(df_var_info_x.loc[v].values.flatten()) for k, v in dict_var_ref_suews.items() } df_var_desc = pd.DataFrame(dict_var_desc, index=[0]).T\ .rename(columns={0: 'Description'}) # retrieve variable dimensionality df_var_dim = gen_df_dim(df_init_sample) df_var_site_raw = pd.concat( [df_var_dim, df_var_desc, df_var_ref_suews], axis=1, sort=False) df_var_site = df_var_site_raw.filter(items=set_input, axis=0).dropna() return df_var_site
[ "def", "gen_df_site", "(", "list_csv_in", "=", "list_table", ",", "url_base", "=", "url_repo_input_site", ")", "->", "pd", ".", "DataFrame", ":", "# list of URLs", "list_url_table", "=", "[", "url_base", "/", "table", "for", "table", "in", "list_csv_in", "]", ...
Generate description info of supy output results as a dataframe Parameters ---------- path_csv_out : str, optional path to the output csv file (the default is 'df_output.csv') list_csv_in : list, optional list of file names for csv files with meta info (the default is url_repo_input_site, which is defined at the top of this file) url_base : URL, optional URL to the input dir of repo base (the default is url_repo_input, which is defined at the top of this file) Returns ------- pd.DataFrame full path to the output csv file
[ "Generate", "description", "info", "of", "supy", "output", "results", "as", "a", "dataframe" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L105-L178
22,138
sunt05/SuPy
docs/source/proc_var_info/gen_df_state_csv.py
gen_rst_url_split_opts
def gen_rst_url_split_opts(opts_str): """generate option list for RST docs Parameters ---------- opts_str : str a string including all SUEWS related options/variables. e.g. 'SUEWS_a, SUEWS_b' Returns ------- list a list of parsed RST `:ref:` roles. e.g. [':option:`SUEWS_a <suews:SUEWS_a>`'] """ if opts_str is not 'None': list_opts = opts_str.split(',') # list_rst = [gen_rst_url_opt(opt.strip()) for opt in list_opts] list_rst = [opt.strip() for opt in list_opts] # list_rst = [f'`{opt}`' for opt in list_rst] # more properly handle SUEWS options by explicitly adding prefix `suews`: list_rst = [f':option:`{opt} <suews:{opt}>`' for opt in list_rst] list_url_rst = ', '.join(list_rst) else: list_url_rst = 'None' return list_url_rst
python
def gen_rst_url_split_opts(opts_str): if opts_str is not 'None': list_opts = opts_str.split(',') # list_rst = [gen_rst_url_opt(opt.strip()) for opt in list_opts] list_rst = [opt.strip() for opt in list_opts] # list_rst = [f'`{opt}`' for opt in list_rst] # more properly handle SUEWS options by explicitly adding prefix `suews`: list_rst = [f':option:`{opt} <suews:{opt}>`' for opt in list_rst] list_url_rst = ', '.join(list_rst) else: list_url_rst = 'None' return list_url_rst
[ "def", "gen_rst_url_split_opts", "(", "opts_str", ")", ":", "if", "opts_str", "is", "not", "'None'", ":", "list_opts", "=", "opts_str", ".", "split", "(", "','", ")", "# list_rst = [gen_rst_url_opt(opt.strip()) for opt in list_opts]", "list_rst", "=", "[", "opt", "....
generate option list for RST docs Parameters ---------- opts_str : str a string including all SUEWS related options/variables. e.g. 'SUEWS_a, SUEWS_b' Returns ------- list a list of parsed RST `:ref:` roles. e.g. [':option:`SUEWS_a <suews:SUEWS_a>`']
[ "generate", "option", "list", "for", "RST", "docs" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L344-L370
22,139
sunt05/SuPy
docs/source/proc_var_info/gen_df_state_csv.py
gen_df_state
def gen_df_state( list_table: list, set_initcond: set, set_runcontrol: set, set_input_runcontrol: set)->pd.DataFrame: '''generate dataframe of all state variables used by supy Parameters ---------- list_table : list csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo set_initcond : set initial condition related variables set_runcontrol : set runcontrol related variables set_input_runcontrol : set runcontrol related variables used as supy input Returns ------- pd.DataFrame Description of all state variables used by supy ''' # generate a base df for site characteristics related variables df_var_site = gen_df_site(list_table) # generate a base df for runcontrol related variables df_var_runcontrol = gen_df_runcontrol( set_initcond, set_runcontrol, set_input_runcontrol) # generate a base df for initial condition related variables df_var_initcond = gen_df_initcond(set_initcond, set_runcontrol) # further processing by modifying several entries df_var_state = proc_df_state( df_var_site, df_var_runcontrol, df_var_initcond) # reorganising the result: df_var_state = df_var_state.sort_index() # delete duplicates while considering the variable name (stored as index) df_var_state = df_var_state.reset_index() df_var_state = df_var_state.drop_duplicates() # convert index back df_var_state = df_var_state.set_index('variable') return df_var_state
python
def gen_df_state( list_table: list, set_initcond: set, set_runcontrol: set, set_input_runcontrol: set)->pd.DataFrame: '''generate dataframe of all state variables used by supy Parameters ---------- list_table : list csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo set_initcond : set initial condition related variables set_runcontrol : set runcontrol related variables set_input_runcontrol : set runcontrol related variables used as supy input Returns ------- pd.DataFrame Description of all state variables used by supy ''' # generate a base df for site characteristics related variables df_var_site = gen_df_site(list_table) # generate a base df for runcontrol related variables df_var_runcontrol = gen_df_runcontrol( set_initcond, set_runcontrol, set_input_runcontrol) # generate a base df for initial condition related variables df_var_initcond = gen_df_initcond(set_initcond, set_runcontrol) # further processing by modifying several entries df_var_state = proc_df_state( df_var_site, df_var_runcontrol, df_var_initcond) # reorganising the result: df_var_state = df_var_state.sort_index() # delete duplicates while considering the variable name (stored as index) df_var_state = df_var_state.reset_index() df_var_state = df_var_state.drop_duplicates() # convert index back df_var_state = df_var_state.set_index('variable') return df_var_state
[ "def", "gen_df_state", "(", "list_table", ":", "list", ",", "set_initcond", ":", "set", ",", "set_runcontrol", ":", "set", ",", "set_input_runcontrol", ":", "set", ")", "->", "pd", ".", "DataFrame", ":", "# generate a base df for site characteristics related variables...
generate dataframe of all state variables used by supy Parameters ---------- list_table : list csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo set_initcond : set initial condition related variables set_runcontrol : set runcontrol related variables set_input_runcontrol : set runcontrol related variables used as supy input Returns ------- pd.DataFrame Description of all state variables used by supy
[ "generate", "dataframe", "of", "all", "state", "variables", "used", "by", "supy" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L510-L552
22,140
sunt05/SuPy
src/supy/supy_save.py
gen_df_save
def gen_df_save(df_grid_group: pd.DataFrame)->pd.DataFrame: '''generate a dataframe for saving Parameters ---------- df_output_grid_group : pd.DataFrame an output dataframe of a single group and grid Returns ------- pd.DataFrame a dataframe with date time info prepended for saving ''' # generate df_datetime for prepending idx_dt = df_grid_group.index ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year') ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY') ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour') ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min') df_datetime = pd.concat([ ser_year, ser_DOY, ser_hour, ser_min, ], axis=1) df_datetime['Dectime'] = ser_DOY-1+idx_dt.to_perioddelta( 'd').total_seconds()/(24*60*60) df_save = pd.concat([df_datetime, df_grid_group], axis=1) return df_save
python
def gen_df_save(df_grid_group: pd.DataFrame)->pd.DataFrame: '''generate a dataframe for saving Parameters ---------- df_output_grid_group : pd.DataFrame an output dataframe of a single group and grid Returns ------- pd.DataFrame a dataframe with date time info prepended for saving ''' # generate df_datetime for prepending idx_dt = df_grid_group.index ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year') ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY') ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour') ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min') df_datetime = pd.concat([ ser_year, ser_DOY, ser_hour, ser_min, ], axis=1) df_datetime['Dectime'] = ser_DOY-1+idx_dt.to_perioddelta( 'd').total_seconds()/(24*60*60) df_save = pd.concat([df_datetime, df_grid_group], axis=1) return df_save
[ "def", "gen_df_save", "(", "df_grid_group", ":", "pd", ".", "DataFrame", ")", "->", "pd", ".", "DataFrame", ":", "# generate df_datetime for prepending", "idx_dt", "=", "df_grid_group", ".", "index", "ser_year", "=", "pd", ".", "Series", "(", "idx_dt", ".", "y...
generate a dataframe for saving Parameters ---------- df_output_grid_group : pd.DataFrame an output dataframe of a single group and grid Returns ------- pd.DataFrame a dataframe with date time info prepended for saving
[ "generate", "a", "dataframe", "for", "saving" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_save.py#L12-L40
22,141
sunt05/SuPy
src/supy/supy_save.py
save_df_output
def save_df_output( df_output: pd.DataFrame, freq_s: int = 3600, site: str = '', path_dir_save: Path = Path('.'),)->list: '''save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files ''' list_path_save = [] list_group = df_output.columns.get_level_values('group').unique() list_grid = df_output.index.get_level_values('grid').unique() for grid in list_grid: for group in list_group: df_output_grid_group = df_output\ .loc[grid, group]\ .dropna(how='all', axis=0) # save output at the runtime frequency (usually 5 min) # 'DailyState' group will be save a daily frequency path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) # resample output if freq_s is different from runtime freq (usually 5 min) freq_save = pd.Timedelta(freq_s, 's') # resample `df_output` at `freq_save` df_rsmp = resample_output(df_output, freq_save) # 'DailyState' group will be dropped in `resample_output` as resampling is not needed df_rsmp = df_rsmp.drop(columns='DailyState') list_group = df_rsmp.columns.get_level_values('group').unique() list_grid = df_rsmp.index.get_level_values('grid').unique() # save output at the resampling frequency for grid in list_grid: for group in list_group: df_output_grid_group = df_rsmp.loc[grid, 
group] path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) return list_path_save
python
def save_df_output( df_output: pd.DataFrame, freq_s: int = 3600, site: str = '', path_dir_save: Path = Path('.'),)->list: '''save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files ''' list_path_save = [] list_group = df_output.columns.get_level_values('group').unique() list_grid = df_output.index.get_level_values('grid').unique() for grid in list_grid: for group in list_group: df_output_grid_group = df_output\ .loc[grid, group]\ .dropna(how='all', axis=0) # save output at the runtime frequency (usually 5 min) # 'DailyState' group will be save a daily frequency path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) # resample output if freq_s is different from runtime freq (usually 5 min) freq_save = pd.Timedelta(freq_s, 's') # resample `df_output` at `freq_save` df_rsmp = resample_output(df_output, freq_save) # 'DailyState' group will be dropped in `resample_output` as resampling is not needed df_rsmp = df_rsmp.drop(columns='DailyState') list_group = df_rsmp.columns.get_level_values('group').unique() list_grid = df_rsmp.index.get_level_values('grid').unique() # save output at the resampling frequency for grid in list_grid: for group in list_group: df_output_grid_group = df_rsmp.loc[grid, 
group] path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) return list_path_save
[ "def", "save_df_output", "(", "df_output", ":", "pd", ".", "DataFrame", ",", "freq_s", ":", "int", "=", "3600", ",", "site", ":", "str", "=", "''", ",", "path_dir_save", ":", "Path", "=", "Path", "(", "'.'", ")", ",", ")", "->", "list", ":", "list_...
save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files
[ "save", "supy", "output", "dataframe", "to", "txt", "files" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_save.py#L130-L189
22,142
sunt05/SuPy
src/supy/supy_save.py
save_df_state
def save_df_state( df_state: pd.DataFrame, site: str = '', path_dir_save: Path = Path('.'),)->Path: '''save `df_state` to a csv file Parameters ---------- df_state : pd.DataFrame a dataframe of model states produced by a supy run site : str, optional site identifier (the default is '', which indicates an empty site code) path_dir_save : Path, optional path to directory to save results (the default is Path('.'), which the current working directory) Returns ------- Path path to the saved csv file ''' file_state_save = 'df_state_{site}.csv'.format(site=site) # trim filename if site == '' file_state_save = file_state_save.replace('_.csv', '.csv') path_state_save = path_dir_save/file_state_save print('writing out: {path_out}'.format(path_out=path_state_save)) df_state.to_csv(path_state_save) return path_state_save
python
def save_df_state( df_state: pd.DataFrame, site: str = '', path_dir_save: Path = Path('.'),)->Path: '''save `df_state` to a csv file Parameters ---------- df_state : pd.DataFrame a dataframe of model states produced by a supy run site : str, optional site identifier (the default is '', which indicates an empty site code) path_dir_save : Path, optional path to directory to save results (the default is Path('.'), which the current working directory) Returns ------- Path path to the saved csv file ''' file_state_save = 'df_state_{site}.csv'.format(site=site) # trim filename if site == '' file_state_save = file_state_save.replace('_.csv', '.csv') path_state_save = path_dir_save/file_state_save print('writing out: {path_out}'.format(path_out=path_state_save)) df_state.to_csv(path_state_save) return path_state_save
[ "def", "save_df_state", "(", "df_state", ":", "pd", ".", "DataFrame", ",", "site", ":", "str", "=", "''", ",", "path_dir_save", ":", "Path", "=", "Path", "(", "'.'", ")", ",", ")", "->", "Path", ":", "file_state_save", "=", "'df_state_{site}.csv'", ".", ...
save `df_state` to a csv file Parameters ---------- df_state : pd.DataFrame a dataframe of model states produced by a supy run site : str, optional site identifier (the default is '', which indicates an empty site code) path_dir_save : Path, optional path to directory to save results (the default is Path('.'), which the current working directory) Returns ------- Path path to the saved csv file
[ "save", "df_state", "to", "a", "csv", "file" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_save.py#L194-L221
22,143
sunt05/SuPy
src/supy/supy_util.py
gen_FS_DF
def gen_FS_DF(df_output): """generate DataFrame of scores. Parameters ---------- df_WS_data : type Description of parameter `df_WS_data`. Returns ------- type Description of returned object. """ df_day = pd.pivot_table( df_output, values=['T2', 'U10', 'Kdown', 'RH2'], index=['Year', 'Month', 'Day'], aggfunc=[min, max, np.mean, ]) df_day_all_year = pd.pivot_table( df_output, values=['T2', 'U10', 'Kdown', 'RH2'], index=['Month', 'Day'], aggfunc=[min, max, np.mean, ]) array_yr_mon = df_day.index.droplevel( 'Day').to_frame().drop_duplicates().values df_fs = pd.DataFrame( {(yr, mon): (df_day.loc[(yr, mon)].apply(gen_score_ser) - df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean() for yr, mon in array_yr_mon}) return df_fs
python
def gen_FS_DF(df_output): df_day = pd.pivot_table( df_output, values=['T2', 'U10', 'Kdown', 'RH2'], index=['Year', 'Month', 'Day'], aggfunc=[min, max, np.mean, ]) df_day_all_year = pd.pivot_table( df_output, values=['T2', 'U10', 'Kdown', 'RH2'], index=['Month', 'Day'], aggfunc=[min, max, np.mean, ]) array_yr_mon = df_day.index.droplevel( 'Day').to_frame().drop_duplicates().values df_fs = pd.DataFrame( {(yr, mon): (df_day.loc[(yr, mon)].apply(gen_score_ser) - df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean() for yr, mon in array_yr_mon}) return df_fs
[ "def", "gen_FS_DF", "(", "df_output", ")", ":", "df_day", "=", "pd", ".", "pivot_table", "(", "df_output", ",", "values", "=", "[", "'T2'", ",", "'U10'", ",", "'Kdown'", ",", "'RH2'", "]", ",", "index", "=", "[", "'Year'", ",", "'Month'", ",", "'Day'...
generate DataFrame of scores. Parameters ---------- df_WS_data : type Description of parameter `df_WS_data`. Returns ------- type Description of returned object.
[ "generate", "DataFrame", "of", "scores", "." ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L140-L174
22,144
sunt05/SuPy
src/supy/supy_util.py
gen_WS_DF
def gen_WS_DF(df_WS_data): """generate DataFrame of weighted sums. Parameters ---------- df_WS_data : type Description of parameter `df_WS_data`. Returns ------- type Description of returned object. """ df_fs = gen_FS_DF(df_WS_data) list_index = [('mean', 'T2'), ('max', 'T2'), ('min', 'T2'), ('mean', 'U10'), ('max', 'U10'), ('min', 'U10'), ('mean', 'RH2'), ('max', 'RH2'), ('min', 'RH2'), ('mean', 'Kdown')] list_const = [getattr(const, attr) for attr in ['T_MEAN', 'T_MAX', 'T_MIN', 'WIND_MEAN', 'WIND_MAX', 'WIND_MIN', 'RH_MEAN', 'RH_MAX', 'RH_MIN', 'SOLAR_RADIATION_GLOBAL']] list_ws = [df_fs.loc[idx] * cst for idx, cst in zip(list_index, list_const)] df_ws = pd.concat(list_ws, axis=1).sum(axis=1).unstack().dropna() return df_ws
python
def gen_WS_DF(df_WS_data): df_fs = gen_FS_DF(df_WS_data) list_index = [('mean', 'T2'), ('max', 'T2'), ('min', 'T2'), ('mean', 'U10'), ('max', 'U10'), ('min', 'U10'), ('mean', 'RH2'), ('max', 'RH2'), ('min', 'RH2'), ('mean', 'Kdown')] list_const = [getattr(const, attr) for attr in ['T_MEAN', 'T_MAX', 'T_MIN', 'WIND_MEAN', 'WIND_MAX', 'WIND_MIN', 'RH_MEAN', 'RH_MAX', 'RH_MIN', 'SOLAR_RADIATION_GLOBAL']] list_ws = [df_fs.loc[idx] * cst for idx, cst in zip(list_index, list_const)] df_ws = pd.concat(list_ws, axis=1).sum(axis=1).unstack().dropna() return df_ws
[ "def", "gen_WS_DF", "(", "df_WS_data", ")", ":", "df_fs", "=", "gen_FS_DF", "(", "df_WS_data", ")", "list_index", "=", "[", "(", "'mean'", ",", "'T2'", ")", ",", "(", "'max'", ",", "'T2'", ")", ",", "(", "'min'", ",", "'T2'", ")", ",", "(", "'mean'...
generate DataFrame of weighted sums. Parameters ---------- df_WS_data : type Description of parameter `df_WS_data`. Returns ------- type Description of returned object.
[ "generate", "DataFrame", "of", "weighted", "sums", "." ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L177-L208
22,145
sunt05/SuPy
src/supy/supy_util.py
_geoid_radius
def _geoid_radius(latitude: float) -> float: """Calculates the GEOID radius at a given latitude Parameters ---------- latitude : float Latitude (degrees) Returns ------- R : float GEOID Radius (meters) """ lat = deg2rad(latitude) return sqrt(1/(cos(lat) ** 2 / Rmax_WGS84 ** 2 + sin(lat) ** 2 / Rmin_WGS84 ** 2))
python
def _geoid_radius(latitude: float) -> float: lat = deg2rad(latitude) return sqrt(1/(cos(lat) ** 2 / Rmax_WGS84 ** 2 + sin(lat) ** 2 / Rmin_WGS84 ** 2))
[ "def", "_geoid_radius", "(", "latitude", ":", "float", ")", "->", "float", ":", "lat", "=", "deg2rad", "(", "latitude", ")", "return", "sqrt", "(", "1", "/", "(", "cos", "(", "lat", ")", "**", "2", "/", "Rmax_WGS84", "**", "2", "+", "sin", "(", "...
Calculates the GEOID radius at a given latitude Parameters ---------- latitude : float Latitude (degrees) Returns ------- R : float GEOID Radius (meters)
[ "Calculates", "the", "GEOID", "radius", "at", "a", "given", "latitude" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L518-L532
22,146
sunt05/SuPy
src/supy/supy_util.py
geometric2geopotential
def geometric2geopotential(z: float, latitude: float) -> float: """Converts geometric height to geopoential height Parameters ---------- z : float Geometric height (meters) latitude : float Latitude (degrees) Returns ------- h : float Geopotential Height (meters) above the reference ellipsoid """ twolat = deg2rad(2 * latitude) g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2) re = _geoid_radius(latitude) return z * g * re / (re + z)
python
def geometric2geopotential(z: float, latitude: float) -> float: twolat = deg2rad(2 * latitude) g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2) re = _geoid_radius(latitude) return z * g * re / (re + z)
[ "def", "geometric2geopotential", "(", "z", ":", "float", ",", "latitude", ":", "float", ")", "->", "float", ":", "twolat", "=", "deg2rad", "(", "2", "*", "latitude", ")", "g", "=", "9.80616", "*", "(", "1", "-", "0.002637", "*", "cos", "(", "twolat",...
Converts geometric height to geopoential height Parameters ---------- z : float Geometric height (meters) latitude : float Latitude (degrees) Returns ------- h : float Geopotential Height (meters) above the reference ellipsoid
[ "Converts", "geometric", "height", "to", "geopoential", "height" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L535-L553
22,147
sunt05/SuPy
src/supy/supy_util.py
geopotential2geometric
def geopotential2geometric(h: float, latitude: float) -> float: """Converts geopoential height to geometric height Parameters ---------- h : float Geopotential height (meters) latitude : float Latitude (degrees) Returns ------- z : float Geometric Height (meters) above the reference ellipsoid """ twolat = deg2rad(2 * latitude) g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2) re = _geoid_radius(latitude) return h * re / (g * re - h)
python
def geopotential2geometric(h: float, latitude: float) -> float: twolat = deg2rad(2 * latitude) g = 9.80616 * (1 - 0.002637*cos(twolat) + 0.0000059*cos(twolat)**2) re = _geoid_radius(latitude) return h * re / (g * re - h)
[ "def", "geopotential2geometric", "(", "h", ":", "float", ",", "latitude", ":", "float", ")", "->", "float", ":", "twolat", "=", "deg2rad", "(", "2", "*", "latitude", ")", "g", "=", "9.80616", "*", "(", "1", "-", "0.002637", "*", "cos", "(", "twolat",...
Converts geopoential height to geometric height Parameters ---------- h : float Geopotential height (meters) latitude : float Latitude (degrees) Returns ------- z : float Geometric Height (meters) above the reference ellipsoid
[ "Converts", "geopoential", "height", "to", "geometric", "height" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L556-L574
22,148
sunt05/SuPy
src/supy/supy_util.py
get_ser_val_alt
def get_ser_val_alt(lat: float, lon: float, da_alt_x: xr.DataArray, da_alt: xr.DataArray, da_val: xr.DataArray)->pd.Series: '''interpolate atmospheric variable to a specified altitude Parameters ---------- lat : float latitude of specified site lon : float longitude of specified site da_alt_x : xr.DataArray desired altitude to interpolate variable at da_alt : xr.DataArray altitude associated with `da_val`: variable array to interpolate da_val : xr.DataArray atmospheric varialble to interpolate Returns ------- pd.Series interpolated values at the specified altitude of site positioned by [`lat`, `lon`] ''' alt_t_1d = da_alt.sel( latitude=lat, longitude=lon, method='nearest') val_t_1d = da_val.sel( latitude=lat, longitude=lon, method='nearest') alt_x = da_alt_x.sel( latitude=lat, longitude=lon, method='nearest')[0] val_alt = np.array( [interp1d(alt_1d, val_1d)(alt_x) for alt_1d, val_1d in zip(alt_t_1d, val_t_1d)]) ser_alt = pd.Series( val_alt, index=da_val.time.values, name=da_val.name, ) return ser_alt
python
def get_ser_val_alt(lat: float, lon: float, da_alt_x: xr.DataArray, da_alt: xr.DataArray, da_val: xr.DataArray)->pd.Series: '''interpolate atmospheric variable to a specified altitude Parameters ---------- lat : float latitude of specified site lon : float longitude of specified site da_alt_x : xr.DataArray desired altitude to interpolate variable at da_alt : xr.DataArray altitude associated with `da_val`: variable array to interpolate da_val : xr.DataArray atmospheric varialble to interpolate Returns ------- pd.Series interpolated values at the specified altitude of site positioned by [`lat`, `lon`] ''' alt_t_1d = da_alt.sel( latitude=lat, longitude=lon, method='nearest') val_t_1d = da_val.sel( latitude=lat, longitude=lon, method='nearest') alt_x = da_alt_x.sel( latitude=lat, longitude=lon, method='nearest')[0] val_alt = np.array( [interp1d(alt_1d, val_1d)(alt_x) for alt_1d, val_1d in zip(alt_t_1d, val_t_1d)]) ser_alt = pd.Series( val_alt, index=da_val.time.values, name=da_val.name, ) return ser_alt
[ "def", "get_ser_val_alt", "(", "lat", ":", "float", ",", "lon", ":", "float", ",", "da_alt_x", ":", "xr", ".", "DataArray", ",", "da_alt", ":", "xr", ".", "DataArray", ",", "da_val", ":", "xr", ".", "DataArray", ")", "->", "pd", ".", "Series", ":", ...
interpolate atmospheric variable to a specified altitude Parameters ---------- lat : float latitude of specified site lon : float longitude of specified site da_alt_x : xr.DataArray desired altitude to interpolate variable at da_alt : xr.DataArray altitude associated with `da_val`: variable array to interpolate da_val : xr.DataArray atmospheric varialble to interpolate Returns ------- pd.Series interpolated values at the specified altitude of site positioned by [`lat`, `lon`]
[ "interpolate", "atmospheric", "variable", "to", "a", "specified", "altitude" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L578-L617
22,149
sunt05/SuPy
src/supy/supy_util.py
get_df_val_alt
def get_df_val_alt(lat: float, lon: float, da_alt_meas: xr.DataArray, ds_val: xr.Dataset): '''interpolate atmospheric variables to a specified altitude Parameters ---------- lat : float latitude of specified site lon : float longitude of specified site da_alt_x : xr.DataArray desired altitude to interpolate variable at da_alt : xr.DataArray altitude associated with `da_val`: variable array to interpolate da_val : xr.DataArray atmospheric varialble to interpolate Returns ------- pd.DataFrame interpolated values at the specified altitude of site positioned by [`lat`, `lon`] ''' da_alt = geopotential2geometric(ds_val.z, ds_val.latitude) # generate pressure series for grid x da_alt_x = da_alt.sel( latitude=lat, longitude=lon, method='nearest') alt_meas_x = da_alt_meas.sel( latitude=lat, longitude=lon, method='nearest')[0] val_pres = np.array([interp1d(alt, da_alt_x.level)(alt_meas_x) for alt in da_alt_x]) df_val_alt = pd.concat( [get_ser_val_alt( lat, lon, da_alt_meas, da_alt, ds_val[var]) for var in ds_val.data_vars], axis=1 ) # add pressure df_val_alt['p'] = val_pres df_val_alt.index = df_val_alt.index.set_names('time') df_val_alt.columns = df_val_alt.columns.set_names('var') return df_val_alt
python
def get_df_val_alt(lat: float, lon: float, da_alt_meas: xr.DataArray, ds_val: xr.Dataset): '''interpolate atmospheric variables to a specified altitude Parameters ---------- lat : float latitude of specified site lon : float longitude of specified site da_alt_x : xr.DataArray desired altitude to interpolate variable at da_alt : xr.DataArray altitude associated with `da_val`: variable array to interpolate da_val : xr.DataArray atmospheric varialble to interpolate Returns ------- pd.DataFrame interpolated values at the specified altitude of site positioned by [`lat`, `lon`] ''' da_alt = geopotential2geometric(ds_val.z, ds_val.latitude) # generate pressure series for grid x da_alt_x = da_alt.sel( latitude=lat, longitude=lon, method='nearest') alt_meas_x = da_alt_meas.sel( latitude=lat, longitude=lon, method='nearest')[0] val_pres = np.array([interp1d(alt, da_alt_x.level)(alt_meas_x) for alt in da_alt_x]) df_val_alt = pd.concat( [get_ser_val_alt( lat, lon, da_alt_meas, da_alt, ds_val[var]) for var in ds_val.data_vars], axis=1 ) # add pressure df_val_alt['p'] = val_pres df_val_alt.index = df_val_alt.index.set_names('time') df_val_alt.columns = df_val_alt.columns.set_names('var') return df_val_alt
[ "def", "get_df_val_alt", "(", "lat", ":", "float", ",", "lon", ":", "float", ",", "da_alt_meas", ":", "xr", ".", "DataArray", ",", "ds_val", ":", "xr", ".", "Dataset", ")", ":", "da_alt", "=", "geopotential2geometric", "(", "ds_val", ".", "z", ",", "ds...
interpolate atmospheric variables to a specified altitude Parameters ---------- lat : float latitude of specified site lon : float longitude of specified site da_alt_x : xr.DataArray desired altitude to interpolate variable at da_alt : xr.DataArray altitude associated with `da_val`: variable array to interpolate da_val : xr.DataArray atmospheric varialble to interpolate Returns ------- pd.DataFrame interpolated values at the specified altitude of site positioned by [`lat`, `lon`]
[ "interpolate", "atmospheric", "variables", "to", "a", "specified", "altitude" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L620-L661
22,150
sunt05/SuPy
src/supy/supy_util.py
sel_list_pres
def sel_list_pres(ds_sfc_x): ''' select proper levels for model level data download ''' p_min, p_max = ds_sfc_x.sp.min().values, ds_sfc_x.sp.max().values list_pres_level = [ '1', '2', '3', '5', '7', '10', '20', '30', '50', '70', '100', '125', '150', '175', '200', '225', '250', '300', '350', '400', '450', '500', '550', '600', '650', '700', '750', '775', '800', '825', '850', '875', '900', '925', '950', '975', '1000', ] ser_pres_level = pd.Series(list_pres_level).map(int)*100 pos_lev_max, pos_lev_min = ( ser_pres_level[ser_pres_level > p_max].idxmin(), ser_pres_level[ser_pres_level < p_min].idxmax() ) list_pres_sel = ser_pres_level.loc[pos_lev_min:pos_lev_max]/100 list_pres_sel = list_pres_sel.map(int).map(str).to_list() return list_pres_sel
python
def sel_list_pres(ds_sfc_x): ''' select proper levels for model level data download ''' p_min, p_max = ds_sfc_x.sp.min().values, ds_sfc_x.sp.max().values list_pres_level = [ '1', '2', '3', '5', '7', '10', '20', '30', '50', '70', '100', '125', '150', '175', '200', '225', '250', '300', '350', '400', '450', '500', '550', '600', '650', '700', '750', '775', '800', '825', '850', '875', '900', '925', '950', '975', '1000', ] ser_pres_level = pd.Series(list_pres_level).map(int)*100 pos_lev_max, pos_lev_min = ( ser_pres_level[ser_pres_level > p_max].idxmin(), ser_pres_level[ser_pres_level < p_min].idxmax() ) list_pres_sel = ser_pres_level.loc[pos_lev_min:pos_lev_max]/100 list_pres_sel = list_pres_sel.map(int).map(str).to_list() return list_pres_sel
[ "def", "sel_list_pres", "(", "ds_sfc_x", ")", ":", "p_min", ",", "p_max", "=", "ds_sfc_x", ".", "sp", ".", "min", "(", ")", ".", "values", ",", "ds_sfc_x", ".", "sp", ".", "max", "(", ")", ".", "values", "list_pres_level", "=", "[", "'1'", ",", "'2...
select proper levels for model level data download
[ "select", "proper", "levels", "for", "model", "level", "data", "download" ]
47178bd5aee50a059414e3e504940662fbfae0dc
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L811-L838
22,151
ecell/ecell4
ecell4/util/simulation.py
load_world
def load_world(filename): """ Load a world from the given HDF5 filename. The return type is determined by ``ecell4_base.core.load_version_information``. Parameters ---------- filename : str A HDF5 filename. Returns ------- w : World Return one from ``BDWorld``, ``EGFRDWorld``, ``MesoscopicWorld``, ``ODEWorld``, ``GillespieWorld`` and ``SpatiocyteWorld``. """ import ecell4_base vinfo = ecell4_base.core.load_version_information(filename) if vinfo.startswith("ecell4-bd"): return ecell4_base.bd.World(filename) elif vinfo.startswith("ecell4-egfrd"): return ecell4_base.egfrd.World(filename) elif vinfo.startswith("ecell4-meso"): return ecell4_base.meso.World(filename) elif vinfo.startswith("ecell4-ode"): return ecell4_base.ode.World(filename) elif vinfo.startswith("ecell4-gillespie"): return ecell4_base.gillespie.World(filename) elif vinfo.startswith("ecell4-spatiocyte"): return ecell4_base.spatiocyte.World(filename) elif vinfo == "": raise RuntimeError("No version information was found in [{0}]".format(filename)) raise RuntimeError("Unkown version information [{0}]".format(vinfo))
python
def load_world(filename): import ecell4_base vinfo = ecell4_base.core.load_version_information(filename) if vinfo.startswith("ecell4-bd"): return ecell4_base.bd.World(filename) elif vinfo.startswith("ecell4-egfrd"): return ecell4_base.egfrd.World(filename) elif vinfo.startswith("ecell4-meso"): return ecell4_base.meso.World(filename) elif vinfo.startswith("ecell4-ode"): return ecell4_base.ode.World(filename) elif vinfo.startswith("ecell4-gillespie"): return ecell4_base.gillespie.World(filename) elif vinfo.startswith("ecell4-spatiocyte"): return ecell4_base.spatiocyte.World(filename) elif vinfo == "": raise RuntimeError("No version information was found in [{0}]".format(filename)) raise RuntimeError("Unkown version information [{0}]".format(vinfo))
[ "def", "load_world", "(", "filename", ")", ":", "import", "ecell4_base", "vinfo", "=", "ecell4_base", ".", "core", ".", "load_version_information", "(", "filename", ")", "if", "vinfo", ".", "startswith", "(", "\"ecell4-bd\"", ")", ":", "return", "ecell4_base", ...
Load a world from the given HDF5 filename. The return type is determined by ``ecell4_base.core.load_version_information``. Parameters ---------- filename : str A HDF5 filename. Returns ------- w : World Return one from ``BDWorld``, ``EGFRDWorld``, ``MesoscopicWorld``, ``ODEWorld``, ``GillespieWorld`` and ``SpatiocyteWorld``.
[ "Load", "a", "world", "from", "the", "given", "HDF5", "filename", ".", "The", "return", "type", "is", "determined", "by", "ecell4_base", ".", "core", ".", "load_version_information", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/simulation.py#L10-L44
22,152
ecell/ecell4
ecell4/util/show.py
show
def show(target, *args, **kwargs): """ An utility function to display the given target object in the proper way. Paramters --------- target : NumberObserver, TrajectoryObserver, World, str When a NumberObserver object is given, show it with viz.plot_number_observer. When a TrajectoryObserver object is given, show it with viz.plot_trajectory_observer. When a World or a filename suggesting HDF5 is given, show it with viz.plot_world. """ if isinstance(target, (ecell4_base.core.FixedIntervalNumberObserver, ecell4_base.core.NumberObserver, ecell4_base.core.TimingNumberObserver, )): plot_number_observer(target, *args, **kwargs) elif isinstance(target, (ecell4_base.core.FixedIntervalTrajectoryObserver, ecell4_base.core.FixedIntervalTrackingObserver)): plot_trajectory(target, *args, **kwargs) elif isinstance(target, (ecell4_base.ode.ODEWorld, ecell4_base.gillespie.GillespieWorld, ecell4_base.spatiocyte.SpatiocyteWorld, ecell4_base.meso.MesoscopicWorld, ecell4_base.bd.BDWorld, ecell4_base.egfrd.EGFRDWorld)): plot_world(target, *args, **kwargs) elif isinstance(target, (ecell4_base.core.Model, ecell4_base.core.NetworkModel, ecell4_base.core.NetfreeModel)): dump_model(target) elif isinstance(target, str): try: w = simulation.load_world(target) except RuntimeError as e: raise ValueError("The given target [{}] is not supported.".format(repr(target))) else: show(w, *args, **kwargs) else: raise ValueError("The given target [{}] is not supported.".format(repr(target)))
python
def show(target, *args, **kwargs): if isinstance(target, (ecell4_base.core.FixedIntervalNumberObserver, ecell4_base.core.NumberObserver, ecell4_base.core.TimingNumberObserver, )): plot_number_observer(target, *args, **kwargs) elif isinstance(target, (ecell4_base.core.FixedIntervalTrajectoryObserver, ecell4_base.core.FixedIntervalTrackingObserver)): plot_trajectory(target, *args, **kwargs) elif isinstance(target, (ecell4_base.ode.ODEWorld, ecell4_base.gillespie.GillespieWorld, ecell4_base.spatiocyte.SpatiocyteWorld, ecell4_base.meso.MesoscopicWorld, ecell4_base.bd.BDWorld, ecell4_base.egfrd.EGFRDWorld)): plot_world(target, *args, **kwargs) elif isinstance(target, (ecell4_base.core.Model, ecell4_base.core.NetworkModel, ecell4_base.core.NetfreeModel)): dump_model(target) elif isinstance(target, str): try: w = simulation.load_world(target) except RuntimeError as e: raise ValueError("The given target [{}] is not supported.".format(repr(target))) else: show(w, *args, **kwargs) else: raise ValueError("The given target [{}] is not supported.".format(repr(target)))
[ "def", "show", "(", "target", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "target", ",", "(", "ecell4_base", ".", "core", ".", "FixedIntervalNumberObserver", ",", "ecell4_base", ".", "core", ".", "NumberObserver", ",", "...
An utility function to display the given target object in the proper way. Paramters --------- target : NumberObserver, TrajectoryObserver, World, str When a NumberObserver object is given, show it with viz.plot_number_observer. When a TrajectoryObserver object is given, show it with viz.plot_trajectory_observer. When a World or a filename suggesting HDF5 is given, show it with viz.plot_world.
[ "An", "utility", "function", "to", "display", "the", "given", "target", "object", "in", "the", "proper", "way", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/show.py#L15-L43
22,153
ecell/ecell4
ecell4/extra/azure_batch.py
print_batch_exception
def print_batch_exception(batch_exception): """Prints the contents of the specified Batch exception. :param batch_exception: """ _log.error('-------------------------------------------') _log.error('Exception encountered:') if batch_exception.error and \ batch_exception.error.message and \ batch_exception.error.message.value: _log.error(batch_exception.error.message.value) if batch_exception.error.values: _log.error('') for mesg in batch_exception.error.values: _log.error('{}:\t{}'.format(mesg.key, mesg.value)) _log.error('-------------------------------------------')
python
def print_batch_exception(batch_exception): _log.error('-------------------------------------------') _log.error('Exception encountered:') if batch_exception.error and \ batch_exception.error.message and \ batch_exception.error.message.value: _log.error(batch_exception.error.message.value) if batch_exception.error.values: _log.error('') for mesg in batch_exception.error.values: _log.error('{}:\t{}'.format(mesg.key, mesg.value)) _log.error('-------------------------------------------')
[ "def", "print_batch_exception", "(", "batch_exception", ")", ":", "_log", ".", "error", "(", "'-------------------------------------------'", ")", "_log", ".", "error", "(", "'Exception encountered:'", ")", "if", "batch_exception", ".", "error", "and", "batch_exception"...
Prints the contents of the specified Batch exception. :param batch_exception:
[ "Prints", "the", "contents", "of", "the", "specified", "Batch", "exception", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L45-L60
22,154
ecell/ecell4
ecell4/extra/azure_batch.py
upload_file_to_container
def upload_file_to_container(block_blob_client, container_name, file_path): """Uploads a local file to an Azure Blob storage container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the Azure Blob storage container. :param str file_path: The local path to the file. :rtype: `azure.batch.models.ResourceFile` :return: A ResourceFile initialized with a SAS URL appropriate for Batch tasks. """ blob_name = os.path.basename(file_path) _log.info('Uploading file {} to container [{}]...'.format(file_path, container_name)) block_blob_client.create_blob_from_path(container_name, blob_name, file_path) sas_token = block_blob_client.generate_blob_shared_access_signature( container_name, blob_name, permission=azureblob.BlobPermissions.READ, expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2)) sas_url = block_blob_client.make_blob_url(container_name, blob_name, sas_token=sas_token) return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
python
def upload_file_to_container(block_blob_client, container_name, file_path): blob_name = os.path.basename(file_path) _log.info('Uploading file {} to container [{}]...'.format(file_path, container_name)) block_blob_client.create_blob_from_path(container_name, blob_name, file_path) sas_token = block_blob_client.generate_blob_shared_access_signature( container_name, blob_name, permission=azureblob.BlobPermissions.READ, expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2)) sas_url = block_blob_client.make_blob_url(container_name, blob_name, sas_token=sas_token) return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
[ "def", "upload_file_to_container", "(", "block_blob_client", ",", "container_name", ",", "file_path", ")", ":", "blob_name", "=", "os", ".", "path", ".", "basename", "(", "file_path", ")", "_log", ".", "info", "(", "'Uploading file {} to container [{}]...'", ".", ...
Uploads a local file to an Azure Blob storage container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the Azure Blob storage container. :param str file_path: The local path to the file. :rtype: `azure.batch.models.ResourceFile` :return: A ResourceFile initialized with a SAS URL appropriate for Batch tasks.
[ "Uploads", "a", "local", "file", "to", "an", "Azure", "Blob", "storage", "container", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L62-L91
22,155
ecell/ecell4
ecell4/extra/azure_batch.py
get_container_sas_token
def get_container_sas_token(block_blob_client, container_name, blob_permissions): """Obtains a shared access signature granting the specified permissions to the container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the Azure Blob storage container. :param BlobPermissions blob_permissions: :rtype: str :return: A SAS token granting the specified permissions to the container. """ # Obtain the SAS token for the container, setting the expiry time and # permissions. In this case, no start time is specified, so the shared # access signature becomes valid immediately. container_sas_token = \ block_blob_client.generate_container_shared_access_signature( container_name, permission=blob_permissions, expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2)) return container_sas_token
python
def get_container_sas_token(block_blob_client, container_name, blob_permissions): # Obtain the SAS token for the container, setting the expiry time and # permissions. In this case, no start time is specified, so the shared # access signature becomes valid immediately. container_sas_token = \ block_blob_client.generate_container_shared_access_signature( container_name, permission=blob_permissions, expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2)) return container_sas_token
[ "def", "get_container_sas_token", "(", "block_blob_client", ",", "container_name", ",", "blob_permissions", ")", ":", "# Obtain the SAS token for the container, setting the expiry time and", "# permissions. In this case, no start time is specified, so the shared", "# access signature becomes...
Obtains a shared access signature granting the specified permissions to the container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the Azure Blob storage container. :param BlobPermissions blob_permissions: :rtype: str :return: A SAS token granting the specified permissions to the container.
[ "Obtains", "a", "shared", "access", "signature", "granting", "the", "specified", "permissions", "to", "the", "container", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L93-L114
22,156
ecell/ecell4
ecell4/extra/azure_batch.py
create_pool
def create_pool(batch_service_client, pool_id, resource_files, publisher, offer, sku, task_file, vm_size, node_count): """Creates a pool of compute nodes with the specified OS settings. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str pool_id: An ID for the new pool. :param list resource_files: A collection of resource files for the pool's start task. :param str publisher: Marketplace image publisher :param str offer: Marketplace image offer :param str sku: Marketplace image sku :param str task_file: A file name of the script :param str vm_size: A type of vm :param str node_count: The number of nodes """ _log.info('Creating pool [{}]...'.format(pool_id)) # Create a new pool of Linux compute nodes using an Azure Virtual Machines # Marketplace image. For more information about creating pools of Linux # nodes, see: # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/ # Specify the commands for the pool's start task. The start task is run # on each node as it joins the pool, and when it's rebooted or re-imaged. # We use the start task to prep the node for running our task script. task_commands = [ # Copy the python_tutorial_task.py script to the "shared" directory # that all tasks that run on the node have access to. Note that # we are using the -p flag with cp to preserve the file uid/gid, # otherwise since this start task is run as an admin, it would not # be accessible by tasks run as a non-admin user. 
'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(os.path.basename(task_file)), # Install pip 'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python', # Install the azure-storage module so that the task script can access # Azure Blob storage, pre-cryptography version 'pip install azure-storage==0.32.0', # Install E-Cell 4 'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl'] # Get the node agent SKU and image reference for the virtual machine # configuration. # For more information about the virtual machine configuration, see: # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/ sku_to_use, image_ref_to_use = \ select_latest_verified_vm_image_with_node_agent_sku( batch_service_client, publisher, offer, sku) user = batchmodels.AutoUserSpecification( scope=batchmodels.AutoUserScope.pool, elevation_level=batchmodels.ElevationLevel.admin) new_pool = batch.models.PoolAddParameter( id=pool_id, virtual_machine_configuration=batchmodels.VirtualMachineConfiguration( image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use), vm_size=vm_size, target_dedicated_nodes=0, target_low_priority_nodes=node_count, start_task=batch.models.StartTask( command_line=wrap_commands_in_shell('linux', task_commands), user_identity=batchmodels.UserIdentity(auto_user=user), wait_for_success=True, resource_files=resource_files), ) try: batch_service_client.pool.add(new_pool) except batchmodels.BatchErrorException as err: print_batch_exception(err) raise
python
def create_pool(batch_service_client, pool_id, resource_files, publisher, offer, sku, task_file, vm_size, node_count): _log.info('Creating pool [{}]...'.format(pool_id)) # Create a new pool of Linux compute nodes using an Azure Virtual Machines # Marketplace image. For more information about creating pools of Linux # nodes, see: # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/ # Specify the commands for the pool's start task. The start task is run # on each node as it joins the pool, and when it's rebooted or re-imaged. # We use the start task to prep the node for running our task script. task_commands = [ # Copy the python_tutorial_task.py script to the "shared" directory # that all tasks that run on the node have access to. Note that # we are using the -p flag with cp to preserve the file uid/gid, # otherwise since this start task is run as an admin, it would not # be accessible by tasks run as a non-admin user. 'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(os.path.basename(task_file)), # Install pip 'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python', # Install the azure-storage module so that the task script can access # Azure Blob storage, pre-cryptography version 'pip install azure-storage==0.32.0', # Install E-Cell 4 'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl'] # Get the node agent SKU and image reference for the virtual machine # configuration. 
# For more information about the virtual machine configuration, see: # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/ sku_to_use, image_ref_to_use = \ select_latest_verified_vm_image_with_node_agent_sku( batch_service_client, publisher, offer, sku) user = batchmodels.AutoUserSpecification( scope=batchmodels.AutoUserScope.pool, elevation_level=batchmodels.ElevationLevel.admin) new_pool = batch.models.PoolAddParameter( id=pool_id, virtual_machine_configuration=batchmodels.VirtualMachineConfiguration( image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use), vm_size=vm_size, target_dedicated_nodes=0, target_low_priority_nodes=node_count, start_task=batch.models.StartTask( command_line=wrap_commands_in_shell('linux', task_commands), user_identity=batchmodels.UserIdentity(auto_user=user), wait_for_success=True, resource_files=resource_files), ) try: batch_service_client.pool.add(new_pool) except batchmodels.BatchErrorException as err: print_batch_exception(err) raise
[ "def", "create_pool", "(", "batch_service_client", ",", "pool_id", ",", "resource_files", ",", "publisher", ",", "offer", ",", "sku", ",", "task_file", ",", "vm_size", ",", "node_count", ")", ":", "_log", ".", "info", "(", "'Creating pool [{}]...'", ".", "form...
Creates a pool of compute nodes with the specified OS settings. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str pool_id: An ID for the new pool. :param list resource_files: A collection of resource files for the pool's start task. :param str publisher: Marketplace image publisher :param str offer: Marketplace image offer :param str sku: Marketplace image sku :param str task_file: A file name of the script :param str vm_size: A type of vm :param str node_count: The number of nodes
[ "Creates", "a", "pool", "of", "compute", "nodes", "with", "the", "specified", "OS", "settings", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L161-L232
22,157
ecell/ecell4
ecell4/extra/azure_batch.py
create_job
def create_job(batch_service_client, job_id, pool_id): """Creates a job with the specified ID, associated with the specified pool. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID for the job. :param str pool_id: The ID for the pool. """ print('Creating job [{}]...'.format(job_id)) job = batch.models.JobAddParameter( id=job_id, pool_info=batch.models.PoolInformation(pool_id=pool_id)) try: batch_service_client.job.add(job) except batchmodels.batch_error.BatchErrorException as err: print_batch_exception(err) raise
python
def create_job(batch_service_client, job_id, pool_id): print('Creating job [{}]...'.format(job_id)) job = batch.models.JobAddParameter( id=job_id, pool_info=batch.models.PoolInformation(pool_id=pool_id)) try: batch_service_client.job.add(job) except batchmodels.batch_error.BatchErrorException as err: print_batch_exception(err) raise
[ "def", "create_job", "(", "batch_service_client", ",", "job_id", ",", "pool_id", ")", ":", "print", "(", "'Creating job [{}]...'", ".", "format", "(", "job_id", ")", ")", "job", "=", "batch", ".", "models", ".", "JobAddParameter", "(", "id", "=", "job_id", ...
Creates a job with the specified ID, associated with the specified pool. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID for the job. :param str pool_id: The ID for the pool.
[ "Creates", "a", "job", "with", "the", "specified", "ID", "associated", "with", "the", "specified", "pool", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L234-L252
22,158
ecell/ecell4
ecell4/extra/azure_batch.py
add_tasks
def add_tasks(batch_service_client, job_id, loads, output_container_name, output_container_sas_token, task_file, acount_name): """Adds a task for each input file in the collection to the specified job. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID of the job to which to add the tasks. :param list input_files: A collection of input files. One task will be created for each input file. :param output_container_name: The ID of an Azure Blob storage container to which the tasks will upload their results. :param output_container_sas_token: A SAS token granting write access to the specified Azure Blob storage container. :param str task_file: A file name of the script :param str account_name: A storage account """ _log.info('Adding {} tasks to job [{}]...'.format(len(loads), job_id)) # _log.info('Adding {} tasks to job [{}]...'.format(len(input_files), job_id)) tasks = list() for (input_file, output_file, i, j) in loads: command = ['python $AZ_BATCH_NODE_SHARED_DIR/{} ' '--filepath {} --output {} --storageaccount {} ' '--task_id {} --job_id {} ' '--storagecontainer {} --sastoken "{}"'.format( os.path.basename(task_file), input_file.file_path, output_file, acount_name, i, j, output_container_name, output_container_sas_token)] _log.debug('CMD : "{}"'.format(command[0])) tasks.append(batch.models.TaskAddParameter( id='topNtask{}-{}'.format(i, j), command_line=command, resource_files=[input_file] ) ) batch_service_client.task.add_collection(job_id, tasks) task_ids = [task.id for task in tasks] _log.info('{} tasks were added.'.format(len(task_ids))) return task_ids
python
def add_tasks(batch_service_client, job_id, loads, output_container_name, output_container_sas_token, task_file, acount_name): _log.info('Adding {} tasks to job [{}]...'.format(len(loads), job_id)) # _log.info('Adding {} tasks to job [{}]...'.format(len(input_files), job_id)) tasks = list() for (input_file, output_file, i, j) in loads: command = ['python $AZ_BATCH_NODE_SHARED_DIR/{} ' '--filepath {} --output {} --storageaccount {} ' '--task_id {} --job_id {} ' '--storagecontainer {} --sastoken "{}"'.format( os.path.basename(task_file), input_file.file_path, output_file, acount_name, i, j, output_container_name, output_container_sas_token)] _log.debug('CMD : "{}"'.format(command[0])) tasks.append(batch.models.TaskAddParameter( id='topNtask{}-{}'.format(i, j), command_line=command, resource_files=[input_file] ) ) batch_service_client.task.add_collection(job_id, tasks) task_ids = [task.id for task in tasks] _log.info('{} tasks were added.'.format(len(task_ids))) return task_ids
[ "def", "add_tasks", "(", "batch_service_client", ",", "job_id", ",", "loads", ",", "output_container_name", ",", "output_container_sas_token", ",", "task_file", ",", "acount_name", ")", ":", "_log", ".", "info", "(", "'Adding {} tasks to job [{}]...'", ".", "format", ...
Adds a task for each input file in the collection to the specified job. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID of the job to which to add the tasks. :param list input_files: A collection of input files. One task will be created for each input file. :param output_container_name: The ID of an Azure Blob storage container to which the tasks will upload their results. :param output_container_sas_token: A SAS token granting write access to the specified Azure Blob storage container. :param str task_file: A file name of the script :param str account_name: A storage account
[ "Adds", "a", "task", "for", "each", "input", "file", "in", "the", "collection", "to", "the", "specified", "job", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L254-L302
22,159
ecell/ecell4
ecell4/extra/azure_batch.py
wait_for_tasks_to_complete
def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout): """Returns when all tasks in the specified job reach the Completed state. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The id of the job whose tasks should be to monitored. :param timedelta timeout: The duration to wait for task completion. If all tasks in the specified job do not reach Completed state within this time period, an exception will be raised. """ timeout_expiration = datetime.datetime.now() + timeout print("Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end='') while datetime.datetime.now() < timeout_expiration: print('.', end='') sys.stdout.flush() # tasks = batch_service_client.task.list(job_id) # incomplete_tasks = [task for task in tasks if # task.state != batchmodels.TaskState.completed] for (job_id, _) in job_ids: tasks = batch_service_client.task.list(job_id) incomplete_tasks = [task for task in tasks if task.state != batchmodels.TaskState.completed] if incomplete_tasks: break if not incomplete_tasks: print() return True else: time.sleep(1) raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within " "timeout period of " + str(timeout))
python
def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout): timeout_expiration = datetime.datetime.now() + timeout print("Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end='') while datetime.datetime.now() < timeout_expiration: print('.', end='') sys.stdout.flush() # tasks = batch_service_client.task.list(job_id) # incomplete_tasks = [task for task in tasks if # task.state != batchmodels.TaskState.completed] for (job_id, _) in job_ids: tasks = batch_service_client.task.list(job_id) incomplete_tasks = [task for task in tasks if task.state != batchmodels.TaskState.completed] if incomplete_tasks: break if not incomplete_tasks: print() return True else: time.sleep(1) raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within " "timeout period of " + str(timeout))
[ "def", "wait_for_tasks_to_complete", "(", "batch_service_client", ",", "job_ids", ",", "timeout", ")", ":", "timeout_expiration", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "timeout", "print", "(", "\"Monitoring all tasks for 'Completed' state, timeout...
Returns when all tasks in the specified job reach the Completed state. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The id of the job whose tasks should be to monitored. :param timedelta timeout: The duration to wait for task completion. If all tasks in the specified job do not reach Completed state within this time period, an exception will be raised.
[ "Returns", "when", "all", "tasks", "in", "the", "specified", "job", "reach", "the", "Completed", "state", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L304-L337
22,160
ecell/ecell4
ecell4/extra/azure_batch.py
download_blobs_from_container
def download_blobs_from_container(block_blob_client, container_name, directory_path, prefix=None): """Downloads all blobs from the specified Azure Blob storage container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param container_name: The Azure Blob storage container from which to download files. :param directory_path: The local directory to which to download the files. :param str prefix: A name prefix to filter blobs. None as its default """ _log.info('Downloading all files from container [{}]...'.format(container_name)) container_blobs = block_blob_client.list_blobs(container_name, prefix=None) _log.info('{} blobs are found [{}]'.format(len(tuple(container_blobs)), ', '.join(blob.name for blob in container_blobs.items))) for blob in container_blobs.items: destination_file_path = os.path.join(directory_path, blob.name) block_blob_client.get_blob_to_path(container_name, blob.name, destination_file_path) _log.info(' Downloaded blob [{}] from container [{}] to {}'.format( blob.name, container_name, destination_file_path)) _log.info(' Download complete!')
python
def download_blobs_from_container(block_blob_client, container_name, directory_path, prefix=None): _log.info('Downloading all files from container [{}]...'.format(container_name)) container_blobs = block_blob_client.list_blobs(container_name, prefix=None) _log.info('{} blobs are found [{}]'.format(len(tuple(container_blobs)), ', '.join(blob.name for blob in container_blobs.items))) for blob in container_blobs.items: destination_file_path = os.path.join(directory_path, blob.name) block_blob_client.get_blob_to_path(container_name, blob.name, destination_file_path) _log.info(' Downloaded blob [{}] from container [{}] to {}'.format( blob.name, container_name, destination_file_path)) _log.info(' Download complete!')
[ "def", "download_blobs_from_container", "(", "block_blob_client", ",", "container_name", ",", "directory_path", ",", "prefix", "=", "None", ")", ":", "_log", ".", "info", "(", "'Downloading all files from container [{}]...'", ".", "format", "(", "container_name", ")", ...
Downloads all blobs from the specified Azure Blob storage container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param container_name: The Azure Blob storage container from which to download files. :param directory_path: The local directory to which to download the files. :param str prefix: A name prefix to filter blobs. None as its default
[ "Downloads", "all", "blobs", "from", "the", "specified", "Azure", "Blob", "storage", "container", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L339-L368
22,161
ecell/ecell4
ecell4/extra/azure_batch.py
singlerun
def singlerun(job, task_id=0, job_id=0): """This task is for an example.""" import ecell4_base import ecell4 import ecell4.util.simulation import ecell4.util.decorator print('ecell4_base.__version__ = {:s}'.format(ecell4_base.__version__)) print('ecell4.__version__ = {:s}'.format(ecell4.__version__)) print('job={}, task_id={}, job_id={}'.format(str(job), task_id, job_id)) with ecell4.util.decorator.reaction_rules(): A + B == C | (0.01, 0.3) res = ecell4.util.simulation.run_simulation( 1.0, y0={'A': job[0], 'B': job[1], 'C': job[2]}, rndseed=job_id, solver='gillespie', return_type='array') print('A simulation was successfully done.') return res
python
def singlerun(job, task_id=0, job_id=0): import ecell4_base import ecell4 import ecell4.util.simulation import ecell4.util.decorator print('ecell4_base.__version__ = {:s}'.format(ecell4_base.__version__)) print('ecell4.__version__ = {:s}'.format(ecell4.__version__)) print('job={}, task_id={}, job_id={}'.format(str(job), task_id, job_id)) with ecell4.util.decorator.reaction_rules(): A + B == C | (0.01, 0.3) res = ecell4.util.simulation.run_simulation( 1.0, y0={'A': job[0], 'B': job[1], 'C': job[2]}, rndseed=job_id, solver='gillespie', return_type='array') print('A simulation was successfully done.') return res
[ "def", "singlerun", "(", "job", ",", "task_id", "=", "0", ",", "job_id", "=", "0", ")", ":", "import", "ecell4_base", "import", "ecell4", "import", "ecell4", ".", "util", ".", "simulation", "import", "ecell4", ".", "util", ".", "decorator", "print", "(",...
This task is for an example.
[ "This", "task", "is", "for", "an", "example", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/azure_batch.py#L757-L779
22,162
ecell/ecell4
ecell4/util/viz.py
plot_number_observer
def plot_number_observer(*args, **kwargs): """ Generate a plot from NumberObservers and show it. See plot_number_observer_with_matplotlib and _with_nya for details. Parameters ---------- obs : NumberObserver (e.g. FixedIntervalNumberObserver) interactive : bool, default False Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with nyaplot. Examples -------- >>> plot_number_observer(obs1) >>> plot_number_observer(obs1, interactive=True) """ interactive = kwargs.pop('interactive', False) if interactive: plot_number_observer_with_nya(*args, **kwargs) # elif __on_ipython_notebook(): # kwargs['to_png'] = True # plot_number_observer_with_nya(*args, **kwargs) else: if kwargs.pop('to_png', None) is not None: #XXX: Remove an option available only on nyaplot for the consistency import warnings warnings.warn( "An option 'to_png' is not available with matplotlib. Just ignored.") plot_number_observer_with_matplotlib(*args, **kwargs)
python
def plot_number_observer(*args, **kwargs): interactive = kwargs.pop('interactive', False) if interactive: plot_number_observer_with_nya(*args, **kwargs) # elif __on_ipython_notebook(): # kwargs['to_png'] = True # plot_number_observer_with_nya(*args, **kwargs) else: if kwargs.pop('to_png', None) is not None: #XXX: Remove an option available only on nyaplot for the consistency import warnings warnings.warn( "An option 'to_png' is not available with matplotlib. Just ignored.") plot_number_observer_with_matplotlib(*args, **kwargs)
[ "def", "plot_number_observer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "False", ")", "if", "interactive", ":", "plot_number_observer_with_nya", "(", "*", "args", ",", "*", "*...
Generate a plot from NumberObservers and show it. See plot_number_observer_with_matplotlib and _with_nya for details. Parameters ---------- obs : NumberObserver (e.g. FixedIntervalNumberObserver) interactive : bool, default False Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with nyaplot. Examples -------- >>> plot_number_observer(obs1) >>> plot_number_observer(obs1, interactive=True)
[ "Generate", "a", "plot", "from", "NumberObservers", "and", "show", "it", ".", "See", "plot_number_observer_with_matplotlib", "and", "_with_nya", "for", "details", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L28-L58
22,163
ecell/ecell4
ecell4/util/viz.py
plot_world
def plot_world(*args, **kwargs): """ Generate a plot from received instance of World and show it. See also plot_world_with_elegans and plot_world_with_matplotlib. Parameters ---------- world : World or str World or a HDF5 filename to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_world(w) >>> plot_world(w, interactive=False) """ interactive = kwargs.pop('interactive', True) if interactive: plot_world_with_elegans(*args, **kwargs) else: plot_world_with_matplotlib(*args, **kwargs)
python
def plot_world(*args, **kwargs): interactive = kwargs.pop('interactive', True) if interactive: plot_world_with_elegans(*args, **kwargs) else: plot_world_with_matplotlib(*args, **kwargs)
[ "def", "plot_world", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "True", ")", "if", "interactive", ":", "plot_world_with_elegans", "(", "*", "args", ",", "*", "*", "kwargs", ...
Generate a plot from received instance of World and show it. See also plot_world_with_elegans and plot_world_with_matplotlib. Parameters ---------- world : World or str World or a HDF5 filename to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_world(w) >>> plot_world(w, interactive=False)
[ "Generate", "a", "plot", "from", "received", "instance", "of", "World", "and", "show", "it", ".", "See", "also", "plot_world_with_elegans", "and", "plot_world_with_matplotlib", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L60-L83
22,164
ecell/ecell4
ecell4/util/viz.py
plot_movie
def plot_movie(*args, **kwargs): """ Generate a movie from received instances of World and show them. See also plot_movie_with_elegans and plot_movie_with_matplotlib. Parameters ---------- worlds : list of World Worlds to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. """ interactive = kwargs.pop('interactive', False) if interactive: plot_movie_with_elegans(*args, **kwargs) else: plot_movie_with_matplotlib(*args, **kwargs)
python
def plot_movie(*args, **kwargs): interactive = kwargs.pop('interactive', False) if interactive: plot_movie_with_elegans(*args, **kwargs) else: plot_movie_with_matplotlib(*args, **kwargs)
[ "def", "plot_movie", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "False", ")", "if", "interactive", ":", "plot_movie_with_elegans", "(", "*", "args", ",", "*", "*", "kwargs", ...
Generate a movie from received instances of World and show them. See also plot_movie_with_elegans and plot_movie_with_matplotlib. Parameters ---------- worlds : list of World Worlds to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans.
[ "Generate", "a", "movie", "from", "received", "instances", "of", "World", "and", "show", "them", ".", "See", "also", "plot_movie_with_elegans", "and", "plot_movie_with_matplotlib", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L85-L103
22,165
ecell/ecell4
ecell4/util/viz.py
plot_trajectory
def plot_trajectory(*args, **kwargs): """ Generate a plot from received instance of TrajectoryObserver and show it See also plot_trajectory_with_elegans and plot_trajectory_with_matplotlib. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_trajectory(obs) >>> plot_trajectory(obs, interactive=False) """ interactive = kwargs.pop('interactive', True) if interactive: plot_trajectory_with_elegans(*args, **kwargs) else: plot_trajectory_with_matplotlib(*args, **kwargs)
python
def plot_trajectory(*args, **kwargs): interactive = kwargs.pop('interactive', True) if interactive: plot_trajectory_with_elegans(*args, **kwargs) else: plot_trajectory_with_matplotlib(*args, **kwargs)
[ "def", "plot_trajectory", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interactive", "=", "kwargs", ".", "pop", "(", "'interactive'", ",", "True", ")", "if", "interactive", ":", "plot_trajectory_with_elegans", "(", "*", "args", ",", "*", "*", "k...
Generate a plot from received instance of TrajectoryObserver and show it See also plot_trajectory_with_elegans and plot_trajectory_with_matplotlib. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_trajectory(obs) >>> plot_trajectory(obs, interactive=False)
[ "Generate", "a", "plot", "from", "received", "instance", "of", "TrajectoryObserver", "and", "show", "it", "See", "also", "plot_trajectory_with_elegans", "and", "plot_trajectory_with_matplotlib", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L105-L128
22,166
ecell/ecell4
ecell4/util/viz.py
plot_movie_with_elegans
def plot_movie_with_elegans( worlds, radius=None, width=500, height=500, config=None, grid=False, species_list=None): """ Generate a movie from received instances of World and show them on IPython notebook. Parameters ---------- worlds : list of World Worlds to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 500 Width of the plotting area. height : float, default 500 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_movie will not search the list of species """ config = config or {} from IPython.core.display import display, HTML from jinja2 import Template data = {} sizes = {} for i, world in enumerate(worlds): species = __parse_world(world, radius, species_list) for species_info in species: if data.get(species_info['name']) is None: data[species_info['name']] = [] data[species_info['name']].append({ 'df': species_info['data'], 't': i }) sizes[species_info['name']] = species_info['size'] options = { 'player': True, 'autorange': False, 'space_mode': 'wireframe', 'grid': grid, 'range': __get_range_of_world(worlds[0]) } model_id = '"movie' + str(uuid.uuid4()) + '"' color_scale = default_color_scale(config=config) display(HTML(generate_html({ 'model_id': model_id, 'names': json.dumps(list(data.keys())), 'data': json.dumps(list(data.values())), 'colors': json.dumps([color_scale.get_color(name) for name in data.keys()]), 'sizes': json.dumps([sizes[name] for name in data.keys()]), 'options': json.dumps(options) }, 'templates/movie.tmpl')))
python
def plot_movie_with_elegans( worlds, radius=None, width=500, height=500, config=None, grid=False, species_list=None): config = config or {} from IPython.core.display import display, HTML from jinja2 import Template data = {} sizes = {} for i, world in enumerate(worlds): species = __parse_world(world, radius, species_list) for species_info in species: if data.get(species_info['name']) is None: data[species_info['name']] = [] data[species_info['name']].append({ 'df': species_info['data'], 't': i }) sizes[species_info['name']] = species_info['size'] options = { 'player': True, 'autorange': False, 'space_mode': 'wireframe', 'grid': grid, 'range': __get_range_of_world(worlds[0]) } model_id = '"movie' + str(uuid.uuid4()) + '"' color_scale = default_color_scale(config=config) display(HTML(generate_html({ 'model_id': model_id, 'names': json.dumps(list(data.keys())), 'data': json.dumps(list(data.values())), 'colors': json.dumps([color_scale.get_color(name) for name in data.keys()]), 'sizes': json.dumps([sizes[name] for name in data.keys()]), 'options': json.dumps(options) }, 'templates/movie.tmpl')))
[ "def", "plot_movie_with_elegans", "(", "worlds", ",", "radius", "=", "None", ",", "width", "=", "500", ",", "height", "=", "500", ",", "config", "=", "None", ",", "grid", "=", "False", ",", "species_list", "=", "None", ")", ":", "config", "=", "config"...
Generate a movie from received instances of World and show them on IPython notebook. Parameters ---------- worlds : list of World Worlds to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 500 Width of the plotting area. height : float, default 500 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_movie will not search the list of species
[ "Generate", "a", "movie", "from", "received", "instances", "of", "World", "and", "show", "them", "on", "IPython", "notebook", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L476-L539
22,167
ecell/ecell4
ecell4/util/viz.py
plot_world_with_elegans
def plot_world_with_elegans( world, radius=None, width=350, height=350, config=None, grid=True, wireframe=False, species_list=None, debug=None, max_count=1000, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6), return_id=False, predicator=None): """ Generate a plot from received instance of World and show it on IPython notebook. This method returns the instance of dict that indicates color setting for each speices. You can use the dict as the parameter of plot_world, in order to use the same colors in another plot. Parameters ---------- world : World or str World or a HDF5 filename to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 350 Width of the plotting area. height : float, default 350 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. debug : array of dict, default [] *** EXPERIMENTAL IMPRIMENTATION *** Example: >> [{'type': 'box', 'x': 10, 'y': 10, 'z': 10, 'options': {'width': 1, 'height': 1}}] type: 'box', 'plane', 'sphere', and 'cylinder' x, y, z: float options: box: width, height, depth plane: width, height sphere: radius cylinder: radius, height camera_position : tuple, default (-22, 23, 32) camera_rotaiton : tuple, default (-0.6, 0.5, 0.6) Initial position and rotation of camera. return_id : bool, default False If True, return a model id, which is required for `to_png` function. 
""" config = config or {} from IPython.core.display import display, HTML from .simulation import load_world if isinstance(world, str): world = load_world(world) species = __parse_world(world, radius, species_list, max_count, predicator) color_scale = default_color_scale(config=config) plots = [] for species_info in species: plots.append({ 'type': 'Particles', 'data': species_info['data'], 'options': { 'name': species_info['name'], 'color': color_scale.get_color(species_info['name']), 'size': species_info['size'] } }) if debug is not None: data = {'type': [], 'x': [], 'y': [], 'z': [], 'options': []} for obj in debug: for k, v in obj.items(): data[k].append(v) plots.append({ 'type': 'DebugObject', 'data': data, 'options': {} }) model = { 'plots': plots, 'options': { 'world_width': width, 'world_height': height, 'range': __get_range_of_world(world), 'autorange': False, 'grid': grid, 'save_image': True # 'save_image': False } } if wireframe: model['options']['space_mode'] = 'wireframe' model_id = '"viz' + str(uuid.uuid4()) + '"' display(HTML(generate_html( {'model': json.dumps(model), 'model_id': model_id, 'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2], 'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]}, 'templates/particles.tmpl'))) if return_id: return model_id
python
def plot_world_with_elegans( world, radius=None, width=350, height=350, config=None, grid=True, wireframe=False, species_list=None, debug=None, max_count=1000, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6), return_id=False, predicator=None): config = config or {} from IPython.core.display import display, HTML from .simulation import load_world if isinstance(world, str): world = load_world(world) species = __parse_world(world, radius, species_list, max_count, predicator) color_scale = default_color_scale(config=config) plots = [] for species_info in species: plots.append({ 'type': 'Particles', 'data': species_info['data'], 'options': { 'name': species_info['name'], 'color': color_scale.get_color(species_info['name']), 'size': species_info['size'] } }) if debug is not None: data = {'type': [], 'x': [], 'y': [], 'z': [], 'options': []} for obj in debug: for k, v in obj.items(): data[k].append(v) plots.append({ 'type': 'DebugObject', 'data': data, 'options': {} }) model = { 'plots': plots, 'options': { 'world_width': width, 'world_height': height, 'range': __get_range_of_world(world), 'autorange': False, 'grid': grid, 'save_image': True # 'save_image': False } } if wireframe: model['options']['space_mode'] = 'wireframe' model_id = '"viz' + str(uuid.uuid4()) + '"' display(HTML(generate_html( {'model': json.dumps(model), 'model_id': model_id, 'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2], 'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]}, 'templates/particles.tmpl'))) if return_id: return model_id
[ "def", "plot_world_with_elegans", "(", "world", ",", "radius", "=", "None", ",", "width", "=", "350", ",", "height", "=", "350", ",", "config", "=", "None", ",", "grid", "=", "True", ",", "wireframe", "=", "False", ",", "species_list", "=", "None", ","...
Generate a plot from received instance of World and show it on IPython notebook. This method returns the instance of dict that indicates color setting for each speices. You can use the dict as the parameter of plot_world, in order to use the same colors in another plot. Parameters ---------- world : World or str World or a HDF5 filename to render. radius : float, default None If this value is set, all particles in the world will be rendered as if their radius are the same. width : float, default 350 Width of the plotting area. height : float, default 350 Height of the plotting area. config : dict, default {} Dict for configure default colors. Its values are colors unique to each speices. The dictionary will be updated during this plot. Colors included in config dict will never be used for other speices. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. debug : array of dict, default [] *** EXPERIMENTAL IMPRIMENTATION *** Example: >> [{'type': 'box', 'x': 10, 'y': 10, 'z': 10, 'options': {'width': 1, 'height': 1}}] type: 'box', 'plane', 'sphere', and 'cylinder' x, y, z: float options: box: width, height, depth plane: width, height sphere: radius cylinder: radius, height camera_position : tuple, default (-22, 23, 32) camera_rotaiton : tuple, default (-0.6, 0.5, 0.6) Initial position and rotation of camera. return_id : bool, default False If True, return a model id, which is required for `to_png` function.
[ "Generate", "a", "plot", "from", "received", "instance", "of", "World", "and", "show", "it", "on", "IPython", "notebook", ".", "This", "method", "returns", "the", "instance", "of", "dict", "that", "indicates", "color", "setting", "for", "each", "speices", "....
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L541-L648
22,168
ecell/ecell4
ecell4/util/viz.py
generate_html
def generate_html(keywords, tmpl_path, package_name='ecell4.util'): """ Generate static html file from JSON model and its own id. Parameters ---------- model : dict JSON model from which ecell4.viz generates a plot. model_id : string Unique id for the plot. Returns ------- html : A HTML object """ from jinja2 import Template import pkgutil template = Template(pkgutil.get_data(package_name, tmpl_path).decode()) # path = os.path.abspath(os.path.dirname(__file__)) + tmpl_path # template = Template(open(path).read()) html = template.render(**keywords) return html
python
def generate_html(keywords, tmpl_path, package_name='ecell4.util'): from jinja2 import Template import pkgutil template = Template(pkgutil.get_data(package_name, tmpl_path).decode()) # path = os.path.abspath(os.path.dirname(__file__)) + tmpl_path # template = Template(open(path).read()) html = template.render(**keywords) return html
[ "def", "generate_html", "(", "keywords", ",", "tmpl_path", ",", "package_name", "=", "'ecell4.util'", ")", ":", "from", "jinja2", "import", "Template", "import", "pkgutil", "template", "=", "Template", "(", "pkgutil", ".", "get_data", "(", "package_name", ",", ...
Generate static html file from JSON model and its own id. Parameters ---------- model : dict JSON model from which ecell4.viz generates a plot. model_id : string Unique id for the plot. Returns ------- html : A HTML object
[ "Generate", "static", "html", "file", "from", "JSON", "model", "and", "its", "own", "id", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L802-L825
22,169
ecell/ecell4
ecell4/util/viz.py
plot_trajectory2d_with_matplotlib
def plot_trajectory2d_with_matplotlib( obs, plane='xy', max_count=10, figsize=6, legend=True, wireframe=False, grid=True, noaxis=False, plot_range=None, **kwargs): """ Make a 2D plot from received instance of TrajectoryObserver and show it on IPython notebook. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. plane : str, default 'xy' 'xy', 'yz', 'zx'. max_count : Integer, default 10 The maximum number of particles to show. If None, show all. figsize : float, default 6 Size of the plotting area. Given in inch. legend : bool, default True plot_range : tuple, default None Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez). If None, the minimum volume containing all the trajectories is used. """ import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) data = obs.data() if max_count is not None and len(data) > max_count: data = random.sample(data, max_count) wrange = __get_range_of_trajectories(data, plot_range) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) ax.set_xlabel(plane[0].upper()) ax.set_ylabel(plane[1].upper()) lines = [] for i, y in enumerate(data): xarr, yarr, zarr = [], [], [] for pos in y: xarr.append(pos[xidx]) yarr.append(pos[yidx]) lines.append((xarr, yarr)) __plot_trajectory2d_with_matplotlib(lines, ax, **kwargs) # if legend: # ax.legend(loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {"loc": "best", "shadow": True} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) plt.show()
python
def plot_trajectory2d_with_matplotlib( obs, plane='xy', max_count=10, figsize=6, legend=True, wireframe=False, grid=True, noaxis=False, plot_range=None, **kwargs): import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) data = obs.data() if max_count is not None and len(data) > max_count: data = random.sample(data, max_count) wrange = __get_range_of_trajectories(data, plot_range) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) ax.set_xlabel(plane[0].upper()) ax.set_ylabel(plane[1].upper()) lines = [] for i, y in enumerate(data): xarr, yarr, zarr = [], [], [] for pos in y: xarr.append(pos[xidx]) yarr.append(pos[yidx]) lines.append((xarr, yarr)) __plot_trajectory2d_with_matplotlib(lines, ax, **kwargs) # if legend: # ax.legend(loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {"loc": "best", "shadow": True} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) plt.show()
[ "def", "plot_trajectory2d_with_matplotlib", "(", "obs", ",", "plane", "=", "'xy'", ",", "max_count", "=", "10", ",", "figsize", "=", "6", ",", "legend", "=", "True", ",", "wireframe", "=", "False", ",", "grid", "=", "True", ",", "noaxis", "=", "False", ...
Make a 2D plot from received instance of TrajectoryObserver and show it on IPython notebook. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. plane : str, default 'xy' 'xy', 'yz', 'zx'. max_count : Integer, default 10 The maximum number of particles to show. If None, show all. figsize : float, default 6 Size of the plotting area. Given in inch. legend : bool, default True plot_range : tuple, default None Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez). If None, the minimum volume containing all the trajectories is used.
[ "Make", "a", "2D", "plot", "from", "received", "instance", "of", "TrajectoryObserver", "and", "show", "it", "on", "IPython", "notebook", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L1243-L1304
22,170
ecell/ecell4
ecell4/util/viz.py
plot_world2d_with_matplotlib
def plot_world2d_with_matplotlib( world, plane='xy', marker_size=3, figsize=6, grid=True, wireframe=False, species_list=None, max_count=1000, angle=None, legend=True, noaxis=False, scale=1.0, **kwargs): """ Make a 2D plot from received instance of World and show it on IPython notebook. Parameters ---------- world : World or str World to render. A HDF5 filename is also acceptable. plane : str, default 'xy' 'xy', 'yz', 'zx'. marker_size : float, default 3 Marker size for all species. Size is passed to scatter function as argument, s=(2 ** marker_size). figsize : float, default 6 Size of the plotting area. Given in inch. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. None means no limitation. angle : tuple, default None A tuple of view angle which is given as (azim, elev, dist). If None, use default assumed to be (-60, 30, 10). legend : bool, default True scale : float, default 1 A length-scaling factor """ import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) if species_list is None: species_list = [p.species().serial() for pid, p in world.list_particles()] species_list = sorted( set(species_list), key=species_list.index) # XXX: pick unique ones wrange = __get_range_of_world(world, scale) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) scatters, plots = __scatter_world2d_with_matplotlib( world, (xidx, yidx), ax, species_list, marker_size, max_count, scale, **kwargs) ax.set_xlabel(plane[0].upper()) 
ax.set_ylabel(plane[1].upper()) # if legend: # ax.legend(handles=plots, labels=species_list, loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5), 'shadow': False, 'frameon': False, 'fontsize': 'x-large', 'scatterpoints': 1} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) # ax.legend(handles=plots, labels=species_list, **legend_opts) plt.show()
python
def plot_world2d_with_matplotlib( world, plane='xy', marker_size=3, figsize=6, grid=True, wireframe=False, species_list=None, max_count=1000, angle=None, legend=True, noaxis=False, scale=1.0, **kwargs): import matplotlib.pyplot as plt plane = plane.lower() if len(plane) != 2 or plane[0] not in ('x', 'y', 'z') or plane[1] not in ('x', 'y', 'z'): raise ValueError("invalid 'plane' argument [{}] was given.".format(repr(plane))) xidx = 0 if plane[0] == 'x' else (1 if plane[0] == 'y' else 2) yidx = 0 if plane[1] == 'x' else (1 if plane[1] == 'y' else 2) if species_list is None: species_list = [p.species().serial() for pid, p in world.list_particles()] species_list = sorted( set(species_list), key=species_list.index) # XXX: pick unique ones wrange = __get_range_of_world(world, scale) wrange = (wrange['x'], wrange['y'], wrange['z']) wrange = {'x': wrange[xidx], 'y': wrange[yidx]} fig, ax = __prepare_plot_with_matplotlib( wrange, figsize, grid, wireframe, noaxis) scatters, plots = __scatter_world2d_with_matplotlib( world, (xidx, yidx), ax, species_list, marker_size, max_count, scale, **kwargs) ax.set_xlabel(plane[0].upper()) ax.set_ylabel(plane[1].upper()) # if legend: # ax.legend(handles=plots, labels=species_list, loc='best', shadow=True) if legend is not None and legend is not False: legend_opts = {'loc': 'center left', 'bbox_to_anchor': (1.0, 0.5), 'shadow': False, 'frameon': False, 'fontsize': 'x-large', 'scatterpoints': 1} if isinstance(legend, dict): legend_opts.update(legend) ax.legend(**legend_opts) # ax.legend(handles=plots, labels=species_list, **legend_opts) plt.show()
[ "def", "plot_world2d_with_matplotlib", "(", "world", ",", "plane", "=", "'xy'", ",", "marker_size", "=", "3", ",", "figsize", "=", "6", ",", "grid", "=", "True", ",", "wireframe", "=", "False", ",", "species_list", "=", "None", ",", "max_count", "=", "10...
Make a 2D plot from received instance of World and show it on IPython notebook. Parameters ---------- world : World or str World to render. A HDF5 filename is also acceptable. plane : str, default 'xy' 'xy', 'yz', 'zx'. marker_size : float, default 3 Marker size for all species. Size is passed to scatter function as argument, s=(2 ** marker_size). figsize : float, default 6 Size of the plotting area. Given in inch. species_list : array of string, default None If set, plot_world will not search the list of species. max_count : Integer, default 1000 The maximum number of particles to show for each species. None means no limitation. angle : tuple, default None A tuple of view angle which is given as (azim, elev, dist). If None, use default assumed to be (-60, 30, 10). legend : bool, default True scale : float, default 1 A length-scaling factor
[ "Make", "a", "2D", "plot", "from", "received", "instance", "of", "World", "and", "show", "it", "on", "IPython", "notebook", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L1908-L1974
22,171
ecell/ecell4
ecell4/util/viz.py
plot_world_with_plotly
def plot_world_with_plotly(world, species_list=None, max_count=1000): """ Plot a World on IPython Notebook """ if isinstance(world, str): from .simulation import load_world world = load_world(world) if species_list is None: species_list = [sp.serial() for sp in world.list_species()] species_list.sort() import random from ecell4_base.core import Species positions = {} for serial in species_list: x, y, z = [], [], [] particles = world.list_particles_exact(Species(serial)) if max_count is not None and len(particles) > max_count: particles = random.sample(particles, max_count) for pid, p in particles: pos = p.position() x.append(pos[0]) y.append(pos[1]) z.append(pos[2]) positions[serial] = (x, y, z) import plotly import plotly.graph_objs as go plotly.offline.init_notebook_mode() marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1), opacity=0.9, symbol='circle') data = [] for serial, (x, y, z) in positions.items(): trace = go.Scatter3d( x=x, y=y, z=z, mode='markers', marker=marker, name=serial) data.append(trace) layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0)) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig)
python
def plot_world_with_plotly(world, species_list=None, max_count=1000): if isinstance(world, str): from .simulation import load_world world = load_world(world) if species_list is None: species_list = [sp.serial() for sp in world.list_species()] species_list.sort() import random from ecell4_base.core import Species positions = {} for serial in species_list: x, y, z = [], [], [] particles = world.list_particles_exact(Species(serial)) if max_count is not None and len(particles) > max_count: particles = random.sample(particles, max_count) for pid, p in particles: pos = p.position() x.append(pos[0]) y.append(pos[1]) z.append(pos[2]) positions[serial] = (x, y, z) import plotly import plotly.graph_objs as go plotly.offline.init_notebook_mode() marker = dict(size=6, line=dict(color='rgb(204, 204, 204)', width=1), opacity=0.9, symbol='circle') data = [] for serial, (x, y, z) in positions.items(): trace = go.Scatter3d( x=x, y=y, z=z, mode='markers', marker=marker, name=serial) data.append(trace) layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0)) fig = go.Figure(data=data, layout=layout) plotly.offline.iplot(fig)
[ "def", "plot_world_with_plotly", "(", "world", ",", "species_list", "=", "None", ",", "max_count", "=", "1000", ")", ":", "if", "isinstance", "(", "world", ",", "str", ")", ":", "from", ".", "simulation", "import", "load_world", "world", "=", "load_world", ...
Plot a World on IPython Notebook
[ "Plot", "a", "World", "on", "IPython", "Notebook" ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/viz.py#L2136-L2182
22,172
ecell/ecell4
ecell4/extra/_unit.py
getUnitRegistry
def getUnitRegistry(length="meter", time="second", substance="item", volume=None, other=()):
    """Return a pint.UnitRegistry made compatible with ecell4.

    Parameters
    ----------
    length : str, optional
        A default unit for '[length]'. 'meter' is its default.
    time : str, optional
        A default unit for '[time]'. 'second' is its default.
    substance : str, optional
        A default unit for '[substance]' (the number of molecules). 'item' is its default.
    volume : str, optional
        A default unit for '[volume]'. Its default is None, thus '[length]**3'.
    other : tuple, optional
        A list of user-defined default units other than the above.

    Returns
    -------
    ureg : pint.UnitRegistry

    """
    ureg = pint.UnitRegistry()
    # 'item' counts discrete molecules: 1 item == 1 / avogadro_number mole.
    ureg.define('item = mole / (avogadro_number * 1 mole)')

    try:
        pint.molar
    # except UndefinedUnitError:
    except AttributeError:
        # Older pint releases do not ship 'molar'; define the
        # '[concentration]' dimension and the unit ourselves.
        # https://github.com/hgrecco/pint/blob/master/pint/default_en.txt#L75-L77
        ureg.define('[concentration] = [substance] / [volume]')
        ureg.define('molar = mol / (1e-3 * m ** 3) = M')

    # Build a 'local' unit system whose base units are the caller's
    # defaults, layered on top of the international system.
    base_units = [unit for unit in (length, time, substance, volume) if unit is not None]
    base_units.extend(other)
    _ = ureg.System.from_lines(
        ["@system local using international"] + base_units,
        ureg.get_base_units)
    ureg.default_system = 'local'

    wrap_quantity(ureg.Quantity)

    pint.set_application_registry(ureg)  # for pickling
    return ureg
python
def getUnitRegistry(length="meter", time="second", substance="item", volume=None, other=()): ureg = pint.UnitRegistry() ureg.define('item = mole / (avogadro_number * 1 mole)') try: pint.molar # except UndefinedUnitError: except AttributeError: # https://github.com/hgrecco/pint/blob/master/pint/default_en.txt#L75-L77 ureg.define('[concentration] = [substance] / [volume]') ureg.define('molar = mol / (1e-3 * m ** 3) = M') base_units = [unit for unit in (length, time, substance, volume) if unit is not None] base_units.extend(other) _ = ureg.System.from_lines( ["@system local using international"] + base_units, ureg.get_base_units) ureg.default_system = 'local' wrap_quantity(ureg.Quantity) pint.set_application_registry(ureg) # for pickling return ureg
[ "def", "getUnitRegistry", "(", "length", "=", "\"meter\"", ",", "time", "=", "\"second\"", ",", "substance", "=", "\"item\"", ",", "volume", "=", "None", ",", "other", "=", "(", ")", ")", ":", "ureg", "=", "pint", ".", "UnitRegistry", "(", ")", "ureg",...
Return a pint.UnitRegistry made compatible with ecell4. Parameters ---------- length : str, optional A default unit for '[length]'. 'meter' is its default. time : str, optional A default unit for '[time]'. 'second' is its default. substance : str, optional A default unit for '[substance]' (the number of molecules). 'item' is its default. volume : str, optional A default unit for '[volume]'. Its default is None, thus '[length]**3'. other : tuple, optional A list of user-defined default units other than the above. Returns ------- ureg : pint.UnitRegistry
[ "Return", "a", "pint", ".", "UnitRegistry", "made", "compatible", "with", "ecell4", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/_unit.py#L32-L74
22,173
ecell/ecell4
ecell4/datasource/biogrid.py
biogridDataSource.interactor
def interactor(self, geneList=None, org=None):
    """
    Supposing geneList returns an unique item.
    """
    # BUG FIX: the original read `organisms = organisms or []`, but no
    # name `organisms` exists in this scope (the parameter is `org`), so
    # every call raised NameError before querying anything.
    geneList = geneList or []
    org = org or []

    querydata = self.interactions(geneList, org)

    # Group the raw interaction records by the interactor-B symbol name.
    returnData = {}
    for i in querydata:
        if not returnData.get(i["symB"]["name"]):
            returnData[i["symB"]["name"]] = {"interactions": []}
        returnData[i["symB"]["name"]]["interactions"].append(i)
    return returnData
python
def interactor(self, geneList=None, org=None):
    # BUG FIX: `organisms = organisms or []` referenced an undefined name
    # (the parameter is `org`), so every call raised NameError.
    geneList = geneList or []
    org = org or []

    querydata = self.interactions(geneList, org)

    # Group the raw interaction records by the interactor-B symbol name.
    returnData = {}
    for i in querydata:
        if not returnData.get(i["symB"]["name"]):
            returnData[i["symB"]["name"]] = {"interactions": []}
        returnData[i["symB"]["name"]]["interactions"].append(i)
    return returnData
[ "def", "interactor", "(", "self", ",", "geneList", "=", "None", ",", "org", "=", "None", ")", ":", "geneList", "=", "geneList", "or", "[", "]", "organisms", "=", "organisms", "or", "[", "]", "querydata", "=", "self", ".", "interactions", "(", "geneList...
Supposing geneList returns an unique item.
[ "Supposing", "geneList", "returns", "an", "unique", "item", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/datasource/biogrid.py#L96-L109
22,174
ecell/ecell4
ecell4/util/ports.py
save_sbml
def save_sbml(filename, model, y0=None, volume=1.0, is_valid=True):
    """
    Write the given model to `filename` in the SBML format.

    Parameters
    ----------
    model : NetworkModel
    y0 : dict
        Initial condition.
    volume : Real or Real3, optional
        A size of the simulation volume.
    is_valid : bool, optional
        Check if the generated model is valid. True as a default.

    """
    import libsbml

    document = export_sbml(model, y0 or {}, volume, is_valid)
    # Let libsbml serialize the document straight to disk.
    libsbml.writeSBML(document, filename)
python
def save_sbml(filename, model, y0=None, volume=1.0, is_valid=True): y0 = y0 or {} import libsbml document = export_sbml(model, y0, volume, is_valid) # with open(filename, 'w') as fout: # fout.write(libsbml.writeSBMLToString(document)) # writer = libsbml.SBMLWriter() # writer.writeSBML(document, filename) libsbml.writeSBML(document, filename)
[ "def", "save_sbml", "(", "filename", ",", "model", ",", "y0", "=", "None", ",", "volume", "=", "1.0", ",", "is_valid", "=", "True", ")", ":", "y0", "=", "y0", "or", "{", "}", "import", "libsbml", "document", "=", "export_sbml", "(", "model", ",", "...
Save a model in the SBML format. Parameters ---------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume. is_valid : bool, optional Check if the generated model is valid. True as a default.
[ "Save", "a", "model", "in", "the", "SBML", "format", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/ports.py#L220-L245
22,175
ecell/ecell4
ecell4/util/ports.py
load_sbml
def load_sbml(filename):
    """
    Load a model from a SBML file.

    Parameters
    ----------
    filename : str
        The input SBML filename.

    Returns
    -------
    model : NetworkModel
    y0 : dict
        Initial condition.
    volume : Real or Real3, optional
        A size of the simulation volume.

    """
    import libsbml

    document = libsbml.readSBML(filename)
    document.validateSBML()

    # ERROR first, then FATAL, matching the report order below.
    severities = (libsbml.LIBSBML_SEV_ERROR, libsbml.LIBSBML_SEV_FATAL)
    num_errors = sum(document.getNumErrors(sev) for sev in severities)
    if num_errors > 0:
        messages = "The generated document is not valid."
        messages += " {} errors were found:\n".format(num_errors)
        for severity in severities:
            for idx in range(document.getNumErrors(severity)):
                err = document.getErrorWithSeverity(idx, severity)
                messages += "{}: {}\n".format(
                    err.getSeverityAsString(), err.getShortMessage())
        raise RuntimeError(messages)
    return import_sbml(document)
python
def load_sbml(filename): import libsbml document = libsbml.readSBML(filename) document.validateSBML() num_errors = (document.getNumErrors(libsbml.LIBSBML_SEV_ERROR) + document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)) if num_errors > 0: messages = "The generated document is not valid." messages += " {} errors were found:\n".format(num_errors) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_ERROR) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)): err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_FATAL) messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage()) raise RuntimeError(messages) return import_sbml(document)
[ "def", "load_sbml", "(", "filename", ")", ":", "import", "libsbml", "document", "=", "libsbml", ".", "readSBML", "(", "filename", ")", "document", ".", "validateSBML", "(", ")", "num_errors", "=", "(", "document", ".", "getNumErrors", "(", "libsbml", ".", ...
Load a model from a SBML file. Parameters ---------- filename : str The input SBML filename. Returns ------- model : NetworkModel y0 : dict Initial condition. volume : Real or Real3, optional A size of the simulation volume.
[ "Load", "a", "model", "from", "a", "SBML", "file", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/ports.py#L377-L411
22,176
ecell/ecell4
ecell4/util/decorator.py
get_model
def get_model(is_netfree=False, without_reset=False, seeds=None, effective=False):
    """
    Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES`` and ``REACTIONRULES``.

    Parameters
    ----------
    is_netfree : bool, optional
        Return ``NetfreeModel`` if True, and ``NetworkModel`` if else.
        Default is False.
    without_reset : bool, optional
        Do not reset the global variables after the generation if True.
        Default is False.
    seeds : list, optional
        A list of seed ``Species`` for expanding the model.
        If this is not None, generate a ``NetfreeModel`` once, and return a
        ``NetworkModel``, which is an expanded form of that with the given
        seeds. Default is None.
    effective : bool, optional
        See ``NetfreeModel.effective`` and ``Netfree.set_effective``.
        Only meaningful with option ``is_netfree=True``. Default is False.

    Returns
    -------
    model : NetworkModel, NetfreeModel

    """
    try:
        # Expansion requires a rule-based (Netfree) model even when the
        # caller asked for a NetworkModel.
        if seeds is not None or is_netfree:
            m = ecell4_base.core.NetfreeModel()
        else:
            m = ecell4_base.core.NetworkModel()

        for sp in SPECIES_ATTRIBUTES:
            m.add_species_attribute(sp)
        for rr in REACTION_RULES:
            m.add_reaction_rule(rr)

        if not without_reset:
            reset_model()

        if seeds is not None:
            return m.expand(seeds)

        if isinstance(m, ecell4_base.core.NetfreeModel):
            m.set_effective(effective)
    except Exception:
        # Keep the global buffers clean even on failure, then re-raise with
        # the original traceback (the old `raise e` rebuilt it from here).
        reset_model()
        raise

    return m
python
def get_model(is_netfree=False, without_reset=False, seeds=None, effective=False): try: if seeds is not None or is_netfree: m = ecell4_base.core.NetfreeModel() else: m = ecell4_base.core.NetworkModel() for sp in SPECIES_ATTRIBUTES: m.add_species_attribute(sp) for rr in REACTION_RULES: m.add_reaction_rule(rr) if not without_reset: reset_model() if seeds is not None: return m.expand(seeds) if isinstance(m, ecell4_base.core.NetfreeModel): m.set_effective(effective) except Exception as e: reset_model() raise e return m
[ "def", "get_model", "(", "is_netfree", "=", "False", ",", "without_reset", "=", "False", ",", "seeds", "=", "None", ",", "effective", "=", "False", ")", ":", "try", ":", "if", "seeds", "is", "not", "None", "or", "is_netfree", ":", "m", "=", "ecell4_bas...
Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES`` and ``REACTIONRULES``. Parameters ---------- is_netfree : bool, optional Return ``NetfreeModel`` if True, and ``NetworkModel`` if else. Default is False. without_reset : bool, optional Do not reset the global variables after the generation if True. Default is False. seeds : list, optional A list of seed ``Species`` for expanding the model. If this is not None, generate a ``NetfreeModel`` once, and return a ``NetworkModel``, which is an expanded form of that with the given seeds. Default is None. effective : bool, optional See ``NetfreeModel.effective`` and ``Netfree.set_effective``. Only meaningfull with option ``is_netfree=True``. Default is False Returns ------- model : NetworkModel, NetfreeModel
[ "Generate", "a", "model", "with", "parameters", "in", "the", "global", "scope", "SPECIES_ATTRIBUTES", "and", "REACTIONRULES", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/decorator.py#L143-L194
22,177
ecell/ecell4
ecell4/extra/ensemble.py
run_serial
def run_serial(target, jobs, n=1, **kwargs):
    """
    Apply `target` to every job sequentially, `n` tasks per job, and return
    the collected results.

    `target` receives a copy of one element of `jobs`, a 1-based job id and
    a 1-based task id.

    Parameters
    ----------
    target : function
        A function to be evaluated, accepting (args, job_id, task_id).
    jobs : list
        A list of arguments passed to the function.
    n : int, optional
        A number of tasks per job. 1 for default.

    Returns
    -------
    results : list
        A list of results. Each element is a list containing `n` results.

    Examples
    --------
    >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))
    >>> target = lambda args, job_id, task_id: (args[1] * args[0])
    >>> run_serial(target, jobs)
    [['spam'], ['hamham'], ['eggseggseggs']]

    See Also
    --------
    ecell4.extra.ensemble.run_sge
    ecell4.extra.ensemble.run_slurm
    ecell4.extra.ensemble.run_multiprocessing
    ecell4.extra.ensemble.run_azure
    """
    results = []
    for job_id, job in enumerate(jobs, start=1):
        results.append(
            [target(copy.copy(job), job_id, task_id)
             for task_id in range(1, n + 1)])
    return results
python
def run_serial(target, jobs, n=1, **kwargs): return [[target(copy.copy(job), i + 1, j + 1) for j in range(n)] for i, job in enumerate(jobs)]
[ "def", "run_serial", "(", "target", ",", "jobs", ",", "n", "=", "1", ",", "*", "*", "kwargs", ")", ":", "return", "[", "[", "target", "(", "copy", ".", "copy", "(", "job", ")", ",", "i", "+", "1", ",", "j", "+", "1", ")", "for", "j", "in", ...
Evaluate the given function with each set of arguments, and return a list of results. This function does in series. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_serial(target, jobs) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_serial(target, jobs, n=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] >>> seeds = genseeds(3) >>> def target(arg, job_id, task_id): ... from ecell4.extra.ensemble import getseed ... return getseed(arg, task_id) >>> run_serial(target, (seeds, ), n=3) # doctest: +SKIP [[127152315, 2028054913, 253611282]] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure
[ "Evaluate", "the", "given", "function", "with", "each", "set", "of", "arguments", "and", "return", "a", "list", "of", "results", ".", "This", "function", "does", "in", "series", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L22-L71
22,178
ecell/ecell4
ecell4/extra/ensemble.py
run_multiprocessing
def run_multiprocessing(target, jobs, n=1, nproc=None, **kwargs):
    """
    Evaluate the given function with each set of arguments, and return a list of results.
    This function does in parallel by using `multiprocessing`.

    Parameters
    ----------
    target : function
        A function to be evaluated. The function must accepts three arguments,
        which are a list of arguments given as `jobs`, a job and task id (int).
    jobs : list
        A list of arguments passed to the function.
        All the argument must be picklable.
    n : int, optional
        A number of tasks. Repeat the evaluation `n` times for each job.
        1 for default.
    nproc : int, optional
        A number of cores available once.
        If nothing is given, all available cores are used.

    Returns
    -------
    results : list
        A list of results. Each element is a list containing `n` results.

    Examples
    --------
    >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs'))
    >>> target = lambda args, job_id, task_id: (args[1] * args[0])
    >>> run_multiprocessing(target, jobs, nproc=2)
    [['spam'], ['hamham'], ['eggseggseggs']]
    >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0])
    >>> run_multiprocessing(target, jobs, n=2, nproc=2)
    [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']]

    See Also
    --------
    ecell4.extra.ensemble.run_serial
    ecell4.extra.ensemble.run_sge
    ecell4.extra.ensemble.run_slurm
    ecell4.extra.ensemble.run_multiprocessing
    ecell4.extra.ensemble.run_azure
    """
    # Worker loop: pull (index, argtuple) items from q_in until the poison
    # pill (None) arrives, pushing (index, result) onto q_out.
    def consumer(f, q_in, q_out):
        while True:
            val = q_in.get()
            if val is None:
                q_in.task_done()
                break
            i, x = val
            res = (i, f(*x))
            q_in.task_done()
            q_out.put(res)

    # Parallel map that preserves input order via the index carried along
    # with each task.
    def mulpmap(f, X, nproc):
        nproc = nproc or multiprocessing.cpu_count()
        q_in = multiprocessing.JoinableQueue()
        q_out = multiprocessing.Queue()
        workers = [multiprocessing.Process(target=consumer, args=(f, q_in, q_out), daemon=True)
                   for _ in range(nproc)]
        # All tasks are enqueued before the workers start, followed by one
        # poison pill per worker so every process eventually exits its loop.
        sent = [q_in.put((i, x)) for i, x in enumerate(X)]
        num_tasks = len(sent)
        [q_in.put(None) for _ in range(nproc)]  #XXX: poison pill
        [w.start() for w in workers]
        # [w.join() for w in workers]
        # Wait until every task (and pill) has been marked done, then drain
        # exactly num_tasks results.
        q_in.join()
        res = [q_out.get() for _ in range(num_tasks)]
        # Restore submission order and drop the carried indices.
        return [x for (_, x) in sorted(res)]

    # One task per (job, repeat) pair; job and task ids are 1-based.
    res = mulpmap(
        target,
        ((job, i + 1, j + 1)
         for (i, job), j in itertools.product(enumerate(jobs), range(n))),
        nproc)
    # Regroup the flat, ordered result list into one n-element list per job.
    return [res[i: i + n] for i in range(0, len(res), n)]
python
def run_multiprocessing(target, jobs, n=1, nproc=None, **kwargs): def consumer(f, q_in, q_out): while True: val = q_in.get() if val is None: q_in.task_done() break i, x = val res = (i, f(*x)) q_in.task_done() q_out.put(res) def mulpmap(f, X, nproc): nproc = nproc or multiprocessing.cpu_count() q_in = multiprocessing.JoinableQueue() q_out = multiprocessing.Queue() workers = [multiprocessing.Process(target=consumer, args=(f, q_in, q_out), daemon=True) for _ in range(nproc)] sent = [q_in.put((i, x)) for i, x in enumerate(X)] num_tasks = len(sent) [q_in.put(None) for _ in range(nproc)] #XXX: poison pill [w.start() for w in workers] # [w.join() for w in workers] q_in.join() res = [q_out.get() for _ in range(num_tasks)] return [x for (_, x) in sorted(res)] res = mulpmap( target, ((job, i + 1, j + 1) for (i, job), j in itertools.product(enumerate(jobs), range(n))), nproc) return [res[i: i + n] for i in range(0, len(res), n)]
[ "def", "run_multiprocessing", "(", "target", ",", "jobs", ",", "n", "=", "1", ",", "nproc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "consumer", "(", "f", ",", "q_in", ",", "q_out", ")", ":", "while", "True", ":", "val", "=", "q_in"...
Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel by using `multiprocessing`. Parameters ---------- target : function A function to be evaluated. The function must accepts three arguments, which are a list of arguments given as `jobs`, a job and task id (int). jobs : list A list of arguments passed to the function. All the argument must be picklable. n : int, optional A number of tasks. Repeat the evaluation `n` times for each job. 1 for default. nproc : int, optional A number of cores available once. If nothing is given, all available cores are used. Returns ------- results : list A list of results. Each element is a list containing `n` results. Examples -------- >>> jobs = ((1, 'spam'), (2, 'ham'), (3, 'eggs')) >>> target = lambda args, job_id, task_id: (args[1] * args[0]) >>> run_multiprocessing(target, jobs, nproc=2) [['spam'], ['hamham'], ['eggseggseggs']] >>> target = lambda args, job_id, task_id: "{:d} {}".format(task_id, args[1] * args[0]) >>> run_multiprocessing(target, jobs, n=2, nproc=2) [['1 spam', '2 spam'], ['1 hamham', '2 hamham'], ['1 eggseggseggs', '2 eggseggseggs']] See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure
[ "Evaluate", "the", "given", "function", "with", "each", "set", "of", "arguments", "and", "return", "a", "list", "of", "results", ".", "This", "function", "does", "in", "parallel", "by", "using", "multiprocessing", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L73-L147
22,179
ecell/ecell4
ecell4/extra/ensemble.py
run_azure
def run_azure(target, jobs, n=1, nproc=None, path='.', delete=True, config=None, **kwargs):
    """
    Evaluate the given function with each set of arguments on Microsoft
    Azure Batch, and return a list of results.

    This function is the work in progress. The argument `nproc` doesn't
    work yet. See `ecell4.extra.azure_batch.run_azure` for details.

    See Also
    --------
    ecell4.extra.ensemble.run_serial
    ecell4.extra.ensemble.run_sge
    ecell4.extra.ensemble.run_slurm
    ecell4.extra.ensemble.run_multiprocessing
    ecell4.extra.azure_batch.run_azure
    """
    from ecell4.extra import azure_batch
    return azure_batch.run_azure(target, jobs, n, path, delete, config)
python
def run_azure(target, jobs, n=1, nproc=None, path='.', delete=True, config=None, **kwargs): import ecell4.extra.azure_batch as azure_batch return azure_batch.run_azure(target, jobs, n, path, delete, config)
[ "def", "run_azure", "(", "target", ",", "jobs", ",", "n", "=", "1", ",", "nproc", "=", "None", ",", "path", "=", "'.'", ",", "delete", "=", "True", ",", "config", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "ecell4", ".", "extra", ...
Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel with Microsoft Azure Batch. This function is the work in progress. The argument `nproc` doesn't work yet. See `ecell4.extra.azure_batch.run_azure` for details. See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure ecell4.extra.azure_batch.run_azure
[ "Evaluate", "the", "given", "function", "with", "each", "set", "of", "arguments", "and", "return", "a", "list", "of", "results", ".", "This", "function", "does", "in", "parallel", "with", "Microsoft", "Azure", "Batch", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L503-L523
22,180
ecell/ecell4
ecell4/extra/ensemble.py
getseed
def getseed(myseed, i):
    """
    Extract the i-th (1-based) seed from a long hex seed produced by
    `genseeds`.

    Parameters
    ----------
    myseed : bytes
        A long seed given by `genseeds(n)`.
    i : int
        An index less than n.

    Returns
    -------
    rndseed : int
        A seed (less than (2 ** 31))

    """
    start = (i - 1) * 8
    # Each sub-seed occupies 8 hex digits; keep only the low 31 bits so the
    # value fits a non-negative signed 32-bit seed.
    return int(myseed[start:start + 8], 16) % (2 ** 31)
python
def getseed(myseed, i): rndseed = int(myseed[(i - 1) * 8: i * 8], 16) rndseed = rndseed % (2 ** 31) #XXX: trancate the first bit return rndseed
[ "def", "getseed", "(", "myseed", ",", "i", ")", ":", "rndseed", "=", "int", "(", "myseed", "[", "(", "i", "-", "1", ")", "*", "8", ":", "i", "*", "8", "]", ",", "16", ")", "rndseed", "=", "rndseed", "%", "(", "2", "**", "31", ")", "#XXX: tr...
Return a single seed from a long seed given by `genseeds`. Parameters ---------- myseed : bytes A long seed given by `genseeds(n)`. i : int An index less than n. Returns ------- rndseed : int A seed (less than (2 ** 31))
[ "Return", "a", "single", "seed", "from", "a", "long", "seed", "given", "by", "genseeds", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L543-L562
22,181
ecell/ecell4
ecell4/extra/ensemble.py
list_species
def list_species(model, seeds=None):
    """This function is deprecated."""
    # BUG FIX: was `seeds = None or []`, which always discarded the caller's
    # seeds; `set(seeds + species_list)` below clearly expects them.
    seeds = seeds or []

    from ecell4_base.core import Species

    # Accept any iterable of serials, not just a list.
    if not isinstance(seeds, list):
        seeds = list(seeds)

    expanded = model.expand([Species(serial) for serial in seeds])
    species_list = [sp.serial() for sp in expanded.list_species()]
    species_list = sorted(set(seeds + species_list))
    return species_list
python
def list_species(model, seeds=None):
    # This function is deprecated.
    # BUG FIX: was `seeds = None or []`, which always discarded the caller's
    # seeds; `set(seeds + species_list)` below clearly expects them.
    seeds = seeds or []

    from ecell4_base.core import Species

    # Accept any iterable of serials, not just a list.
    if not isinstance(seeds, list):
        seeds = list(seeds)

    expanded = model.expand([Species(serial) for serial in seeds])
    species_list = [sp.serial() for sp in expanded.list_species()]
    species_list = sorted(set(seeds + species_list))
    return species_list
[ "def", "list_species", "(", "model", ",", "seeds", "=", "None", ")", ":", "seeds", "=", "None", "or", "[", "]", "from", "ecell4_base", ".", "core", "import", "Species", "if", "not", "isinstance", "(", "seeds", ",", "list", ")", ":", "seeds", "=", "li...
This function is deprecated.
[ "This", "function", "is", "deprecated", "." ]
a4a1229661c39b2059adbbacae9090e5ba664e01
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L582-L594
22,182
dlon/html2markdown
html2markdown.py
_escapeCharacters
def _escapeCharacters(tag):
    """non-recursively escape underlines and asterisks in the tag"""
    # Only direct children that are exactly NavigableString are rewritten;
    # subclasses are deliberately skipped (hence the exact type() check).
    for child in tag.contents:
        if type(child) == bs4.element.NavigableString:
            child.replace_with(_escapeCharSub(r'\\\1', child))
python
def _escapeCharacters(tag): for i,c in enumerate(tag.contents): if type(c) != bs4.element.NavigableString: continue c.replace_with(_escapeCharSub(r'\\\1', c))
[ "def", "_escapeCharacters", "(", "tag", ")", ":", "for", "i", ",", "c", "in", "enumerate", "(", "tag", ".", "contents", ")", ":", "if", "type", "(", "c", ")", "!=", "bs4", ".", "element", ".", "NavigableString", ":", "continue", "c", ".", "replace_wi...
non-recursively escape underlines and asterisks in the tag
[ "non", "-", "recursively", "escape", "underlines", "and", "asterisks", "in", "the", "tag" ]
5946da7136e69a67b3dd37fd0e896be4d6a5b482
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L148-L154
22,183
dlon/html2markdown
html2markdown.py
_breakRemNewlines
def _breakRemNewlines(tag):
    """non-recursively break spaces and remove newlines in the tag"""
    # Only direct children that are exactly NavigableString are rewritten;
    # runs of 2+ spaces collapse to one and raw newlines are dropped.
    for child in tag.contents:
        if type(child) == bs4.element.NavigableString:
            child.replace_with(re.sub(r' {2,}', ' ', child).replace('\n', ''))
python
def _breakRemNewlines(tag): for i,c in enumerate(tag.contents): if type(c) != bs4.element.NavigableString: continue c.replace_with(re.sub(r' {2,}', ' ', c).replace('\n',''))
[ "def", "_breakRemNewlines", "(", "tag", ")", ":", "for", "i", ",", "c", "in", "enumerate", "(", "tag", ".", "contents", ")", ":", "if", "type", "(", "c", ")", "!=", "bs4", ".", "element", ".", "NavigableString", ":", "continue", "c", ".", "replace_wi...
non-recursively break spaces and remove newlines in the tag
[ "non", "-", "recursively", "break", "spaces", "and", "remove", "newlines", "in", "the", "tag" ]
5946da7136e69a67b3dd37fd0e896be4d6a5b482
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L156-L161
22,184
dlon/html2markdown
html2markdown.py
convert
def convert(html):
    """converts an html string to markdown while preserving unsupported markup."""
    bs = BeautifulSoup(html, 'html.parser')
    _markdownify(bs)
    # NOTE(review): `unicode` implies Python 2 (or a compat shim) -- verify.
    # Non-breaking spaces must survive as entities in the markdown output.
    ret = unicode(bs).replace(u'\xa0', '&nbsp;')
    # Collapse runs of 3+ newlines left behind by removed block elements.
    ret = re.sub(r'\n{3,}', r'\n\n', ret)
    # ! FIXME: hack
    # Unwrap sentinel-marked bare links (presumably placed upstream by
    # _markdownify) into markdown autolinks: <url>.
    ret = re.sub(r'&lt;&lt;&lt;FLOATING LINK: (.+)&gt;&gt;&gt;', r'<\1>', ret)
    # ! FIXME: hack
    # Sentinel-wrapped blockquote bodies get every line prefixed with '> '.
    sp = re.split(r'(&lt;&lt;&lt;BLOCKQUOTE: .*?&gt;&gt;&gt;)', ret, flags=re.DOTALL)
    for i, e in enumerate(sp):
        if e[:len('&lt;&lt;&lt;BLOCKQUOTE:')] == '&lt;&lt;&lt;BLOCKQUOTE:':
            sp[i] = '> ' + e[len('&lt;&lt;&lt;BLOCKQUOTE:'): -len('&gt;&gt;&gt;')]
            sp[i] = sp[i].replace('\n', '\n> ')
    ret = ''.join(sp)
    return ret.strip('\n')
python
def convert(html): bs = BeautifulSoup(html, 'html.parser') _markdownify(bs) ret = unicode(bs).replace(u'\xa0', '&nbsp;') ret = re.sub(r'\n{3,}', r'\n\n', ret) # ! FIXME: hack ret = re.sub(r'&lt;&lt;&lt;FLOATING LINK: (.+)&gt;&gt;&gt;', r'<\1>', ret) # ! FIXME: hack sp = re.split(r'(&lt;&lt;&lt;BLOCKQUOTE: .*?&gt;&gt;&gt;)', ret, flags=re.DOTALL) for i,e in enumerate(sp): if e[:len('&lt;&lt;&lt;BLOCKQUOTE:')] == '&lt;&lt;&lt;BLOCKQUOTE:': sp[i] = '> ' + e[len('&lt;&lt;&lt;BLOCKQUOTE:') : -len('&gt;&gt;&gt;')] sp[i] = sp[i].replace('\n', '\n> ') ret = ''.join(sp) return ret.strip('\n')
[ "def", "convert", "(", "html", ")", ":", "bs", "=", "BeautifulSoup", "(", "html", ",", "'html.parser'", ")", "_markdownify", "(", "bs", ")", "ret", "=", "unicode", "(", "bs", ")", ".", "replace", "(", "u'\\xa0'", ",", "'&nbsp;'", ")", "ret", "=", "re...
converts an html string to markdown while preserving unsupported markup.
[ "converts", "an", "html", "string", "to", "markdown", "while", "preserving", "unsupported", "markup", "." ]
5946da7136e69a67b3dd37fd0e896be4d6a5b482
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L332-L347
22,185
timknip/pyswf
swf/filters.py
SWFFilterFactory.create
def create(cls, type):
    """Return the specified Filter"""
    # NOTE(review): every branch passes the *builtin* `id` function to the
    # filter constructor -- presumably a latent upstream bug; preserved as-is.
    factories = {
        0: FilterDropShadow,
        1: FilterBlur,
        2: FilterGlow,
        3: FilterBevel,
        4: FilterGradientGlow,
        5: FilterConvolution,
        6: FilterColorMatrix,
        7: FilterGradientBevel,
    }
    factory = factories.get(type)
    if factory is None:
        raise Exception("Unknown filter type: %d" % type)
    return factory(id)
python
def create(cls, type): if type == 0: return FilterDropShadow(id) elif type == 1: return FilterBlur(id) elif type == 2: return FilterGlow(id) elif type == 3: return FilterBevel(id) elif type == 4: return FilterGradientGlow(id) elif type == 5: return FilterConvolution(id) elif type == 6: return FilterColorMatrix(id) elif type == 7: return FilterGradientBevel(id) else: raise Exception("Unknown filter type: %d" % type)
[ "def", "create", "(", "cls", ",", "type", ")", ":", "if", "type", "==", "0", ":", "return", "FilterDropShadow", "(", "id", ")", "elif", "type", "==", "1", ":", "return", "FilterBlur", "(", "id", ")", "elif", "type", "==", "2", ":", "return", "Filte...
Return the specified Filter
[ "Return", "the", "specified", "Filter" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/filters.py#L220-L231
22,186
timknip/pyswf
swf/movie.py
SWF.export
def export(self, exporter=None, force_stroke=False):
    """
    Export this SWF using the specified exporter.

    Falls back to swf.export.SVGExporter when no exporter is passed in.
    Exporters should extend the swf.export.BaseExporter class.

    @param exporter : the exporter to use
    @param force_stroke : set to true to force strokes on fills,
        useful for some edge cases.
    """
    if exporter is None:
        exporter = SVGExporter()
    if self._data is None:
        raise Exception("This SWF was not loaded! (no data)")
    if len(self.tags) == 0:
        raise Exception("This SWF doesn't contain any tags!")
    return exporter.export(self, force_stroke)
python
def export(self, exporter=None, force_stroke=False): exporter = SVGExporter() if exporter is None else exporter if self._data is None: raise Exception("This SWF was not loaded! (no data)") if len(self.tags) == 0: raise Exception("This SWF doesn't contain any tags!") return exporter.export(self, force_stroke)
[ "def", "export", "(", "self", ",", "exporter", "=", "None", ",", "force_stroke", "=", "False", ")", ":", "exporter", "=", "SVGExporter", "(", ")", "if", "exporter", "is", "None", "else", "exporter", "if", "self", ".", "_data", "is", "None", ":", "raise...
Export this SWF using the specified exporter. When no exporter is passed in the default exporter used is swf.export.SVGExporter. Exporters should extend the swf.export.BaseExporter class. @param exporter : the exporter to use @param force_stroke : set to true to force strokes on fills, useful for some edge cases.
[ "Export", "this", "SWF", "using", "the", "specified", "exporter", ".", "When", "no", "exporter", "is", "passed", "in", "the", "default", "exporter", "used", "is", "swf", ".", "export", ".", "SVGExporter", ".", "Exporters", "should", "extend", "the", "swf", ...
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/movie.py#L114-L131
22,187
timknip/pyswf
swf/movie.py
SWF.parse
def parse(self, data): """ Parses the SWF. The @data parameter can be a file object or a SWFStream """ self._data = data = data if isinstance(data, SWFStream) else SWFStream(data) self._header = SWFHeader(self._data) if self._header.compressed: temp = BytesIO() if self._header.compressed_zlib: import zlib data = data.f.read() zip = zlib.decompressobj() temp.write(zip.decompress(data)) else: import pylzma data.readUI32() #consume compressed length data = data.f.read() temp.write(pylzma.decompress(data)) temp.seek(0) data = SWFStream(temp) self._header._frame_size = data.readRECT() self._header._frame_rate = data.readFIXED8() self._header._frame_count = data.readUI16() self.parse_tags(data)
python
def parse(self, data): self._data = data = data if isinstance(data, SWFStream) else SWFStream(data) self._header = SWFHeader(self._data) if self._header.compressed: temp = BytesIO() if self._header.compressed_zlib: import zlib data = data.f.read() zip = zlib.decompressobj() temp.write(zip.decompress(data)) else: import pylzma data.readUI32() #consume compressed length data = data.f.read() temp.write(pylzma.decompress(data)) temp.seek(0) data = SWFStream(temp) self._header._frame_size = data.readRECT() self._header._frame_rate = data.readFIXED8() self._header._frame_count = data.readUI16() self.parse_tags(data)
[ "def", "parse", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "data", "=", "data", "if", "isinstance", "(", "data", ",", "SWFStream", ")", "else", "SWFStream", "(", "data", ")", "self", ".", "_header", "=", "SWFHeader", "(", "self", ...
Parses the SWF. The @data parameter can be a file object or a SWFStream
[ "Parses", "the", "SWF", ".", "The" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/movie.py#L137-L162
22,188
timknip/pyswf
swf/stream.py
int32
def int32(x): """ Return a signed or unsigned int """ if x>0xFFFFFFFF: raise OverflowError if x>0x7FFFFFFF: x=int(0x100000000-x) if x<2147483648: return -x else: return -2147483648 return x
python
def int32(x): if x>0xFFFFFFFF: raise OverflowError if x>0x7FFFFFFF: x=int(0x100000000-x) if x<2147483648: return -x else: return -2147483648 return x
[ "def", "int32", "(", "x", ")", ":", "if", "x", ">", "0xFFFFFFFF", ":", "raise", "OverflowError", "if", "x", ">", "0x7FFFFFFF", ":", "x", "=", "int", "(", "0x100000000", "-", "x", ")", "if", "x", "<", "2147483648", ":", "return", "-", "x", "else", ...
Return a signed or unsigned int
[ "Return", "a", "signed", "or", "unsigned", "int" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L490-L500
22,189
timknip/pyswf
swf/stream.py
SWFStream.bin
def bin(self, s): """ Return a value as a binary string """ return str(s) if s<=1 else bin(s>>1) + str(s&1)
python
def bin(self, s): return str(s) if s<=1 else bin(s>>1) + str(s&1)
[ "def", "bin", "(", "self", ",", "s", ")", ":", "return", "str", "(", "s", ")", "if", "s", "<=", "1", "else", "bin", "(", "s", ">>", "1", ")", "+", "str", "(", "s", "&", "1", ")" ]
Return a value as a binary string
[ "Return", "a", "value", "as", "a", "binary", "string" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L22-L24
22,190
timknip/pyswf
swf/stream.py
SWFStream.calc_max_bits
def calc_max_bits(self, signed, values): """ Calculates the maximim needed bits to represent a value """ b = 0 vmax = -10000000 for val in values: if signed: b = b | val if val >= 0 else b | ~val << 1 vmax = val if vmax < val else vmax else: b |= val; bits = 0 if b > 0: bits = len(self.bin(b)) - 2 if signed and vmax > 0 and len(self.bin(vmax)) - 2 >= bits: bits += 1 return bits
python
def calc_max_bits(self, signed, values): b = 0 vmax = -10000000 for val in values: if signed: b = b | val if val >= 0 else b | ~val << 1 vmax = val if vmax < val else vmax else: b |= val; bits = 0 if b > 0: bits = len(self.bin(b)) - 2 if signed and vmax > 0 and len(self.bin(vmax)) - 2 >= bits: bits += 1 return bits
[ "def", "calc_max_bits", "(", "self", ",", "signed", ",", "values", ")", ":", "b", "=", "0", "vmax", "=", "-", "10000000", "for", "val", "in", "values", ":", "if", "signed", ":", "b", "=", "b", "|", "val", "if", "val", ">=", "0", "else", "b", "|...
Calculates the maximim needed bits to represent a value
[ "Calculates", "the", "maximim", "needed", "bits", "to", "represent", "a", "value" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L26-L42
22,191
timknip/pyswf
swf/stream.py
SWFStream.readbits
def readbits(self, bits): """ Read the specified number of bits from the stream. Returns 0 for bits == 0. """ if bits == 0: return 0 # fast byte-aligned path if bits % 8 == 0 and self._bits_pending == 0: return self._read_bytes_aligned(bits // 8) out = 0 masks = self._masks def transfer_bits(x, y, n, t): """ transfers t bits from the top of y_n to the bottom of x. then returns x and the remaining bits in y """ if n == t: # taking all return (x << t) | y, 0 mask = masks[t] # (1 << t) - 1 remainmask = masks[n - t] # (1 << n - t) - 1 taken = ((y >> n - t) & mask) return (x << t) | taken, y & remainmask while bits > 0: if self._bits_pending > 0: assert self._partial_byte is not None take = min(self._bits_pending, bits) out, self._partial_byte = transfer_bits(out, self._partial_byte, self._bits_pending, take) if take == self._bits_pending: # we took them all self._partial_byte = None self._bits_pending -= take bits -= take continue r = self.f.read(1) if r == b'': raise EOFError self._partial_byte = ord(r) self._bits_pending = 8 return out
python
def readbits(self, bits): if bits == 0: return 0 # fast byte-aligned path if bits % 8 == 0 and self._bits_pending == 0: return self._read_bytes_aligned(bits // 8) out = 0 masks = self._masks def transfer_bits(x, y, n, t): """ transfers t bits from the top of y_n to the bottom of x. then returns x and the remaining bits in y """ if n == t: # taking all return (x << t) | y, 0 mask = masks[t] # (1 << t) - 1 remainmask = masks[n - t] # (1 << n - t) - 1 taken = ((y >> n - t) & mask) return (x << t) | taken, y & remainmask while bits > 0: if self._bits_pending > 0: assert self._partial_byte is not None take = min(self._bits_pending, bits) out, self._partial_byte = transfer_bits(out, self._partial_byte, self._bits_pending, take) if take == self._bits_pending: # we took them all self._partial_byte = None self._bits_pending -= take bits -= take continue r = self.f.read(1) if r == b'': raise EOFError self._partial_byte = ord(r) self._bits_pending = 8 return out
[ "def", "readbits", "(", "self", ",", "bits", ")", ":", "if", "bits", "==", "0", ":", "return", "0", "# fast byte-aligned path", "if", "bits", "%", "8", "==", "0", "and", "self", ".", "_bits_pending", "==", "0", ":", "return", "self", ".", "_read_bytes_...
Read the specified number of bits from the stream. Returns 0 for bits == 0.
[ "Read", "the", "specified", "number", "of", "bits", "from", "the", "stream", ".", "Returns", "0", "for", "bits", "==", "0", "." ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L56-L105
22,192
timknip/pyswf
swf/stream.py
SWFStream.readSB
def readSB(self, bits): """ Read a signed int using the specified number of bits """ shift = 32 - bits return int32(self.readbits(bits) << shift) >> shift
python
def readSB(self, bits): shift = 32 - bits return int32(self.readbits(bits) << shift) >> shift
[ "def", "readSB", "(", "self", ",", "bits", ")", ":", "shift", "=", "32", "-", "bits", "return", "int32", "(", "self", ".", "readbits", "(", "bits", ")", "<<", "shift", ")", ">>", "shift" ]
Read a signed int using the specified number of bits
[ "Read", "a", "signed", "int", "using", "the", "specified", "number", "of", "bits" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L111-L114
22,193
timknip/pyswf
swf/stream.py
SWFStream.readEncodedU32
def readEncodedU32(self): """ Read a encoded unsigned int """ self.reset_bits_pending(); result = self.readUI8(); if result & 0x80 != 0: result = (result & 0x7f) | (self.readUI8() << 7) if result & 0x4000 != 0: result = (result & 0x3fff) | (self.readUI8() << 14) if result & 0x200000 != 0: result = (result & 0x1fffff) | (self.readUI8() << 21) if result & 0x10000000 != 0: result = (result & 0xfffffff) | (self.readUI8() << 28) return result
python
def readEncodedU32(self): self.reset_bits_pending(); result = self.readUI8(); if result & 0x80 != 0: result = (result & 0x7f) | (self.readUI8() << 7) if result & 0x4000 != 0: result = (result & 0x3fff) | (self.readUI8() << 14) if result & 0x200000 != 0: result = (result & 0x1fffff) | (self.readUI8() << 21) if result & 0x10000000 != 0: result = (result & 0xfffffff) | (self.readUI8() << 28) return result
[ "def", "readEncodedU32", "(", "self", ")", ":", "self", ".", "reset_bits_pending", "(", ")", "result", "=", "self", ".", "readUI8", "(", ")", "if", "result", "&", "0x80", "!=", "0", ":", "result", "=", "(", "result", "&", "0x7f", ")", "|", "(", "se...
Read a encoded unsigned int
[ "Read", "a", "encoded", "unsigned", "int" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L155-L167
22,194
timknip/pyswf
swf/stream.py
SWFStream.readFLOAT16
def readFLOAT16(self): """ Read a 2 byte float """ self.reset_bits_pending() word = self.readUI16() sign = -1 if ((word & 0x8000) != 0) else 1 exponent = (word >> 10) & 0x1f significand = word & 0x3ff if exponent == 0: if significand == 0: return 0.0 else: return sign * math.pow(2, 1 - SWFStream.FLOAT16_EXPONENT_BASE) * (significand / 1024.0) if exponent == 31: if significand == 0: return float('-inf') if sign < 0 else float('inf') else: return float('nan') # normal number return sign * math.pow(2, exponent - SWFStream.FLOAT16_EXPONENT_BASE) * (1 + significand / 1024.0)
python
def readFLOAT16(self): self.reset_bits_pending() word = self.readUI16() sign = -1 if ((word & 0x8000) != 0) else 1 exponent = (word >> 10) & 0x1f significand = word & 0x3ff if exponent == 0: if significand == 0: return 0.0 else: return sign * math.pow(2, 1 - SWFStream.FLOAT16_EXPONENT_BASE) * (significand / 1024.0) if exponent == 31: if significand == 0: return float('-inf') if sign < 0 else float('inf') else: return float('nan') # normal number return sign * math.pow(2, exponent - SWFStream.FLOAT16_EXPONENT_BASE) * (1 + significand / 1024.0)
[ "def", "readFLOAT16", "(", "self", ")", ":", "self", ".", "reset_bits_pending", "(", ")", "word", "=", "self", ".", "readUI16", "(", ")", "sign", "=", "-", "1", "if", "(", "(", "word", "&", "0x8000", ")", "!=", "0", ")", "else", "1", "exponent", ...
Read a 2 byte float
[ "Read", "a", "2", "byte", "float" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L174-L192
22,195
timknip/pyswf
swf/stream.py
SWFStream.readSTYLECHANGERECORD
def readSTYLECHANGERECORD(self, states, fill_bits, line_bits, level = 1): """ Read a SWFShapeRecordStyleChange """ return SWFShapeRecordStyleChange(self, states, fill_bits, line_bits, level)
python
def readSTYLECHANGERECORD(self, states, fill_bits, line_bits, level = 1): return SWFShapeRecordStyleChange(self, states, fill_bits, line_bits, level)
[ "def", "readSTYLECHANGERECORD", "(", "self", ",", "states", ",", "fill_bits", ",", "line_bits", ",", "level", "=", "1", ")", ":", "return", "SWFShapeRecordStyleChange", "(", "self", ",", "states", ",", "fill_bits", ",", "line_bits", ",", "level", ")" ]
Read a SWFShapeRecordStyleChange
[ "Read", "a", "SWFShapeRecordStyleChange" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L263-L265
22,196
timknip/pyswf
swf/stream.py
SWFStream.readTEXTRECORD
def readTEXTRECORD(self, glyphBits, advanceBits, previousRecord=None, level=1): """ Read a SWFTextRecord """ if self.readUI8() == 0: return None else: self.seek(self.tell() - 1) return SWFTextRecord(self, glyphBits, advanceBits, previousRecord, level)
python
def readTEXTRECORD(self, glyphBits, advanceBits, previousRecord=None, level=1): if self.readUI8() == 0: return None else: self.seek(self.tell() - 1) return SWFTextRecord(self, glyphBits, advanceBits, previousRecord, level)
[ "def", "readTEXTRECORD", "(", "self", ",", "glyphBits", ",", "advanceBits", ",", "previousRecord", "=", "None", ",", "level", "=", "1", ")", ":", "if", "self", ".", "readUI8", "(", ")", "==", "0", ":", "return", "None", "else", ":", "self", ".", "see...
Read a SWFTextRecord
[ "Read", "a", "SWFTextRecord" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L271-L277
22,197
timknip/pyswf
swf/stream.py
SWFStream.readACTIONRECORD
def readACTIONRECORD(self): """ Read a SWFActionRecord """ action = None actionCode = self.readUI8() if actionCode != 0: actionLength = self.readUI16() if actionCode >= 0x80 else 0 #print "0x%x"%actionCode, actionLength action = SWFActionFactory.create(actionCode, actionLength) action.parse(self) return action
python
def readACTIONRECORD(self): action = None actionCode = self.readUI8() if actionCode != 0: actionLength = self.readUI16() if actionCode >= 0x80 else 0 #print "0x%x"%actionCode, actionLength action = SWFActionFactory.create(actionCode, actionLength) action.parse(self) return action
[ "def", "readACTIONRECORD", "(", "self", ")", ":", "action", "=", "None", "actionCode", "=", "self", ".", "readUI8", "(", ")", "if", "actionCode", "!=", "0", ":", "actionLength", "=", "self", ".", "readUI16", "(", ")", "if", "actionCode", ">=", "0x80", ...
Read a SWFActionRecord
[ "Read", "a", "SWFActionRecord" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L307-L316
22,198
timknip/pyswf
swf/stream.py
SWFStream.readCLIPACTIONRECORD
def readCLIPACTIONRECORD(self, version): """ Read a SWFClipActionRecord """ pos = self.tell() flags = self.readUI32() if version >= 6 else self.readUI16() if flags == 0: return None else: self.seek(pos) return SWFClipActionRecord(self, version)
python
def readCLIPACTIONRECORD(self, version): pos = self.tell() flags = self.readUI32() if version >= 6 else self.readUI16() if flags == 0: return None else: self.seek(pos) return SWFClipActionRecord(self, version)
[ "def", "readCLIPACTIONRECORD", "(", "self", ",", "version", ")", ":", "pos", "=", "self", ".", "tell", "(", ")", "flags", "=", "self", ".", "readUI32", "(", ")", "if", "version", ">=", "6", "else", "self", ".", "readUI16", "(", ")", "if", "flags", ...
Read a SWFClipActionRecord
[ "Read", "a", "SWFClipActionRecord" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L333-L341
22,199
timknip/pyswf
swf/stream.py
SWFStream.readRGB
def readRGB(self): """ Read a RGB color """ self.reset_bits_pending(); r = self.readUI8() g = self.readUI8() b = self.readUI8() return (0xff << 24) | (r << 16) | (g << 8) | b
python
def readRGB(self): self.reset_bits_pending(); r = self.readUI8() g = self.readUI8() b = self.readUI8() return (0xff << 24) | (r << 16) | (g << 8) | b
[ "def", "readRGB", "(", "self", ")", ":", "self", ".", "reset_bits_pending", "(", ")", "r", "=", "self", ".", "readUI8", "(", ")", "g", "=", "self", ".", "readUI8", "(", ")", "b", "=", "self", ".", "readUI8", "(", ")", "return", "(", "0xff", "<<",...
Read a RGB color
[ "Read", "a", "RGB", "color" ]
3740cc80d7650156831e728ea0d408819e5671eb
https://github.com/timknip/pyswf/blob/3740cc80d7650156831e728ea0d408819e5671eb/swf/stream.py#L347-L353