Dataset columns (all string-valued; minimum and maximum cell lengths as reported):
code            - original Python source (75 to 104k chars)
code_sememe     - AST-style sememe representation (47 to 309k chars)
token_type      - type-tagged token sequence (215 to 214k chars)
code_dependency - source annotated with control/data dependency comments (75 to 155k chars)
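Each record stores the same Python function in these four parallel columns. A minimal loading sketch, assuming the dump is available as a Parquet file (the file name here is hypothetical):

import pandas as pd

df = pd.read_parquet('records.parquet')  # hypothetical dump file name

row = df.iloc[0]
for column in ('code', 'code_sememe', 'token_type', 'code_dependency'):
    print('---', column, '({} chars)'.format(len(row[column])))
    print(row[column][:200])  # peek at the start of each representation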
def get_current_waypoints(boatd=None):
    '''
    Get the current set of waypoints active from boatd.

    :returns: The current waypoints
    :rtype: List of Points
    '''
    if boatd is None:
        boatd = Boatd()
    content = boatd.get('/waypoints')
    return [Point(*coords) for coords in content.get('waypoints')]
def function[get_current_waypoints, parameter[boatd]]: constant[ Get the current set of waypoints active from boatd. :returns: The current waypoints :rtype: List of Points ] if compare[name[boatd] is constant[None]] begin[:] variable[boatd] assign[=] call[name[Boatd], parameter[]] variable[content] assign[=] call[name[boatd].get, parameter[constant[/waypoints]]] return[<ast.ListComp object at 0x7da207f00730>]
keyword[def] identifier[get_current_waypoints] ( identifier[boatd] = keyword[None] ): literal[string] keyword[if] identifier[boatd] keyword[is] keyword[None] : identifier[boatd] = identifier[Boatd] () identifier[content] = identifier[boatd] . identifier[get] ( literal[string] ) keyword[return] [ identifier[Point] (* identifier[coords] ) keyword[for] identifier[coords] keyword[in] identifier[content] . identifier[get] ( literal[string] )]
def get_current_waypoints(boatd=None):
    """
    Get the current set of waypoints active from boatd.

    :returns: The current waypoints
    :rtype: List of Points
    """
    if boatd is None:
        boatd = Boatd()  # depends on [control=['if'], data=['boatd']]
    content = boatd.get('/waypoints')
    return [Point(*coords) for coords in content.get('waypoints')]
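A hedged usage sketch for the function above: Boatd and Point belong to the surrounding boatd client library, so minimal stand-ins are used here for illustration.

from collections import namedtuple

Point = namedtuple('Point', ['lat', 'lon'])  # stand-in for the library's Point

class FakeBoatd:
    """Stub mimicking the Boatd HTTP client's get() as used above."""
    def get(self, endpoint):
        return {'waypoints': [(50.8, -1.1), (50.9, -1.2)]}

print(get_current_waypoints(boatd=FakeBoatd()))
# [Point(lat=50.8, lon=-1.1), Point(lat=50.9, lon=-1.2)]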
def log_stats(self):
    """Print statistics into log."""
    logging.info('Validation statistics: ')
    for k, v in iteritems(self.stats):
        logging.info('%s - %d valid out of %d total submissions',
                     k, v[0], v[0] + v[1])
def function[log_stats, parameter[self]]: constant[Print statistics into log.] call[name[logging].info, parameter[constant[Validation statistics: ]]] for taget[tuple[[<ast.Name object at 0x7da1b1fc8610>, <ast.Name object at 0x7da1b1fca230>]]] in starred[call[name[iteritems], parameter[name[self].stats]]] begin[:] call[name[logging].info, parameter[constant[%s - %d valid out of %d total submissions], name[k], call[name[v]][constant[0]], binary_operation[call[name[v]][constant[0]] + call[name[v]][constant[1]]]]]
keyword[def] identifier[log_stats] ( identifier[self] ): literal[string] identifier[logging] . identifier[info] ( literal[string] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iteritems] ( identifier[self] . identifier[stats] ): identifier[logging] . identifier[info] ( literal[string] , identifier[k] , identifier[v] [ literal[int] ], identifier[v] [ literal[int] ]+ identifier[v] [ literal[int] ])
def log_stats(self):
    """Print statistics into log."""
    logging.info('Validation statistics: ')
    for (k, v) in iteritems(self.stats):
        logging.info('%s - %d valid out of %d total submissions',
                     k, v[0], v[0] + v[1])  # depends on [control=['for'], data=[]]
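iteritems above presumably comes from six. A small sketch exercising the method on a throwaway class, assuming stats maps each key to a (valid, invalid) pair as the format string implies:

import logging
from six import iteritems  # assumption: the module imports iteritems from six

logging.basicConfig(level=logging.INFO)

class Validator:
    def __init__(self):
        self.stats = {'round1': (8, 2)}  # (valid, invalid) per key

Validator.log_stats = log_stats  # attach the function above as a method
Validator().log_stats()
# INFO:root:round1 - 8 valid out of 10 total submissions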
def list_certificate_signing_request(self, **kwargs):
    """
    list or watch objects of kind CertificateSigningRequest
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_certificate_signing_request(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
                                 When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1beta1CertificateSigningRequestList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_certificate_signing_request_with_http_info(**kwargs)
    else:
        (data) = self.list_certificate_signing_request_with_http_info(**kwargs)
        return data
def function[list_certificate_signing_request, parameter[self]]: constant[ list or watch objects of kind CertificateSigningRequest This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_certificate_signing_request(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1CertificateSigningRequestList If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].list_certificate_signing_request_with_http_info, parameter[]]]
keyword[def] identifier[list_certificate_signing_request] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[list_certificate_signing_request_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[list_certificate_signing_request_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def list_certificate_signing_request(self, **kwargs):
    """
    list or watch objects of kind CertificateSigningRequest
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_certificate_signing_request(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
                                 When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1beta1CertificateSigningRequestList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_certificate_signing_request_with_http_info(**kwargs)  # depends on [control=['if'], data=[]]
    else:
        data = self.list_certificate_signing_request_with_http_info(**kwargs)
        return data
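This method matches the generated kubernetes Python client. A hedged usage sketch, assuming a pre-v22 client (which still ships CertificatesV1beta1Api) and a reachable cluster with a local kubeconfig:

from kubernetes import client, config

config.load_kube_config()  # assumes a working local kubeconfig
api = client.CertificatesV1beta1Api()

csr_list = api.list_certificate_signing_request(limit=10)
for csr in csr_list.items:
    print(csr.metadata.name)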
def get_default_api_key(self, email, password):
    """
    Get the default API key for a user.

    :param email: The email of the user.
    :type email: string
    :param password: The user's password.
    :type password: string
    :returns: API key to confirm that it was fetched successfully.
    :rtype: string
    """
    parameters = dict()
    parameters['email'] = email
    parameters['password'] = password
    response = self.request('midas.user.apikey.default', parameters)
    return response['apikey']
def function[get_default_api_key, parameter[self, email, password]]: constant[ Get the default API key for a user. :param email: The email of the user. :type email: string :param password: The user's password. :type password: string :returns: API key to confirm that it was fetched successfully. :rtype: string ] variable[parameters] assign[=] call[name[dict], parameter[]] call[name[parameters]][constant[email]] assign[=] name[email] call[name[parameters]][constant[password]] assign[=] name[password] variable[response] assign[=] call[name[self].request, parameter[constant[midas.user.apikey.default], name[parameters]]] return[call[name[response]][constant[apikey]]]
keyword[def] identifier[get_default_api_key] ( identifier[self] , identifier[email] , identifier[password] ): literal[string] identifier[parameters] = identifier[dict] () identifier[parameters] [ literal[string] ]= identifier[email] identifier[parameters] [ literal[string] ]= identifier[password] identifier[response] = identifier[self] . identifier[request] ( literal[string] , identifier[parameters] ) keyword[return] identifier[response] [ literal[string] ]
def get_default_api_key(self, email, password):
    """
    Get the default API key for a user.

    :param email: The email of the user.
    :type email: string
    :param password: The user's password.
    :type password: string
    :returns: API key to confirm that it was fetched successfully.
    :rtype: string
    """
    parameters = dict()
    parameters['email'] = email
    parameters['password'] = password
    response = self.request('midas.user.apikey.default', parameters)
    return response['apikey']
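The method looks like part of a pydas-style driver for Midas Server. A hedged usage sketch, treating the CoreDriver import, server URL, and credentials all as assumptions:

from pydas.drivers import CoreDriver  # assumption: pydas-style driver class

driver = CoreDriver('https://midas.example.org')  # placeholder server URL
api_key = driver.get_default_api_key('user@example.org', 's3cret')
print(api_key)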
def _read_data(self, fp_, header):
    """Read data block"""
    nlines = int(header["block2"]['number_of_lines'][0])
    ncols = int(header["block2"]['number_of_columns'][0])
    return da.from_array(np.memmap(self.filename,
                                   offset=fp_.tell(),
                                   dtype='<u2',
                                   shape=(nlines, ncols),
                                   mode='r'),
                         chunks=CHUNK_SIZE)
def function[_read_data, parameter[self, fp_, header]]: constant[Read data block] variable[nlines] assign[=] call[name[int], parameter[call[call[call[name[header]][constant[block2]]][constant[number_of_lines]]][constant[0]]]] variable[ncols] assign[=] call[name[int], parameter[call[call[call[name[header]][constant[block2]]][constant[number_of_columns]]][constant[0]]]] return[call[name[da].from_array, parameter[call[name[np].memmap, parameter[name[self].filename]]]]]
keyword[def] identifier[_read_data] ( identifier[self] , identifier[fp_] , identifier[header] ): literal[string] identifier[nlines] = identifier[int] ( identifier[header] [ literal[string] ][ literal[string] ][ literal[int] ]) identifier[ncols] = identifier[int] ( identifier[header] [ literal[string] ][ literal[string] ][ literal[int] ]) keyword[return] identifier[da] . identifier[from_array] ( identifier[np] . identifier[memmap] ( identifier[self] . identifier[filename] , identifier[offset] = identifier[fp_] . identifier[tell] (), identifier[dtype] = literal[string] , identifier[shape] =( identifier[nlines] , identifier[ncols] ), identifier[mode] = literal[string] ), identifier[chunks] = identifier[CHUNK_SIZE] )
def _read_data(self, fp_, header):
    """Read data block"""
    nlines = int(header['block2']['number_of_lines'][0])
    ncols = int(header['block2']['number_of_columns'][0])
    return da.from_array(np.memmap(self.filename, offset=fp_.tell(),
                                   dtype='<u2', shape=(nlines, ncols),
                                   mode='r'),
                         chunks=CHUNK_SIZE)
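The memmap-into-dask pattern above is worth isolating: nothing is read eagerly, and dask pulls chunks from disk on demand. A self-contained sketch with synthetic data (the scratch file name is arbitrary):

import numpy as np
import dask.array as da

CHUNK_SIZE = 1024

# write a little-endian uint16 block to disk, then map it back lazily
np.arange(2000, dtype='<u2').reshape(40, 50).tofile('block.dat')

mm = np.memmap('block.dat', dtype='<u2', shape=(40, 50), mode='r')
arr = da.from_array(mm, chunks=CHUNK_SIZE)
print(arr.mean().compute())  # 999.5, computed chunk by chunk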
def Save(self, filename, env):
    """
    Saves all the options in the given file.  This file can
    then be used to load the options next run.  This can be used
    to create an option cache file.

    filename - Name of the file to save into
    env - the environment to get the option values from
    """
    # Create the file and write out the header
    try:
        fh = open(filename, 'w')
        try:
            # Make an assignment in the file for each option
            # within the environment that was assigned a value
            # other than the default.
            for option in self.options:
                try:
                    value = env[option.key]
                    try:
                        prepare = value.prepare_to_store
                    except AttributeError:
                        try:
                            eval(repr(value))
                        except KeyboardInterrupt:
                            raise
                        except:
                            # Convert stuff that has a repr() that
                            # cannot be evaluated into a string
                            value = SCons.Util.to_String(value)
                    else:
                        value = prepare()

                    defaultVal = env.subst(SCons.Util.to_String(option.default))
                    if option.converter:
                        defaultVal = option.converter(defaultVal)
                    if str(env.subst('${%s}' % option.key)) != str(defaultVal):
                        fh.write('%s = %s\n' % (option.key, repr(value)))
                except KeyError:
                    pass
        finally:
            fh.close()
    except IOError as x:
        raise SCons.Errors.UserError('Error writing options to file: %s\n%s' % (filename, x))
def function[Save, parameter[self, filename, env]]: constant[ Saves all the options in the given file. This file can then be used to load the options next run. This can be used to create an option cache file. filename - Name of the file to save into env - the environment get the option values from ] <ast.Try object at 0x7da204623a90>
keyword[def] identifier[Save] ( identifier[self] , identifier[filename] , identifier[env] ): literal[string] keyword[try] : identifier[fh] = identifier[open] ( identifier[filename] , literal[string] ) keyword[try] : keyword[for] identifier[option] keyword[in] identifier[self] . identifier[options] : keyword[try] : identifier[value] = identifier[env] [ identifier[option] . identifier[key] ] keyword[try] : identifier[prepare] = identifier[value] . identifier[prepare_to_store] keyword[except] identifier[AttributeError] : keyword[try] : identifier[eval] ( identifier[repr] ( identifier[value] )) keyword[except] identifier[KeyboardInterrupt] : keyword[raise] keyword[except] : identifier[value] = identifier[SCons] . identifier[Util] . identifier[to_String] ( identifier[value] ) keyword[else] : identifier[value] = identifier[prepare] () identifier[defaultVal] = identifier[env] . identifier[subst] ( identifier[SCons] . identifier[Util] . identifier[to_String] ( identifier[option] . identifier[default] )) keyword[if] identifier[option] . identifier[converter] : identifier[defaultVal] = identifier[option] . identifier[converter] ( identifier[defaultVal] ) keyword[if] identifier[str] ( identifier[env] . identifier[subst] ( literal[string] % identifier[option] . identifier[key] ))!= identifier[str] ( identifier[defaultVal] ): identifier[fh] . identifier[write] ( literal[string] %( identifier[option] . identifier[key] , identifier[repr] ( identifier[value] ))) keyword[except] identifier[KeyError] : keyword[pass] keyword[finally] : identifier[fh] . identifier[close] () keyword[except] identifier[IOError] keyword[as] identifier[x] : keyword[raise] identifier[SCons] . identifier[Errors] . identifier[UserError] ( literal[string] %( identifier[filename] , identifier[x] ))
def Save(self, filename, env):
    """
    Saves all the options in the given file.  This file can
    then be used to load the options next run.  This can be used
    to create an option cache file.

    filename - Name of the file to save into
    env - the environment to get the option values from
    """
    # Create the file and write out the header
    try:
        fh = open(filename, 'w')
        try:
            # Make an assignment in the file for each option
            # within the environment that was assigned a value
            # other than the default.
            for option in self.options:
                try:
                    value = env[option.key]
                    try:
                        prepare = value.prepare_to_store  # depends on [control=['try'], data=[]]
                    except AttributeError:
                        try:
                            eval(repr(value))  # depends on [control=['try'], data=[]]
                        except KeyboardInterrupt:
                            raise  # depends on [control=['except'], data=[]]
                        except:
                            # Convert stuff that has a repr() that
                            # cannot be evaluated into a string
                            value = SCons.Util.to_String(value)  # depends on [control=['except'], data=[]]
                        # depends on [control=['except'], data=[]]
                    else:
                        value = prepare()
                    defaultVal = env.subst(SCons.Util.to_String(option.default))
                    if option.converter:
                        defaultVal = option.converter(defaultVal)  # depends on [control=['if'], data=[]]
                    if str(env.subst('${%s}' % option.key)) != str(defaultVal):
                        fh.write('%s = %s\n' % (option.key, repr(value)))  # depends on [control=['if'], data=[]]
                    # depends on [control=['try'], data=[]]
                except KeyError:
                    pass  # depends on [control=['except'], data=[]]
            # depends on [control=['for'], data=['option']]
        # depends on [control=['try'], data=[]]
        finally:
            fh.close()
    # depends on [control=['try'], data=[]]
    except IOError as x:
        raise SCons.Errors.UserError('Error writing options to file: %s\n%s' % (filename, x))  # depends on [control=['except'], data=['x']]
def sbytes2ilines(stream, encoding="utf8", closer=None):
    """
    CONVERT A STREAM (with read() method) OF (ARBITRARY-SIZED) byte BLOCKS
    TO A LINE (CR-DELIMITED) GENERATOR
    """
    def read():
        try:
            while True:
                bytes_ = stream.read(4096)
                if not bytes_:
                    return
                yield bytes_
        except Exception as e:
            Log.error("Problem iterating through stream", cause=e)
        finally:
            try:
                stream.close()
            except Exception:
                pass
            if closer:
                try:
                    closer()
                except Exception:
                    pass

    return ibytes2ilines(read(), encoding=encoding)
def function[sbytes2ilines, parameter[stream, encoding, closer]]: constant[ CONVERT A STREAM (with read() method) OF (ARBITRARY-SIZED) byte BLOCKS TO A LINE (CR-DELIMITED) GENERATOR ] def function[read, parameter[]]: <ast.Try object at 0x7da1b0a3bf40> return[call[name[ibytes2ilines], parameter[call[name[read], parameter[]]]]]
keyword[def] identifier[sbytes2ilines] ( identifier[stream] , identifier[encoding] = literal[string] , identifier[closer] = keyword[None] ): literal[string] keyword[def] identifier[read] (): keyword[try] : keyword[while] keyword[True] : identifier[bytes_] = identifier[stream] . identifier[read] ( literal[int] ) keyword[if] keyword[not] identifier[bytes_] : keyword[return] keyword[yield] identifier[bytes_] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[Log] . identifier[error] ( literal[string] , identifier[cause] = identifier[e] ) keyword[finally] : keyword[try] : identifier[stream] . identifier[close] () keyword[except] identifier[Exception] : keyword[pass] keyword[if] identifier[closer] : keyword[try] : identifier[closer] () keyword[except] identifier[Exception] : keyword[pass] keyword[return] identifier[ibytes2ilines] ( identifier[read] (), identifier[encoding] = identifier[encoding] )
def sbytes2ilines(stream, encoding='utf8', closer=None):
    """
    CONVERT A STREAM (with read() method) OF (ARBITRARY-SIZED) byte BLOCKS
    TO A LINE (CR-DELIMITED) GENERATOR
    """
    def read():
        try:
            while True:
                bytes_ = stream.read(4096)
                if not bytes_:
                    return  # depends on [control=['if'], data=[]]
                yield bytes_  # depends on [control=['while'], data=[]]
            # depends on [control=['try'], data=[]]
        except Exception as e:
            Log.error('Problem iterating through stream', cause=e)  # depends on [control=['except'], data=['e']]
        finally:
            try:
                stream.close()  # depends on [control=['try'], data=[]]
            except Exception:
                pass  # depends on [control=['except'], data=[]]
            if closer:
                try:
                    closer()  # depends on [control=['try'], data=[]]
                except Exception:
                    pass  # depends on [control=['except'], data=[]]
            # depends on [control=['if'], data=[]]

    return ibytes2ilines(read(), encoding=encoding)
def named_module(name):
    """Returns a module given its name."""
    module = __import__(name)
    packages = name.split(".")[1:]
    m = module
    for p in packages:
        m = getattr(m, p)
    return m
def function[named_module, parameter[name]]: constant[Returns a module given its name.] variable[module] assign[=] call[name[__import__], parameter[name[name]]] variable[packages] assign[=] call[call[name[name].split, parameter[constant[.]]]][<ast.Slice object at 0x7da1b09ed120>] variable[m] assign[=] name[module] for taget[name[p]] in starred[name[packages]] begin[:] variable[m] assign[=] call[name[getattr], parameter[name[m], name[p]]] return[name[m]]
keyword[def] identifier[named_module] ( identifier[name] ): literal[string] identifier[module] = identifier[__import__] ( identifier[name] ) identifier[packages] = identifier[name] . identifier[split] ( literal[string] )[ literal[int] :] identifier[m] = identifier[module] keyword[for] identifier[p] keyword[in] identifier[packages] : identifier[m] = identifier[getattr] ( identifier[m] , identifier[p] ) keyword[return] identifier[m]
def named_module(name):
    """Returns a module given its name."""
    module = __import__(name)
    packages = name.split('.')[1:]
    m = module
    for p in packages:
        m = getattr(m, p)  # depends on [control=['for'], data=['p']]
    return m
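A quick demonstration of the dotted-name resolution above, using standard-library modules only:

decoder = named_module('json.decoder')
print(decoder.__name__)                                   # json.decoder
print(named_module('os.path') is __import__('os').path)   # True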
def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags):
    '''
    Function for setting up the splitting jobs as part of the workflow.

    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the jobs will be added to.
    input_tables : pycbc.workflow.core.FileList
        The input files to be split up.
    out_dir : path
        The directory in which output will be written.

    Returns
    --------
    split_table_outs : pycbc.workflow.core.FileList
        The list of split up files as output from this job.
    '''
    cp = workflow.cp

    # Get values from ini file
    try:
        num_splits = cp.get_opt_tags("workflow-splittable",
                                     "splittable-num-banks", tags)
    except BaseException:
        inj_interval = int(cp.get_opt_tags("workflow-splittable",
                                           "splitinjtable-interval", tags))
        if cp.has_option_tags("em_bright_filter", "max-keep", tags) and \
                cp.has_option("workflow-injections", "em-bright-only"):
            num_injs = int(cp.get_opt_tags("em_bright_filter", "max-keep",
                                           tags))
        else:
            num_injs = int(cp.get_opt_tags("workflow-injections", "num-injs",
                                           tags))
        inj_tspace = float(abs(workflow.analysis_time)) / num_injs
        num_splits = int(inj_interval // inj_tspace) + 1

    split_exe_tag = cp.get_opt_tags("workflow-splittable",
                                    "splittable-exe-tag", tags)
    split_exe = os.path.basename(cp.get("executables", split_exe_tag))
    # Select the appropriate class
    exe_class = select_splitfilejob_instance(split_exe)

    # Set up output structure
    out_file_groups = FileList([])

    # Set up the condorJob class for the current executable
    curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits,
                             out_dir=out_dir)

    for input in input_tables:
        node = curr_exe_job.create_node(input, tags=tags)
        workflow.add_node(node)
        out_file_groups += node.output_files
    return out_file_groups
def function[setup_splittable_dax_generated, parameter[workflow, input_tables, out_dir, tags]]: constant[ Function for setting up the splitting jobs as part of the workflow. Parameters ----------- workflow : pycbc.workflow.core.Workflow The Workflow instance that the jobs will be added to. input_tables : pycbc.workflow.core.FileList The input files to be split up. out_dir : path The directory in which output will be written. Returns -------- split_table_outs : pycbc.workflow.core.FileList The list of split up files as output from this job. ] variable[cp] assign[=] name[workflow].cp <ast.Try object at 0x7da20e9b0f70> variable[split_exe_tag] assign[=] call[name[cp].get_opt_tags, parameter[constant[workflow-splittable], constant[splittable-exe-tag], name[tags]]] variable[split_exe] assign[=] call[name[os].path.basename, parameter[call[name[cp].get, parameter[constant[executables], name[split_exe_tag]]]]] variable[exe_class] assign[=] call[name[select_splitfilejob_instance], parameter[name[split_exe]]] variable[out_file_groups] assign[=] call[name[FileList], parameter[list[[]]]] variable[curr_exe_job] assign[=] call[name[exe_class], parameter[name[workflow].cp, name[split_exe_tag], name[num_splits]]] for taget[name[input]] in starred[name[input_tables]] begin[:] variable[node] assign[=] call[name[curr_exe_job].create_node, parameter[name[input]]] call[name[workflow].add_node, parameter[name[node]]] <ast.AugAssign object at 0x7da20c990670> return[name[out_file_groups]]
keyword[def] identifier[setup_splittable_dax_generated] ( identifier[workflow] , identifier[input_tables] , identifier[out_dir] , identifier[tags] ): literal[string] identifier[cp] = identifier[workflow] . identifier[cp] keyword[try] : identifier[num_splits] = identifier[cp] . identifier[get_opt_tags] ( literal[string] , literal[string] , identifier[tags] ) keyword[except] identifier[BaseException] : identifier[inj_interval] = identifier[int] ( identifier[cp] . identifier[get_opt_tags] ( literal[string] , literal[string] , identifier[tags] )) keyword[if] identifier[cp] . identifier[has_option_tags] ( literal[string] , literal[string] , identifier[tags] ) keyword[and] identifier[cp] . identifier[has_option] ( literal[string] , literal[string] ): identifier[num_injs] = identifier[int] ( identifier[cp] . identifier[get_opt_tags] ( literal[string] , literal[string] , identifier[tags] )) keyword[else] : identifier[num_injs] = identifier[int] ( identifier[cp] . identifier[get_opt_tags] ( literal[string] , literal[string] , identifier[tags] )) identifier[inj_tspace] = identifier[float] ( identifier[abs] ( identifier[workflow] . identifier[analysis_time] ))/ identifier[num_injs] identifier[num_splits] = identifier[int] ( identifier[inj_interval] // identifier[inj_tspace] )+ literal[int] identifier[split_exe_tag] = identifier[cp] . identifier[get_opt_tags] ( literal[string] , literal[string] , identifier[tags] ) identifier[split_exe] = identifier[os] . identifier[path] . identifier[basename] ( identifier[cp] . identifier[get] ( literal[string] , identifier[split_exe_tag] )) identifier[exe_class] = identifier[select_splitfilejob_instance] ( identifier[split_exe] ) identifier[out_file_groups] = identifier[FileList] ([]) identifier[curr_exe_job] = identifier[exe_class] ( identifier[workflow] . identifier[cp] , identifier[split_exe_tag] , identifier[num_splits] , identifier[out_dir] = identifier[out_dir] ) keyword[for] identifier[input] keyword[in] identifier[input_tables] : identifier[node] = identifier[curr_exe_job] . identifier[create_node] ( identifier[input] , identifier[tags] = identifier[tags] ) identifier[workflow] . identifier[add_node] ( identifier[node] ) identifier[out_file_groups] += identifier[node] . identifier[output_files] keyword[return] identifier[out_file_groups]
def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags):
    """
    Function for setting up the splitting jobs as part of the workflow.

    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the jobs will be added to.
    input_tables : pycbc.workflow.core.FileList
        The input files to be split up.
    out_dir : path
        The directory in which output will be written.

    Returns
    --------
    split_table_outs : pycbc.workflow.core.FileList
        The list of split up files as output from this job.
    """
    cp = workflow.cp

    # Get values from ini file
    try:
        num_splits = cp.get_opt_tags('workflow-splittable', 'splittable-num-banks', tags)  # depends on [control=['try'], data=[]]
    except BaseException:
        inj_interval = int(cp.get_opt_tags('workflow-splittable', 'splitinjtable-interval', tags))
        if cp.has_option_tags('em_bright_filter', 'max-keep', tags) and cp.has_option('workflow-injections', 'em-bright-only'):
            num_injs = int(cp.get_opt_tags('em_bright_filter', 'max-keep', tags))  # depends on [control=['if'], data=[]]
        else:
            num_injs = int(cp.get_opt_tags('workflow-injections', 'num-injs', tags))
        inj_tspace = float(abs(workflow.analysis_time)) / num_injs
        num_splits = int(inj_interval // inj_tspace) + 1  # depends on [control=['except'], data=[]]

    split_exe_tag = cp.get_opt_tags('workflow-splittable', 'splittable-exe-tag', tags)
    split_exe = os.path.basename(cp.get('executables', split_exe_tag))
    # Select the appropriate class
    exe_class = select_splitfilejob_instance(split_exe)

    # Set up output structure
    out_file_groups = FileList([])

    # Set up the condorJob class for the current executable
    curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits, out_dir=out_dir)

    for input in input_tables:
        node = curr_exe_job.create_node(input, tags=tags)
        workflow.add_node(node)
        out_file_groups += node.output_files  # depends on [control=['for'], data=['input']]
    return out_file_groups
def os_packages(metadata):
    """ Installs operating system dependent packages """
    family = metadata[0]
    release = metadata[1]
    if 'Amazon' in family and '2' not in release:
        stdout_message('Identified Amazon Linux 1 os distro')
        commands = [
            'sudo yum -y update',
            'sudo yum -y groupinstall "Development tools"'
        ]
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))
        return True
    elif 'Amazon' in family and '2' in release:
        stdout_message('Identified Amazon Linux 2 os distro')
        commands = [
            'sudo yum -y update',
            'sudo yum -y groupinstall "Development tools"'
        ]
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))
        return True
    elif 'Redhat' in family:
        stdout_message('Identified Redhat Enterprise Linux os distro')
        commands = [
            'sudo yum -y update',
            'sudo yum -y groupinstall "Development tools"'
        ]
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))
    elif 'Ubuntu' in family or 'Mint' in family:
        # test each name explicitly: `'Ubuntu' or 'Mint' in family` is always truthy
        stdout_message('Identified Ubuntu Linux os distro')
        commands = [
            'sudo apt -y update',
            'sudo apt -y upgrade',
            'sudo yum -y groupinstall "Development tools"'
        ]
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))
        return True
    return False
def function[os_packages, parameter[metadata]]: constant[ Installs operating system dependent packages ] variable[family] assign[=] call[name[metadata]][constant[0]] variable[release] assign[=] call[name[metadata]][constant[1]] if <ast.BoolOp object at 0x7da18f00caf0> begin[:] call[name[stdout_message], parameter[constant[Identified Amazon Linux 1 os distro]]] variable[commands] assign[=] list[[<ast.Constant object at 0x7da18f00d4b0>, <ast.Constant object at 0x7da18f00f940>]] for taget[name[cmd]] in starred[name[commands]] begin[:] call[name[stdout_message], parameter[call[name[subprocess].getoutput, parameter[name[cmd]]]]] return[constant[True]] return[constant[False]]
keyword[def] identifier[os_packages] ( identifier[metadata] ): literal[string] identifier[family] = identifier[metadata] [ literal[int] ] identifier[release] = identifier[metadata] [ literal[int] ] keyword[if] literal[string] keyword[in] identifier[family] keyword[and] literal[string] keyword[not] keyword[in] identifier[release] : identifier[stdout_message] ( literal[string] ) identifier[commands] =[ literal[string] , literal[string] ] keyword[for] identifier[cmd] keyword[in] identifier[commands] : identifier[stdout_message] ( identifier[subprocess] . identifier[getoutput] ( identifier[cmd] )) keyword[return] keyword[True] keyword[elif] literal[string] keyword[in] identifier[family] keyword[and] literal[string] keyword[in] identifier[release] : identifier[stdout_message] ( literal[string] ) identifier[commands] =[ literal[string] , literal[string] ] keyword[for] identifier[cmd] keyword[in] identifier[commands] : identifier[stdout_message] ( identifier[subprocess] . identifier[getoutput] ( identifier[cmd] )) keyword[return] keyword[True] keyword[elif] literal[string] keyword[in] identifier[family] : identifier[stdout_message] ( literal[string] ) identifier[commands] =[ literal[string] , literal[string] ] keyword[for] identifier[cmd] keyword[in] identifier[commands] : identifier[stdout_message] ( identifier[subprocess] . identifier[getoutput] ( identifier[cmd] )) keyword[elif] literal[string] keyword[or] literal[string] keyword[in] identifier[family] : identifier[stdout_message] ( literal[string] ) identifier[commands] =[ literal[string] , literal[string] , literal[string] ] keyword[for] identifier[cmd] keyword[in] identifier[commands] : identifier[stdout_message] ( identifier[subprocess] . identifier[getoutput] ( identifier[cmd] )) keyword[return] keyword[True] keyword[return] keyword[False]
def os_packages(metadata):
    """ Installs operating system dependent packages """
    family = metadata[0]
    release = metadata[1]
    if 'Amazon' in family and '2' not in release:
        stdout_message('Identified Amazon Linux 1 os distro')
        commands = ['sudo yum -y update', 'sudo yum -y groupinstall "Development tools"']
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))  # depends on [control=['for'], data=['cmd']]
        return True  # depends on [control=['if'], data=[]]
    elif 'Amazon' in family and '2' in release:
        stdout_message('Identified Amazon Linux 2 os distro')
        commands = ['sudo yum -y update', 'sudo yum -y groupinstall "Development tools"']
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))  # depends on [control=['for'], data=['cmd']]
        return True  # depends on [control=['if'], data=[]]
    elif 'Redhat' in family:
        stdout_message('Identified Redhat Enterprise Linux os distro')
        commands = ['sudo yum -y update', 'sudo yum -y groupinstall "Development tools"']
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))  # depends on [control=['for'], data=['cmd']]
        # depends on [control=['if'], data=[]]
    elif 'Ubuntu' in family or 'Mint' in family:
        # test each name explicitly: `'Ubuntu' or 'Mint' in family` is always truthy
        stdout_message('Identified Ubuntu Linux os distro')
        commands = ['sudo apt -y update', 'sudo apt -y upgrade', 'sudo yum -y groupinstall "Development tools"']
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))  # depends on [control=['for'], data=['cmd']]
        return True  # depends on [control=['if'], data=[]]
    return False
def set_volume(self, volume):
    """Set volume."""
    for data in self._group.get('clients'):
        client = self._server.client(data.get('id'))
        yield from client.set_volume(volume, update_group=False)
        client.update_volume({
            'volume': {
                'percent': volume,
                'muted': client.muted
            }
        })
    _LOGGER.info('set volume to %s on clients in %s',
                 volume, self.friendly_name)
def function[set_volume, parameter[self, volume]]: constant[Set volume.] for taget[name[data]] in starred[call[name[self]._group.get, parameter[constant[clients]]]] begin[:] variable[client] assign[=] call[name[self]._server.client, parameter[call[name[data].get, parameter[constant[id]]]]] <ast.YieldFrom object at 0x7da1b02e6ec0> call[name[client].update_volume, parameter[dictionary[[<ast.Constant object at 0x7da1b02e7220>], [<ast.Dict object at 0x7da1b02e69b0>]]]] call[name[_LOGGER].info, parameter[constant[set volume to %s on clients in %s], name[volume], name[self].friendly_name]]
keyword[def] identifier[set_volume] ( identifier[self] , identifier[volume] ): literal[string] keyword[for] identifier[data] keyword[in] identifier[self] . identifier[_group] . identifier[get] ( literal[string] ): identifier[client] = identifier[self] . identifier[_server] . identifier[client] ( identifier[data] . identifier[get] ( literal[string] )) keyword[yield] keyword[from] identifier[client] . identifier[set_volume] ( identifier[volume] , identifier[update_group] = keyword[False] ) identifier[client] . identifier[update_volume] ({ literal[string] :{ literal[string] : identifier[volume] , literal[string] : identifier[client] . identifier[muted] } }) identifier[_LOGGER] . identifier[info] ( literal[string] , identifier[volume] , identifier[self] . identifier[friendly_name] )
def set_volume(self, volume):
    """Set volume."""
    for data in self._group.get('clients'):
        client = self._server.client(data.get('id'))
        yield from client.set_volume(volume, update_group=False)
        client.update_volume({'volume': {'percent': volume, 'muted': client.muted}})  # depends on [control=['for'], data=['data']]
    _LOGGER.info('set volume to %s on clients in %s', volume, self.friendly_name)
def get_id(test):
    """
    Return the id of the given test, formatted as expected by nose.
    :param test: a nose.case.Test instance
    :return:
    """
    test_id = test.id()
    module_length = len(test.test.__module__)
    return test_id[:module_length] + ":" + test_id[module_length + 1:]
def function[get_id, parameter[test]]: constant[ Return the id of the given test, formatted as expected by nose. :param test: a nose.case.Test instance :return: ] variable[test_id] assign[=] call[name[test].id, parameter[]] variable[module_length] assign[=] call[name[len], parameter[name[test].test.__module__]] return[binary_operation[binary_operation[call[name[test_id]][<ast.Slice object at 0x7da1b1340fd0>] + constant[:]] + call[name[test_id]][<ast.Slice object at 0x7da204623700>]]]
keyword[def] identifier[get_id] ( identifier[test] ): literal[string] identifier[test_id] = identifier[test] . identifier[id] () identifier[module_length] = identifier[len] ( identifier[test] . identifier[test] . identifier[__module__] ) keyword[return] identifier[test_id] [: identifier[module_length] ]+ literal[string] + identifier[test_id] [ identifier[module_length] + literal[int] :]
def get_id(test):
    """
    Return the id of the given test, formatted as expected by nose.
    :param test: a nose.case.Test instance
    :return:
    """
    test_id = test.id()
    module_length = len(test.test.__module__)
    return test_id[:module_length] + ':' + test_id[module_length + 1:]
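The slicing above just swaps the dot between the module path and the test name for a colon. A stand-in object makes the transformation concrete:

class FakeCase:
    """Mimics just enough of nose.case.Test for get_id."""
    class test:
        __module__ = 'mypkg.tests.test_api'
    @staticmethod
    def id():
        return 'mypkg.tests.test_api.TestClient.test_get'

print(get_id(FakeCase))
# mypkg.tests.test_api:TestClient.test_get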
def estimate(phenotype, G=None, K=None, covariates=None, overdispersion=True):
    """Estimate the so-called narrow-sense heritability.

    It supports Bernoulli and Binomial phenotypes (see `outcome_type`).
    The user must specify only one of the parameters G, K, and QS for
    defining the genetic background.

    Let :math:`N` be the sample size, :math:`S` the number of covariates,
    and :math:`P_b` the number of genetic markers used for Kinship
    estimation.

    :param numpy.ndarray y: Phenotype. The domain has to be the non-negative
                            integers. Dimension (:math:`N\\times 0`).
    :param numpy.ndarray G: Genetic markers matrix used internally for kinship
                            estimation. Dimension (:math:`N\\times P_b`).
    :param numpy.ndarray K: Kinship matrix. Dimension (:math:`N\\times N`).
    :param tuple QS: Economic eigen decomposition of the Kinship matrix.
    :param numpy.ndarray covariate: Covariates. Default is an offset.
                                    Dimension (:math:`N\\times S`).
    :param object outcome_type: Either :class:`limix_qep.Bernoulli` (default)
                                or a :class:`limix_qep.Binomial` instance.
    :param float prevalence: Population rate of cases for dichotomous
                             phenotypes. Typically useful for case-control
                             studies.
    :return: the estimated heritability.
    """
    logger = logging.getLogger(__name__)
    logger.info('Heritability estimation has started.')

    G, K = _background_standardize(G, K)
    if G is None and K is None:
        raise Exception('G and K cannot be all None.')
    Q0, Q1, S0 = _background_decomposition(G, K)

    if covariates is None:
        logger.debug('Inserting offset covariate.')
        covariates = ones((phenotype.sample_size, 1))

    logger.debug('Constructing EP.')
    from limix_inference.glmm import ExpFamEP
    ep = ExpFamEP(phenotype.to_likelihood(), covariates, Q0, Q1, S0,
                  overdispersion)

    logger.debug('EP optimization.')
    ep.learn()

    h2 = ep.heritability
    logger.info('Found heritability before correction: %.5f.', h2)
    return h2
def function[estimate, parameter[phenotype, G, K, covariates, overdispersion]]: constant[Estimate the so-called narrow-sense heritability. It supports Bernoulli and Binomial phenotypes (see `outcome_type`). The user must specifiy only one of the parameters G, K, and QS for defining the genetic background. Let :math:`N` be the sample size, :math:`S` the number of covariates, and :math:`P_b` the number of genetic markers used for Kinship estimation. :param numpy.ndarray y: Phenotype. The domain has be the non-negative integers. Dimension (:math:`N\times 0`). :param numpy.ndarray G: Genetic markers matrix used internally for kinship estimation. Dimension (:math:`N\times P_b`). :param numpy.ndarray K: Kinship matrix. Dimension (:math:`N\times N`). :param tuple QS: Economic eigen decomposition of the Kinship matrix. :param numpy.ndarray covariate: Covariates. Default is an offset. Dimension (:math:`N\times S`). :param object oucome_type: Either :class:`limix_qep.Bernoulli` (default) or a :class:`limix_qep.Binomial` instance. :param float prevalence: Population rate of cases for dichotomous phenotypes. Typically useful for case-control studies. :return: a tuple containing the estimated heritability and additional information, respectively. ] variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]] call[name[logger].info, parameter[constant[Heritability estimation has started.]]] <ast.Tuple object at 0x7da1b09ee650> assign[=] call[name[_background_standardize], parameter[name[G], name[K]]] if <ast.BoolOp object at 0x7da1b09ecf10> begin[:] <ast.Raise object at 0x7da1b09efc10> <ast.Tuple object at 0x7da1b09ee530> assign[=] call[name[_background_decomposition], parameter[name[G], name[K]]] if compare[name[covariates] is constant[None]] begin[:] call[name[logger].debug, parameter[constant[Inserting offset covariate.]]] variable[covariates] assign[=] call[name[ones], parameter[tuple[[<ast.Attribute object at 0x7da1b09ee950>, <ast.Constant object at 0x7da1b09ed900>]]]] call[name[logger].debug, parameter[constant[Constructing EP.]]] from relative_module[limix_inference.glmm] import module[ExpFamEP] variable[ep] assign[=] call[name[ExpFamEP], parameter[call[name[phenotype].to_likelihood, parameter[]], name[covariates], name[Q0], name[Q1], name[S0], name[overdispersion]]] call[name[logger].debug, parameter[constant[EP optimization.]]] call[name[ep].learn, parameter[]] variable[h2] assign[=] name[ep].heritability call[name[logger].info, parameter[constant[Found heritability before correction: %.5f.], name[h2]]] return[name[h2]]
keyword[def] identifier[estimate] ( identifier[phenotype] , identifier[G] = keyword[None] , identifier[K] = keyword[None] , identifier[covariates] = keyword[None] , identifier[overdispersion] = keyword[True] ): literal[string] identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) identifier[logger] . identifier[info] ( literal[string] ) identifier[G] , identifier[K] = identifier[_background_standardize] ( identifier[G] , identifier[K] ) keyword[if] identifier[G] keyword[is] keyword[None] keyword[and] identifier[K] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[Q0] , identifier[Q1] , identifier[S0] = identifier[_background_decomposition] ( identifier[G] , identifier[K] ) keyword[if] identifier[covariates] keyword[is] keyword[None] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[covariates] = identifier[ones] (( identifier[phenotype] . identifier[sample_size] , literal[int] )) identifier[logger] . identifier[debug] ( literal[string] ) keyword[from] identifier[limix_inference] . identifier[glmm] keyword[import] identifier[ExpFamEP] identifier[ep] = identifier[ExpFamEP] ( identifier[phenotype] . identifier[to_likelihood] (), identifier[covariates] , identifier[Q0] , identifier[Q1] , identifier[S0] , identifier[overdispersion] ) identifier[logger] . identifier[debug] ( literal[string] ) identifier[ep] . identifier[learn] () identifier[h2] = identifier[ep] . identifier[heritability] identifier[logger] . identifier[info] ( literal[string] , identifier[h2] ) keyword[return] identifier[h2]
def estimate(phenotype, G=None, K=None, covariates=None, overdispersion=True):
    """Estimate the so-called narrow-sense heritability.

    It supports Bernoulli and Binomial phenotypes (see `outcome_type`).
    The user must specify only one of the parameters G, K, and QS for
    defining the genetic background.

    Let :math:`N` be the sample size, :math:`S` the number of covariates,
    and :math:`P_b` the number of genetic markers used for Kinship
    estimation.

    :param numpy.ndarray y: Phenotype. The domain has to be the non-negative
                            integers. Dimension (:math:`N\\times 0`).
    :param numpy.ndarray G: Genetic markers matrix used internally for kinship
                            estimation. Dimension (:math:`N\\times P_b`).
    :param numpy.ndarray K: Kinship matrix. Dimension (:math:`N\\times N`).
    :param tuple QS: Economic eigen decomposition of the Kinship matrix.
    :param numpy.ndarray covariate: Covariates. Default is an offset.
                                    Dimension (:math:`N\\times S`).
    :param object outcome_type: Either :class:`limix_qep.Bernoulli` (default)
                                or a :class:`limix_qep.Binomial` instance.
    :param float prevalence: Population rate of cases for dichotomous
                             phenotypes. Typically useful for case-control
                             studies.
    :return: the estimated heritability.
    """
    logger = logging.getLogger(__name__)
    logger.info('Heritability estimation has started.')

    (G, K) = _background_standardize(G, K)
    if G is None and K is None:
        raise Exception('G and K cannot be all None.')  # depends on [control=['if'], data=[]]
    (Q0, Q1, S0) = _background_decomposition(G, K)

    if covariates is None:
        logger.debug('Inserting offset covariate.')
        covariates = ones((phenotype.sample_size, 1))  # depends on [control=['if'], data=['covariates']]

    logger.debug('Constructing EP.')
    from limix_inference.glmm import ExpFamEP
    ep = ExpFamEP(phenotype.to_likelihood(), covariates, Q0, Q1, S0,
                  overdispersion)

    logger.debug('EP optimization.')
    ep.learn()

    h2 = ep.heritability
    logger.info('Found heritability before correction: %.5f.', h2)
    return h2
def prefix(prefix):
    """Returns a dictionary of all environment variables starting with
    the given prefix, lower cased and stripped.
    """
    d = {}
    e = lower_dict(environ.copy())
    prefix = prefix.lower()
    for k, v in e.items():
        try:
            if k.startswith(prefix):
                k = k[len(prefix):]
                d[k] = v
        except AttributeError:
            pass
    return d
def function[prefix, parameter[prefix]]: constant[Returns a dictionary of all environment variables starting with the given prefix, lower cased and stripped. ] variable[d] assign[=] dictionary[[], []] variable[e] assign[=] call[name[lower_dict], parameter[call[name[environ].copy, parameter[]]]] variable[prefix] assign[=] call[name[prefix].lower, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b0be0790>, <ast.Name object at 0x7da1b0be3010>]]] in starred[call[name[e].items, parameter[]]] begin[:] <ast.Try object at 0x7da1b0be1330> return[name[d]]
keyword[def] identifier[prefix] ( identifier[prefix] ): literal[string] identifier[d] ={} identifier[e] = identifier[lower_dict] ( identifier[environ] . identifier[copy] ()) identifier[prefix] = identifier[prefix] . identifier[lower] () keyword[for] identifier[k] , identifier[v] keyword[in] identifier[e] . identifier[items] (): keyword[try] : keyword[if] identifier[k] . identifier[startswith] ( identifier[prefix] ): identifier[k] = identifier[k] [ identifier[len] ( identifier[prefix] ):] identifier[d] [ identifier[k] ]= identifier[v] keyword[except] identifier[AttributeError] : keyword[pass] keyword[return] identifier[d]
def prefix(prefix):
    """Returns a dictionary of all environment variables starting with
    the given prefix, lower cased and stripped.
    """
    d = {}
    e = lower_dict(environ.copy())
    prefix = prefix.lower()
    for (k, v) in e.items():
        try:
            if k.startswith(prefix):
                k = k[len(prefix):]
                d[k] = v  # depends on [control=['if'], data=[]]
        except AttributeError:
            pass  # depends on [control=['except'], data=[]]
    # depends on [control=['for'], data=[]]
    return d
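A usage sketch for the function above; `lower_dict` is a helper from the surrounding module, so a one-line stand-in is supplied here:

import os
from os import environ

def lower_dict(d):
    """Stand-in for the module's helper: lower-case every key."""
    return {k.lower(): v for k, v in d.items()}

os.environ['MYAPP_DEBUG'] = '1'
os.environ['MYAPP_DB_URL'] = 'sqlite://'
print(prefix('MYAPP_'))
# {'debug': '1', 'db_url': 'sqlite://'}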
def _from_dict(cls, _dict):
    """Initialize a Face object from a json dictionary."""
    args = {}
    if 'age' in _dict:
        args['age'] = FaceAge._from_dict(_dict.get('age'))
    if 'gender' in _dict:
        args['gender'] = FaceGender._from_dict(_dict.get('gender'))
    if 'face_location' in _dict:
        args['face_location'] = FaceLocation._from_dict(
            _dict.get('face_location'))
    return cls(**args)
def function[_from_dict, parameter[cls, _dict]]: constant[Initialize a Face object from a json dictionary.] variable[args] assign[=] dictionary[[], []] if compare[constant[age] in name[_dict]] begin[:] call[name[args]][constant[age]] assign[=] call[name[FaceAge]._from_dict, parameter[call[name[_dict].get, parameter[constant[age]]]]] if compare[constant[gender] in name[_dict]] begin[:] call[name[args]][constant[gender]] assign[=] call[name[FaceGender]._from_dict, parameter[call[name[_dict].get, parameter[constant[gender]]]]] if compare[constant[face_location] in name[_dict]] begin[:] call[name[args]][constant[face_location]] assign[=] call[name[FaceLocation]._from_dict, parameter[call[name[_dict].get, parameter[constant[face_location]]]]] return[call[name[cls], parameter[]]]
keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ): literal[string] identifier[args] ={} keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]= identifier[FaceAge] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] )) keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]= identifier[FaceGender] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] )) keyword[if] literal[string] keyword[in] identifier[_dict] : identifier[args] [ literal[string] ]= identifier[FaceLocation] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] )) keyword[return] identifier[cls] (** identifier[args] )
def _from_dict(cls, _dict): """Initialize a Face object from a json dictionary.""" args = {} if 'age' in _dict: args['age'] = FaceAge._from_dict(_dict.get('age')) # depends on [control=['if'], data=['_dict']] if 'gender' in _dict: args['gender'] = FaceGender._from_dict(_dict.get('gender')) # depends on [control=['if'], data=['_dict']] if 'face_location' in _dict: args['face_location'] = FaceLocation._from_dict(_dict.get('face_location')) # depends on [control=['if'], data=['_dict']] return cls(**args)
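As a sketch of the expected input for Face._from_dict above, a hypothetical response fragment; the field names inside each nested section belong to FaceAge, FaceGender and FaceLocation and are illustrative assumptions, not taken from this excerpt.

# Each recognized top-level key is delegated to the matching nested class;
# absent keys are simply skipped, so partial dicts parse fine.
face = Face._from_dict({
    "age": {"min": 23, "max": 33, "score": 0.85},           # assumed fields
    "gender": {"gender": "FEMALE", "score": 0.98},          # assumed fields
    "face_location": {"width": 92, "height": 111, "left": 22, "top": 40},
})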
def _fill_lookup_prop(self, testsuites_properties): """Fills the polarion-lookup-method property.""" if not self._lookup_prop: raise Dump2PolarionException("Failed to set the 'polarion-lookup-method' property") etree.SubElement( testsuites_properties, "property", {"name": "polarion-lookup-method", "value": self._lookup_prop}, )
def function[_fill_lookup_prop, parameter[self, testsuites_properties]]: constant[Fills the polarion-lookup-method property.] if <ast.UnaryOp object at 0x7da1b23d6080> begin[:] <ast.Raise object at 0x7da1b23d62c0> call[name[etree].SubElement, parameter[name[testsuites_properties], constant[property], dictionary[[<ast.Constant object at 0x7da1b23d5300>, <ast.Constant object at 0x7da1b23d79d0>], [<ast.Constant object at 0x7da20c6a9000>, <ast.Attribute object at 0x7da20c6a8a90>]]]]
keyword[def] identifier[_fill_lookup_prop] ( identifier[self] , identifier[testsuites_properties] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_lookup_prop] : keyword[raise] identifier[Dump2PolarionException] ( literal[string] ) identifier[etree] . identifier[SubElement] ( identifier[testsuites_properties] , literal[string] , { literal[string] : literal[string] , literal[string] : identifier[self] . identifier[_lookup_prop] }, )
def _fill_lookup_prop(self, testsuites_properties): """Fills the polarion-lookup-method property.""" if not self._lookup_prop: raise Dump2PolarionException("Failed to set the 'polarion-lookup-method' property") # depends on [control=['if'], data=[]] etree.SubElement(testsuites_properties, 'property', {'name': 'polarion-lookup-method', 'value': self._lookup_prop})
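For reference, a small runnable sketch of what _fill_lookup_prop appends, assuming etree is lxml.etree and a lookup method of "id" (both assumptions made for illustration):

from lxml import etree

# Stand-in for the testsuites properties element built elsewhere.
testsuites_properties = etree.Element("properties")
etree.SubElement(
    testsuites_properties, "property",
    {"name": "polarion-lookup-method", "value": "id"},  # "id" is illustrative
)
print(etree.tostring(testsuites_properties).decode())
# <properties><property name="polarion-lookup-method" value="id"/></properties>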
def _update_from_raw_data(self, raw_data, data_type_id=None, name=None,
                          description=None):
    """
    Upload already serialized raw data and replace the existing dataset.

    Parameters
    ----------
    raw_data: bytes
        Dataset contents to upload.
    data_type_id : str
        Serialization format of the raw data.
        If None, the format of the existing dataset is used.
        Supported formats are:
            'PlainText'
            'GenericCSV'
            'GenericTSV'
            'GenericCSVNoHeader'
            'GenericTSVNoHeader'
            'ARFF'
        See the azureml.DataTypeIds class for constants.
    name : str, optional
        Name for the dataset. If None, the name of the existing dataset
        is used.
    description : str, optional
        Description for the dataset. If None, the description of the
        existing dataset is used.
    """
    _not_none('raw_data', raw_data)

    if data_type_id is None:
        data_type_id = self.data_type_id
    if name is None:
        name = self.name
    if description is None:
        description = self.description

    self._upload_and_refresh(raw_data, data_type_id, name, description)
def function[_update_from_raw_data, parameter[self, raw_data, data_type_id, name, description]]: constant[ Upload already serialized raw data and replace the existing dataset. Parameters ---------- raw_data: bytes Dataset contents to upload. data_type_id : str Serialization format of the raw data. If None, the format of the existing dataset is used. Supported formats are: 'PlainText' 'GenericCSV' 'GenericTSV' 'GenericCSVNoHeader' 'GenericTSVNoHeader' 'ARFF' See the azureml.DataTypeIds class for constants. name : str, optional Name for the dataset. If None, the name of the existing dataset is used. description : str, optional Description for the dataset. If None, the description of the existing dataset is used. ] call[name[_not_none], parameter[constant[raw_data], name[raw_data]]] if compare[name[data_type_id] is constant[None]] begin[:] variable[data_type_id] assign[=] name[self].data_type_id if compare[name[name] is constant[None]] begin[:] variable[name] assign[=] name[self].name if compare[name[description] is constant[None]] begin[:] variable[description] assign[=] name[self].description call[name[self]._upload_and_refresh, parameter[name[raw_data], name[data_type_id], name[name], name[description]]]
keyword[def] identifier[_update_from_raw_data] ( identifier[self] , identifier[raw_data] , identifier[data_type_id] = keyword[None] , identifier[name] = keyword[None] , identifier[description] = keyword[None] ): literal[string] identifier[_not_none] ( literal[string] , identifier[raw_data] ) keyword[if] identifier[data_type_id] keyword[is] keyword[None] : identifier[data_type_id] = identifier[self] . identifier[data_type_id] keyword[if] identifier[name] keyword[is] keyword[None] : identifier[name] = identifier[self] . identifier[name] keyword[if] identifier[description] keyword[is] keyword[None] : identifier[description] = identifier[self] . identifier[description] identifier[self] . identifier[_upload_and_refresh] ( identifier[raw_data] , identifier[data_type_id] , identifier[name] , identifier[description] )
def _update_from_raw_data(self, raw_data, data_type_id=None, name=None, description=None):
    """
    Upload already serialized raw data and replace the existing dataset.

    Parameters
    ----------
    raw_data: bytes
        Dataset contents to upload.
    data_type_id : str
        Serialization format of the raw data.
        If None, the format of the existing dataset is used.
        Supported formats are:
            'PlainText'
            'GenericCSV'
            'GenericTSV'
            'GenericCSVNoHeader'
            'GenericTSVNoHeader'
            'ARFF'
        See the azureml.DataTypeIds class for constants.
    name : str, optional
        Name for the dataset. If None, the name of the existing dataset is used.
    description : str, optional
        Description for the dataset. If None, the description of the existing dataset is used.
    """
    _not_none('raw_data', raw_data)
    if data_type_id is None:
        data_type_id = self.data_type_id # depends on [control=['if'], data=['data_type_id']]
    if name is None:
        name = self.name # depends on [control=['if'], data=['name']]
    if description is None:
        description = self.description # depends on [control=['if'], data=['description']]
    self._upload_and_refresh(raw_data, data_type_id, name, description)
def config(self, configlet=None, plane='sdr', **attributes):
    """Configure the device.

    This method applies configuration to the device.

    Args:
        configlet (text): The configuration template.
        plane (text): sdr or admin
        attributes (dict): The dictionary of attributes used in template.

    Returns:
        A string with commit label or None

    """
    begin = time.time()
    label = self._chain.target_device.config(configlet, plane, **attributes)
    elapsed = time.time() - begin
    if label:
        self.emit_message("Configuration change lasted {:.0f}s. Label: {}".format(elapsed, label),
                          log_level=logging.INFO)
    else:
        self.emit_message("Configuration failed.", log_level=logging.WARNING)
    return label
def function[config, parameter[self, configlet, plane]]: constant[Configure the device. This method applies configuration to the device. Args: configlet (text): The configuration template. plane (text): sdr or admin attributes (dict): The dictionary of attributes used in template. Returns: A string with commit label or None ] variable[begin] assign[=] call[name[time].time, parameter[]] variable[label] assign[=] call[name[self]._chain.target_device.config, parameter[name[configlet], name[plane]]] variable[elapsed] assign[=] binary_operation[call[name[time].time, parameter[]] - name[begin]] if name[label] begin[:] call[name[self].emit_message, parameter[call[constant[Configuration change lasted {:.0f}s. Label: {}].format, parameter[name[elapsed], name[label]]]]] return[name[label]]
keyword[def] identifier[config] ( identifier[self] , identifier[configlet] = keyword[None] , identifier[plane] = literal[string] ,** identifier[attributes] ): literal[string] identifier[begin] = identifier[time] . identifier[time] () identifier[label] = identifier[self] . identifier[_chain] . identifier[target_device] . identifier[config] ( identifier[configlet] , identifier[plane] ,** identifier[attributes] ) identifier[elapsed] = identifier[time] . identifier[time] ()- identifier[begin] keyword[if] identifier[label] : identifier[self] . identifier[emit_message] ( literal[string] . identifier[format] ( identifier[elapsed] , identifier[label] ), identifier[log_level] = identifier[logging] . identifier[INFO] ) keyword[else] : identifier[self] . identifier[emit_message] ( literal[string] , identifier[log_level] = identifier[logging] . identifier[WARNING] ) keyword[return] identifier[label]
def config(self, configlet=None, plane='sdr', **attributes): """Configure the device. This method applies configuration to the device. Args: configlet (text): The configuration template. plane (text): sdr or admin attributes (dict): The dictionary of attributes used in template. Returns: A string with commit label or None """ begin = time.time() label = self._chain.target_device.config(configlet, plane, **attributes) elapsed = time.time() - begin if label: self.emit_message('Configuration change lasted {:.0f}s. Label: {}'.format(elapsed, label), log_level=logging.INFO) # depends on [control=['if'], data=[]] else: self.emit_message('Configuration failed.', log_level=logging.WARNING) return label
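A hypothetical call sketch for config() above; the device object, the configlet text and the attribute name are assumptions, and whatever templating target_device.config() applies to the attributes is not shown in this excerpt.

# Push a one-line configlet on the SDR plane; extra keyword arguments are
# forwarded as template attributes.
configlet = "hostname {hostname}"  # template syntax assumed for illustration
label = device.config(configlet, plane="sdr", hostname="lab-router-1")
if label:
    print("committed as", label)  # on failure config() returns None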
def begin_group(self, indent=0, open=''):
    """
    Begin a group. If you want support for python < 2.5, which doesn't
    have the with statement, this is the preferred way:

        p.begin_group(1, '{')
        ...
        p.end_group(1, '}')

    The python 2.5 expression would be this:

        with p.group(1, '{', '}'):
            ...

    The first parameter specifies the indentation for the next line (usually
    the width of the opening text), the second the opening text. All
    parameters are optional.
    """
    if open:
        self.text(open)
    group = Group(self.group_stack[-1].depth + 1)
    self.group_stack.append(group)
    self.group_queue.enq(group)
    self.indentation += indent
def function[begin_group, parameter[self, indent, open]]: constant[ Begin a group. If you want support for python < 2.5, which doesn't have the with statement, this is the preferred way: p.begin_group(1, '{') ... p.end_group(1, '}') The python 2.5 expression would be this: with p.group(1, '{', '}'): ... The first parameter specifies the indentation for the next line (usually the width of the opening text), the second the opening text. All parameters are optional. ] if name[open] begin[:] call[name[self].text, parameter[name[open]]] variable[group] assign[=] call[name[Group], parameter[binary_operation[call[name[self].group_stack][<ast.UnaryOp object at 0x7da2054a4af0>].depth + constant[1]]]] call[name[self].group_stack.append, parameter[name[group]]] call[name[self].group_queue.enq, parameter[name[group]]] <ast.AugAssign object at 0x7da2054a59f0>
keyword[def] identifier[begin_group] ( identifier[self] , identifier[indent] = literal[int] , identifier[open] = literal[string] ): literal[string] keyword[if] identifier[open] : identifier[self] . identifier[text] ( identifier[open] ) identifier[group] = identifier[Group] ( identifier[self] . identifier[group_stack] [- literal[int] ]. identifier[depth] + literal[int] ) identifier[self] . identifier[group_stack] . identifier[append] ( identifier[group] ) identifier[self] . identifier[group_queue] . identifier[enq] ( identifier[group] ) identifier[self] . identifier[indentation] += identifier[indent]
def begin_group(self, indent=0, open=''):
    """
    Begin a group. If you want support for python < 2.5, which doesn't
    have the with statement, this is the preferred way:

        p.begin_group(1, '{')
        ...
        p.end_group(1, '}')

    The python 2.5 expression would be this:

        with p.group(1, '{', '}'):
            ...

    The first parameter specifies the indentation for the next line (usually
    the width of the opening text), the second the opening text. All
    parameters are optional.
    """
    if open:
        self.text(open) # depends on [control=['if'], data=[]]
    group = Group(self.group_stack[-1].depth + 1)
    self.group_stack.append(group)
    self.group_queue.enq(group)
    self.indentation += indent
def new_plugin(self, config, *args, **kwargs):
    """
    Instantiate a plugin.

    Creates the object and stores it in _instance.
    """
    typ = None
    obj = None

    # if type is defined, create a new instance
    if 'type' in config:
        typ = config['type']

    # single key is overriding an existing plugin instance
    elif isinstance(config, collections.Mapping) and len(config) == 1:
        # get type name and shift out config to parent level
        (typ, config) = list(config.items())[0]

    obj = self._ctor(typ, config, *args, **kwargs)

    # store if named
    if 'name' in config:
        self._instance[config['name']] = obj
    else:
        # this could dupe on .name, make name=''?
        config['name'] = typ

    return obj
def function[new_plugin, parameter[self, config]]: constant[ Instantiate a plugin. Creates the object and stores it in _instance. ] variable[typ] assign[=] constant[None] variable[obj] assign[=] constant[None] if compare[constant[type] in name[config]] begin[:] variable[typ] assign[=] call[name[config]][constant[type]] variable[obj] assign[=] call[name[self]._ctor, parameter[name[typ], name[config], <ast.Starred object at 0x7da20c6a91e0>]] if compare[constant[name] in name[config]] begin[:] call[name[self]._instance][call[name[config]][constant[name]]] assign[=] name[obj] return[name[obj]]
keyword[def] identifier[new_plugin] ( identifier[self] , identifier[config] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[typ] = keyword[None] identifier[obj] = keyword[None] keyword[if] literal[string] keyword[in] identifier[config] : identifier[typ] = identifier[config] [ literal[string] ] keyword[elif] identifier[isinstance] ( identifier[config] , identifier[collections] . identifier[Mapping] ) keyword[and] identifier[len] ( identifier[config] )== literal[int] : ( identifier[typ] , identifier[config] )= identifier[list] ( identifier[config] . identifier[items] ())[ literal[int] ] identifier[obj] = identifier[self] . identifier[_ctor] ( identifier[typ] , identifier[config] ,* identifier[args] ,** identifier[kwargs] ) keyword[if] literal[string] keyword[in] identifier[config] : identifier[self] . identifier[_instance] [ identifier[config] [ literal[string] ]]= identifier[obj] keyword[else] : identifier[config] [ literal[string] ]= identifier[typ] keyword[return] identifier[obj]
def new_plugin(self, config, *args, **kwargs):
    """
    Instantiate a plugin.

    Creates the object and stores it in _instance.
    """
    typ = None
    obj = None
    # if type is defined, create a new instance
    if 'type' in config:
        typ = config['type'] # depends on [control=['if'], data=['config']]
    # single key is overriding an existing plugin instance
    elif isinstance(config, collections.Mapping) and len(config) == 1:
        # get type name and shift out config to parent level
        (typ, config) = list(config.items())[0] # depends on [control=['if'], data=[]]
    obj = self._ctor(typ, config, *args, **kwargs)
    # store if named
    if 'name' in config:
        self._instance[config['name']] = obj # depends on [control=['if'], data=['config']]
    else:
        # this could dupe on .name, make name=''?
        config['name'] = typ
    return obj
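To make the two accepted config shapes concrete, a usage sketch derived from the branches above; the manager instance and the "cache" plugin type are hypothetical.

# Explicit form: 'type' selects the plugin class, 'name' registers the instance.
obj_a = manager.new_plugin({"type": "cache", "name": "main_cache"})

# Single-key shorthand: the key becomes the type, the value becomes the config.
obj_b = manager.new_plugin({"cache": {"name": "backup_cache"}})

# Unnamed form: config['name'] is set to the type string and the instance
# is not stored in _instance.
obj_c = manager.new_plugin({"type": "cache"})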
def encode_max_segments_accepted(arg):
    """Encode the maximum number of segments the device will accept, Section 20.1.2.4,
    and if the device says it can only accept one segment it shouldn't say that
    it supports segmentation!"""
    # unspecified
    if not arg:
        return 0

    if arg > 64:
        return 7

    # the largest encodable value not greater than the arg
    for i in range(6, 0, -1):
        if _max_segments_accepted_encoding[i] <= arg:
            return i

    raise ValueError("invalid max segments accepted: %r" % (arg,))
def function[encode_max_segments_accepted, parameter[arg]]: constant[Encode the maximum number of segments the device will accept, Section 20.1.2.4, and if the device says it can only accept one segment it shouldn't say that it supports segmentation!] if <ast.UnaryOp object at 0x7da1b26adcf0> begin[:] return[constant[0]] if compare[name[arg] greater[>] constant[64]] begin[:] return[constant[7]] for taget[name[i]] in starred[call[name[range], parameter[constant[6], constant[0], <ast.UnaryOp object at 0x7da1b08e6380>]]] begin[:] if compare[call[name[_max_segments_accepted_encoding]][name[i]] less_or_equal[<=] name[arg]] begin[:] return[name[i]] <ast.Raise object at 0x7da1b08e4280>
keyword[def] identifier[encode_max_segments_accepted] ( identifier[arg] ): literal[string] keyword[if] keyword[not] identifier[arg] : keyword[return] literal[int] keyword[if] identifier[arg] > literal[int] : keyword[return] literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ,- literal[int] ): keyword[if] identifier[_max_segments_accepted_encoding] [ identifier[i] ]<= identifier[arg] : keyword[return] identifier[i] keyword[raise] identifier[ValueError] ( literal[string] %( identifier[arg] ,))
def encode_max_segments_accepted(arg):
    """Encode the maximum number of segments the device will accept, Section 20.1.2.4, and if the device says it can only accept one segment it shouldn't say that it supports segmentation!"""
    # unspecified
    if not arg:
        return 0 # depends on [control=['if'], data=[]]
    if arg > 64:
        return 7 # depends on [control=['if'], data=[]]
    # the largest encodable value not greater than the arg
    for i in range(6, 0, -1):
        if _max_segments_accepted_encoding[i] <= arg:
            return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
    raise ValueError('invalid max segments accepted: %r' % (arg,))
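A worked sketch of the encoding above. The lookup table is not shown in this excerpt; the values below assume the BACnet Section 20.1.2.4 mapping, where indices 1-6 stand for 2, 4, 8, 16, 32 and 64 segments.

# Assumed module-level table (indices 1..6 -> 2, 4, 8, 16, 32, 64 segments).
_max_segments_accepted_encoding = [None, 2, 4, 8, 16, 32, 64, None]

assert encode_max_segments_accepted(None) == 0  # unspecified
assert encode_max_segments_accepted(4) == 2     # exactly 4 segments
assert encode_max_segments_accepted(50) == 5    # largest entry <= 50 is 32
assert encode_max_segments_accepted(200) == 7   # more than 64 segments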
def flatten_urls(self, urls):
    """
    Flattens urls for the route grouping feature of glim.

    Args
    ----
      urls (dict): a dict of url definitions.

    Returns
    -------
      ruleset (list): a list of rule dicts, each with 'url',
        'endpoint' and 'methods' keys
    """
    available_methods = ['POST', 'PUT', 'OPTIONS', 'GET', 'DELETE',
                         'TRACE', 'COPY']
    ruleset = []
    for route, endpoint in urls.items():
        route_pieces = route.split(' ')
        try:
            methods = url = None
            if len(route_pieces) > 1:
                methods = [route_pieces[0]]
                url = route_pieces[1]
            else:
                methods = available_methods
                url = route_pieces[0]

            endpoint_pieces = endpoint.split('.')
            if len(endpoint_pieces) > 1:
                rule = {'url': url, 'endpoint': endpoint, 'methods': methods}
                ruleset.append(rule)
            else:
                for method in available_methods:
                    rule = {
                        'url': url,
                        'endpoint': '%s.%s' % (endpoint, method.lower()),
                        'methods': [method]
                    }
                    ruleset.append(rule)
        except Exception as e:
            raise InvalidRouteDefinitionError()

    return ruleset
def function[flatten_urls, parameter[self, urls]]: constant[ Flattens urls for the route grouping feature of glim. Args ---- urls (dict): a dict of url definitions. Returns ------- ruleset (list): a list of rule dicts, each with 'url', 'endpoint' and 'methods' keys ] variable[available_methods] assign[=] list[[<ast.Constant object at 0x7da20c6c41f0>, <ast.Constant object at 0x7da20c6c4ac0>, <ast.Constant object at 0x7da20c6c65c0>, <ast.Constant object at 0x7da20c6c4ca0>, <ast.Constant object at 0x7da20c6c6d10>, <ast.Constant object at 0x7da20c6c60e0>, <ast.Constant object at 0x7da20c6c6740>]] variable[ruleset] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20c6c4c40>, <ast.Name object at 0x7da20c6c64a0>]]] in starred[call[name[urls].items, parameter[]]] begin[:] variable[route_pieces] assign[=] call[name[route].split, parameter[constant[ ]]] <ast.Try object at 0x7da20c6c6b30> return[name[ruleset]]
keyword[def] identifier[flatten_urls] ( identifier[self] , identifier[urls] ): literal[string] identifier[available_methods] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[ruleset] =[] keyword[for] identifier[route] , identifier[endpoint] keyword[in] identifier[urls] . identifier[items] (): identifier[route_pieces] = identifier[route] . identifier[split] ( literal[string] ) keyword[try] : identifier[methods] = identifier[url] = keyword[None] keyword[if] identifier[len] ( identifier[route_pieces] )> literal[int] : identifier[methods] =[ identifier[route_pieces] [ literal[int] ]] identifier[url] = identifier[route_pieces] [ literal[int] ] keyword[else] : identifier[methods] = identifier[available_methods] identifier[url] = identifier[route_pieces] [ literal[int] ] identifier[endpoint_pieces] = identifier[endpoint] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[endpoint_pieces] )> literal[int] : identifier[rule] ={ literal[string] : identifier[url] , literal[string] : identifier[endpoint] , literal[string] : identifier[methods] } identifier[ruleset] . identifier[append] ( identifier[rule] ) keyword[else] : keyword[for] identifier[method] keyword[in] identifier[available_methods] : identifier[rule] ={ literal[string] : identifier[url] , literal[string] : literal[string] %( identifier[endpoint] , identifier[method] . identifier[lower] ()), literal[string] :[ identifier[method] ] } identifier[ruleset] . identifier[append] ( identifier[rule] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[raise] identifier[InvalidRouteDefinitionError] () keyword[return] identifier[ruleset]
def flatten_urls(self, urls):
    """
    Flattens urls for the route grouping feature of glim.

    Args
    ----
      urls (dict): a dict of url definitions.

    Returns
    -------
      ruleset (list): a list of rule dicts, each with 'url',
        'endpoint' and 'methods' keys
    """
    available_methods = ['POST', 'PUT', 'OPTIONS', 'GET', 'DELETE', 'TRACE', 'COPY']
    ruleset = []
    for (route, endpoint) in urls.items():
        route_pieces = route.split(' ')
        try:
            methods = url = None
            if len(route_pieces) > 1:
                methods = [route_pieces[0]]
                url = route_pieces[1] # depends on [control=['if'], data=[]]
            else:
                methods = available_methods
                url = route_pieces[0]
            endpoint_pieces = endpoint.split('.')
            if len(endpoint_pieces) > 1:
                rule = {'url': url, 'endpoint': endpoint, 'methods': methods}
                ruleset.append(rule) # depends on [control=['if'], data=[]]
            else:
                for method in available_methods:
                    rule = {'url': url, 'endpoint': '%s.%s' % (endpoint, method.lower()), 'methods': [method]}
                    ruleset.append(rule) # depends on [control=['for'], data=['method']] # depends on [control=['try'], data=[]]
        except Exception as e:
            raise InvalidRouteDefinitionError() # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
    return ruleset
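A usage sketch derived from the branches above; the route strings and the dispatcher instance are hypothetical.

urls = {
    "GET /posts": "PostController.index",  # explicit method, dotted endpoint
    "/auth": "AuthController",             # no method prefix, bare endpoint
}

for rule in dispatcher.flatten_urls(urls):
    print(rule["methods"], rule["url"], "->", rule["endpoint"])

# "GET /posts" yields a single rule: ['GET'] /posts -> PostController.index
# "/auth" yields one rule per verb, e.g. ['POST'] /auth -> AuthController.post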
def default_styles(): """Generate default ODF styles.""" styles = {} def _add_style(name, **kwargs): styles[name] = _create_style(name, **kwargs) _add_style('heading-1', family='paragraph', fontsize='24pt', fontweight='bold', ) _add_style('heading-2', family='paragraph', fontsize='22pt', fontweight='bold', ) _add_style('heading-3', family='paragraph', fontsize='20pt', fontweight='bold', ) _add_style('heading-4', family='paragraph', fontsize='18pt', fontweight='bold', ) _add_style('heading-5', family='paragraph', fontsize='16pt', fontweight='bold', ) _add_style('heading-6', family='paragraph', fontsize='14pt', fontweight='bold', ) _add_style('normal-paragraph', family='paragraph', fontsize='12pt', marginbottom='0.25cm', ) _add_style('code', family='paragraph', fontsize='10pt', fontweight='bold', fontfamily='Courier New', color='#555555', ) _add_style('quote', family='paragraph', fontsize='12pt', fontstyle='italic', ) _add_style('list-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm', ) _add_style('sublist-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm', ) _add_style('numbered-list-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm', ) _add_style('normal-text', family='text', fontsize='12pt', ) _add_style('italic', family='text', fontstyle='italic', fontsize='12pt', ) _add_style('bold', family='text', fontweight='bold', fontsize='12pt', ) _add_style('url', family='text', fontsize='12pt', fontweight='bold', fontfamily='Courier', ) _add_style('inline-code', family='text', fontsize='10pt', fontweight='bold', fontfamily='Courier New', color='#555555', ) styles['_numbered_list'] = _numbered_style() return styles
def function[default_styles, parameter[]]: constant[Generate default ODF styles.] variable[styles] assign[=] dictionary[[], []] def function[_add_style, parameter[name]]: call[name[styles]][name[name]] assign[=] call[name[_create_style], parameter[name[name]]] call[name[_add_style], parameter[constant[heading-1]]] call[name[_add_style], parameter[constant[heading-2]]] call[name[_add_style], parameter[constant[heading-3]]] call[name[_add_style], parameter[constant[heading-4]]] call[name[_add_style], parameter[constant[heading-5]]] call[name[_add_style], parameter[constant[heading-6]]] call[name[_add_style], parameter[constant[normal-paragraph]]] call[name[_add_style], parameter[constant[code]]] call[name[_add_style], parameter[constant[quote]]] call[name[_add_style], parameter[constant[list-paragraph]]] call[name[_add_style], parameter[constant[sublist-paragraph]]] call[name[_add_style], parameter[constant[numbered-list-paragraph]]] call[name[_add_style], parameter[constant[normal-text]]] call[name[_add_style], parameter[constant[italic]]] call[name[_add_style], parameter[constant[bold]]] call[name[_add_style], parameter[constant[url]]] call[name[_add_style], parameter[constant[inline-code]]] call[name[styles]][constant[_numbered_list]] assign[=] call[name[_numbered_style], parameter[]] return[name[styles]]
keyword[def] identifier[default_styles] (): literal[string] identifier[styles] ={} keyword[def] identifier[_add_style] ( identifier[name] ,** identifier[kwargs] ): identifier[styles] [ identifier[name] ]= identifier[_create_style] ( identifier[name] ,** identifier[kwargs] ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[marginbottom] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , identifier[fontfamily] = literal[string] , identifier[color] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontstyle] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[marginbottom] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[marginbottom] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[marginbottom] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontstyle] = literal[string] , identifier[fontsize] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontweight] = literal[string] , identifier[fontsize] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , identifier[fontfamily] = literal[string] , ) identifier[_add_style] ( literal[string] , identifier[family] = literal[string] , identifier[fontsize] = literal[string] , identifier[fontweight] = literal[string] , identifier[fontfamily] = literal[string] , identifier[color] = literal[string] , ) identifier[styles] [ literal[string] ]= identifier[_numbered_style] () keyword[return] identifier[styles]
def default_styles(): """Generate default ODF styles.""" styles = {} def _add_style(name, **kwargs): styles[name] = _create_style(name, **kwargs) _add_style('heading-1', family='paragraph', fontsize='24pt', fontweight='bold') _add_style('heading-2', family='paragraph', fontsize='22pt', fontweight='bold') _add_style('heading-3', family='paragraph', fontsize='20pt', fontweight='bold') _add_style('heading-4', family='paragraph', fontsize='18pt', fontweight='bold') _add_style('heading-5', family='paragraph', fontsize='16pt', fontweight='bold') _add_style('heading-6', family='paragraph', fontsize='14pt', fontweight='bold') _add_style('normal-paragraph', family='paragraph', fontsize='12pt', marginbottom='0.25cm') _add_style('code', family='paragraph', fontsize='10pt', fontweight='bold', fontfamily='Courier New', color='#555555') _add_style('quote', family='paragraph', fontsize='12pt', fontstyle='italic') _add_style('list-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm') _add_style('sublist-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm') _add_style('numbered-list-paragraph', family='paragraph', fontsize='12pt', marginbottom='.1cm') _add_style('normal-text', family='text', fontsize='12pt') _add_style('italic', family='text', fontstyle='italic', fontsize='12pt') _add_style('bold', family='text', fontweight='bold', fontsize='12pt') _add_style('url', family='text', fontsize='12pt', fontweight='bold', fontfamily='Courier') _add_style('inline-code', family='text', fontsize='10pt', fontweight='bold', fontfamily='Courier New', color='#555555') styles['_numbered_list'] = _numbered_style() return styles
def patch( self, id, name=None, description=None, whitelisted_container_task_types=None, whitelisted_executable_task_types=None, ): """Partially updates a task whitelist on the saltant server. Args: id (int): The ID of the task whitelist. name (str, optional): The name of the task whitelist. description (str, optional): A description of the task whitelist. whitelisted_container_task_types (list, optional): A list of whitelisted container task type IDs. whitelisted_executable_task_types (list, optional): A list of whitelisted executable task type IDs. Returns: :class:`saltant.models.task_whitelist.TaskWhitelist`: A task whitelist model instance representing the task whitelist just updated. """ # Update the object request_url = self._client.base_api_url + self.detail_url.format(id=id) data_to_patch = {} if name is not None: data_to_patch["name"] = name if description is not None: data_to_patch["description"] = description if whitelisted_container_task_types is not None: data_to_patch[ "whitelisted_container_task_types" ] = whitelisted_container_task_types if whitelisted_executable_task_types is not None: data_to_patch[ "whitelisted_executable_task_types" ] = whitelisted_executable_task_types response = self._client.session.patch(request_url, data=data_to_patch) # Validate that the request was successful self.validate_request_success( response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK, ) # Return a model instance representing the task instance return self.response_data_to_model_instance(response.json())
def function[patch, parameter[self, id, name, description, whitelisted_container_task_types, whitelisted_executable_task_types]]: constant[Partially updates a task whitelist on the saltant server. Args: id (int): The ID of the task whitelist. name (str, optional): The name of the task whitelist. description (str, optional): A description of the task whitelist. whitelisted_container_task_types (list, optional): A list of whitelisted container task type IDs. whitelisted_executable_task_types (list, optional): A list of whitelisted executable task type IDs. Returns: :class:`saltant.models.task_whitelist.TaskWhitelist`: A task whitelist model instance representing the task whitelist just updated. ] variable[request_url] assign[=] binary_operation[name[self]._client.base_api_url + call[name[self].detail_url.format, parameter[]]] variable[data_to_patch] assign[=] dictionary[[], []] if compare[name[name] is_not constant[None]] begin[:] call[name[data_to_patch]][constant[name]] assign[=] name[name] if compare[name[description] is_not constant[None]] begin[:] call[name[data_to_patch]][constant[description]] assign[=] name[description] if compare[name[whitelisted_container_task_types] is_not constant[None]] begin[:] call[name[data_to_patch]][constant[whitelisted_container_task_types]] assign[=] name[whitelisted_container_task_types] if compare[name[whitelisted_executable_task_types] is_not constant[None]] begin[:] call[name[data_to_patch]][constant[whitelisted_executable_task_types]] assign[=] name[whitelisted_executable_task_types] variable[response] assign[=] call[name[self]._client.session.patch, parameter[name[request_url]]] call[name[self].validate_request_success, parameter[]] return[call[name[self].response_data_to_model_instance, parameter[call[name[response].json, parameter[]]]]]
keyword[def] identifier[patch] ( identifier[self] , identifier[id] , identifier[name] = keyword[None] , identifier[description] = keyword[None] , identifier[whitelisted_container_task_types] = keyword[None] , identifier[whitelisted_executable_task_types] = keyword[None] , ): literal[string] identifier[request_url] = identifier[self] . identifier[_client] . identifier[base_api_url] + identifier[self] . identifier[detail_url] . identifier[format] ( identifier[id] = identifier[id] ) identifier[data_to_patch] ={} keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] : identifier[data_to_patch] [ literal[string] ]= identifier[name] keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] : identifier[data_to_patch] [ literal[string] ]= identifier[description] keyword[if] identifier[whitelisted_container_task_types] keyword[is] keyword[not] keyword[None] : identifier[data_to_patch] [ literal[string] ]= identifier[whitelisted_container_task_types] keyword[if] identifier[whitelisted_executable_task_types] keyword[is] keyword[not] keyword[None] : identifier[data_to_patch] [ literal[string] ]= identifier[whitelisted_executable_task_types] identifier[response] = identifier[self] . identifier[_client] . identifier[session] . identifier[patch] ( identifier[request_url] , identifier[data] = identifier[data_to_patch] ) identifier[self] . identifier[validate_request_success] ( identifier[response_text] = identifier[response] . identifier[text] , identifier[request_url] = identifier[request_url] , identifier[status_code] = identifier[response] . identifier[status_code] , identifier[expected_status_code] = identifier[HTTP_200_OK] , ) keyword[return] identifier[self] . identifier[response_data_to_model_instance] ( identifier[response] . identifier[json] ())
def patch(self, id, name=None, description=None, whitelisted_container_task_types=None, whitelisted_executable_task_types=None): """Partially updates a task whitelist on the saltant server. Args: id (int): The ID of the task whitelist. name (str, optional): The name of the task whitelist. description (str, optional): A description of the task whitelist. whitelisted_container_task_types (list, optional): A list of whitelisted container task type IDs. whitelisted_executable_task_types (list, optional): A list of whitelisted executable task type IDs. Returns: :class:`saltant.models.task_whitelist.TaskWhitelist`: A task whitelist model instance representing the task whitelist just updated. """ # Update the object request_url = self._client.base_api_url + self.detail_url.format(id=id) data_to_patch = {} if name is not None: data_to_patch['name'] = name # depends on [control=['if'], data=['name']] if description is not None: data_to_patch['description'] = description # depends on [control=['if'], data=['description']] if whitelisted_container_task_types is not None: data_to_patch['whitelisted_container_task_types'] = whitelisted_container_task_types # depends on [control=['if'], data=['whitelisted_container_task_types']] if whitelisted_executable_task_types is not None: data_to_patch['whitelisted_executable_task_types'] = whitelisted_executable_task_types # depends on [control=['if'], data=['whitelisted_executable_task_types']] response = self._client.session.patch(request_url, data=data_to_patch) # Validate that the request was successful self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK) # Return a model instance representing the task instance return self.response_data_to_model_instance(response.json())
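A hypothetical usage sketch of patch() above; the manager wiring, the whitelist id and the field values are illustrative only.

# Only the fields passed in end up in the PATCH body, so the server keeps
# every other attribute of whitelist 42 unchanged.
whitelist = task_whitelists.patch(
    id=42,
    description="only vetted task types",
    whitelisted_container_task_types=[1, 3],
)
print(whitelist.name)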
def get_relations(self, database, schema):
    """Case-insensitively return all relations matching the given
    database and schema.

    :param str database: The case-insensitive database name to list from.
    :param str schema: The case-insensitive schema name to list from.
    :return List[BaseRelation]: The list of relations with the given schema
    """
    schema = _lower(schema)
    with self.lock:
        results = [
            r.inner for r in self.relations.values()
            if (r.schema == _lower(schema) and
                r.database == _lower(database))
        ]

    if None in results:
        dbt.exceptions.raise_cache_inconsistent(
            'in get_relations, a None relation was found in the cache!'
        )
    return results
def function[get_relations, parameter[self, database, schema]]: constant[Case-insensitively return all relations matching the given database and schema. :param str database: The case-insensitive database name to list from. :param str schema: The case-insensitive schema name to list from. :return List[BaseRelation]: The list of relations with the given schema ] variable[schema] assign[=] call[name[_lower], parameter[name[schema]]] with name[self].lock begin[:] variable[results] assign[=] <ast.ListComp object at 0x7da1b1b3e1a0> if compare[constant[None] in name[results]] begin[:] call[name[dbt].exceptions.raise_cache_inconsistent, parameter[constant[in get_relations, a None relation was found in the cache!]]] return[name[results]]
keyword[def] identifier[get_relations] ( identifier[self] , identifier[database] , identifier[schema] ): literal[string] identifier[schema] = identifier[_lower] ( identifier[schema] ) keyword[with] identifier[self] . identifier[lock] : identifier[results] =[ identifier[r] . identifier[inner] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[relations] . identifier[values] () keyword[if] ( identifier[r] . identifier[schema] == identifier[_lower] ( identifier[schema] ) keyword[and] identifier[r] . identifier[database] == identifier[_lower] ( identifier[database] )) ] keyword[if] keyword[None] keyword[in] identifier[results] : identifier[dbt] . identifier[exceptions] . identifier[raise_cache_inconsistent] ( literal[string] ) keyword[return] identifier[results]
def get_relations(self, database, schema):
    """Case-insensitively return all relations matching the given
    database and schema.

    :param str database: The case-insensitive database name to list from.
    :param str schema: The case-insensitive schema name to list from.
    :return List[BaseRelation]: The list of relations with the given schema
    """
    schema = _lower(schema)
    with self.lock:
        results = [r.inner for r in self.relations.values() if r.schema == _lower(schema) and r.database == _lower(database)] # depends on [control=['with'], data=[]]
    if None in results:
        dbt.exceptions.raise_cache_inconsistent('in get_relations, a None relation was found in the cache!') # depends on [control=['if'], data=[]]
    return results
def t_B_SEQUENCE_COMPACT_START(self, t): r""" \-\ + (?= -\ ) # ^ ^ sequence indicator | \-\ + (?= [\{\[]\ | [^:\n]*:\s ) # ^ ^ ^^^ map indicator # ^ ^ flow indicator """ indent_status, curr_depth, next_depth = self.get_indent_status(t) if indent_status == 'INDENT': self.indent_stack.append(next_depth) return t msg = dedent(""" expected 'INDENT', got {indent_status!r} current_depth: {curr_depth} next_depth: {next_depth} token: {t} """).format(**vars()) raise YAMLUnknownSyntaxError(msg)
def function[t_B_SEQUENCE_COMPACT_START, parameter[self, t]]: constant[ \-\ + (?= -\ ) # ^ ^ sequence indicator | \-\ + (?= [\{\[]\ | [^:\n]*:\s ) # ^ ^ ^^^ map indicator # ^ ^ flow indicator ] <ast.Tuple object at 0x7da1b2594190> assign[=] call[name[self].get_indent_status, parameter[name[t]]] if compare[name[indent_status] equal[==] constant[INDENT]] begin[:] call[name[self].indent_stack.append, parameter[name[next_depth]]] return[name[t]] variable[msg] assign[=] call[call[name[dedent], parameter[constant[ expected 'INDENT', got {indent_status!r} current_depth: {curr_depth} next_depth: {next_depth} token: {t} ]]].format, parameter[]] <ast.Raise object at 0x7da1b2595d20>
keyword[def] identifier[t_B_SEQUENCE_COMPACT_START] ( identifier[self] , identifier[t] ): literal[string] identifier[indent_status] , identifier[curr_depth] , identifier[next_depth] = identifier[self] . identifier[get_indent_status] ( identifier[t] ) keyword[if] identifier[indent_status] == literal[string] : identifier[self] . identifier[indent_stack] . identifier[append] ( identifier[next_depth] ) keyword[return] identifier[t] identifier[msg] = identifier[dedent] ( literal[string] ). identifier[format] (** identifier[vars] ()) keyword[raise] identifier[YAMLUnknownSyntaxError] ( identifier[msg] )
def t_B_SEQUENCE_COMPACT_START(self, t): """ \\-\\ + (?= -\\ ) # ^ ^ sequence indicator | \\-\\ + (?= [\\{\\[]\\ | [^:\\n]*:\\s ) # ^ ^ ^^^ map indicator # ^ ^ flow indicator """ (indent_status, curr_depth, next_depth) = self.get_indent_status(t) if indent_status == 'INDENT': self.indent_stack.append(next_depth) return t # depends on [control=['if'], data=[]] msg = dedent("\n expected 'INDENT', got {indent_status!r}\n current_depth: {curr_depth}\n next_depth: {next_depth}\n token: {t}\n ").format(**vars()) raise YAMLUnknownSyntaxError(msg)
def delete(uid):
    '''
    Delete by uid
    '''
    q_u1 = TabPostHist.delete().where(TabPostHist.post_id == uid)
    q_u1.execute()
    # Peewee expressions must be combined with `|`; plain `or` would
    # silently keep only the first condition.
    q_u2 = TabRel.delete().where((TabRel.post_f_id == uid) | (TabRel.post_t_id == uid))
    q_u2.execute()
    q_u3 = TabCollect.delete().where(TabCollect.post_id == uid)
    q_u3.execute()
    q_u4 = TabPost2Tag.delete().where(TabPost2Tag.post_id == uid)
    q_u4.execute()
    q_u5 = TabUsage.delete().where(TabUsage.post_id == uid)
    q_u5.execute()
    reply_arr = []
    for reply in TabUser2Reply.select().where(TabUser2Reply.reply_id == uid):
        reply_arr.append(reply.reply_id.uid)
    q_u6 = TabUser2Reply.delete().where(TabUser2Reply.reply_id == uid)
    q_u6.execute()
    for replyid in reply_arr:
        TabReply.delete().where(TabReply.uid == replyid).execute()
    q_u7 = TabEvaluation.delete().where(TabEvaluation.post_id == uid)
    q_u7.execute()
    q_u8 = TabRating.delete().where(TabRating.post_id == uid)
    q_u8.execute()
    return MHelper.delete(TabPost, uid)
def function[delete, parameter[uid]]: constant[ Delete by uid ] variable[q_u1] assign[=] call[call[name[TabPostHist].delete, parameter[]].where, parameter[compare[name[TabPostHist].post_id equal[==] name[uid]]]] call[name[q_u1].execute, parameter[]] variable[q_u2] assign[=] call[call[name[TabRel].delete, parameter[]].where, parameter[<ast.BoolOp object at 0x7da1b04f4fa0>]] call[name[q_u2].execute, parameter[]] variable[q_u3] assign[=] call[call[name[TabCollect].delete, parameter[]].where, parameter[compare[name[TabCollect].post_id equal[==] name[uid]]]] call[name[q_u3].execute, parameter[]] variable[q_u4] assign[=] call[call[name[TabPost2Tag].delete, parameter[]].where, parameter[compare[name[TabPost2Tag].post_id equal[==] name[uid]]]] call[name[q_u4].execute, parameter[]] variable[q_u5] assign[=] call[call[name[TabUsage].delete, parameter[]].where, parameter[compare[name[TabUsage].post_id equal[==] name[uid]]]] call[name[q_u5].execute, parameter[]] variable[reply_arr] assign[=] list[[]] for taget[name[reply]] in starred[call[call[name[TabUser2Reply].select, parameter[]].where, parameter[compare[name[TabUser2Reply].reply_id equal[==] name[uid]]]]] begin[:] call[name[reply_arr].append, parameter[name[reply].reply_id.uid]] variable[q_u6] assign[=] call[call[name[TabUser2Reply].delete, parameter[]].where, parameter[compare[name[TabUser2Reply].reply_id equal[==] name[uid]]]] call[name[q_u6].execute, parameter[]] for taget[name[replyid]] in starred[name[reply_arr]] begin[:] call[call[call[name[TabReply].delete, parameter[]].where, parameter[compare[name[TabReply].uid equal[==] name[replyid]]]].execute, parameter[]] variable[q_u7] assign[=] call[call[name[TabEvaluation].delete, parameter[]].where, parameter[compare[name[TabEvaluation].post_id equal[==] name[uid]]]] call[name[q_u7].execute, parameter[]] variable[q_u8] assign[=] call[call[name[TabRating].delete, parameter[]].where, parameter[compare[name[TabRating].post_id equal[==] name[uid]]]] call[name[q_u8].execute, parameter[]] return[call[name[MHelper].delete, parameter[name[TabPost], name[uid]]]]
keyword[def] identifier[delete] ( identifier[uid] ): literal[string] identifier[q_u1] = identifier[TabPostHist] . identifier[delete] (). identifier[where] ( identifier[TabPostHist] . identifier[post_id] == identifier[uid] ) identifier[q_u1] . identifier[execute] () identifier[q_u2] = identifier[TabRel] . identifier[delete] (). identifier[where] ( identifier[TabRel] . identifier[post_f_id] == identifier[uid] keyword[or] identifier[TabRel] . identifier[post_t_id] == identifier[uid] ) identifier[q_u2] . identifier[execute] () identifier[q_u3] = identifier[TabCollect] . identifier[delete] (). identifier[where] ( identifier[TabCollect] . identifier[post_id] == identifier[uid] ) identifier[q_u3] . identifier[execute] () identifier[q_u4] = identifier[TabPost2Tag] . identifier[delete] (). identifier[where] ( identifier[TabPost2Tag] . identifier[post_id] == identifier[uid] ) identifier[q_u4] . identifier[execute] () identifier[q_u5] = identifier[TabUsage] . identifier[delete] (). identifier[where] ( identifier[TabUsage] . identifier[post_id] == identifier[uid] ) identifier[q_u5] . identifier[execute] () identifier[reply_arr] =[] keyword[for] identifier[reply] keyword[in] identifier[TabUser2Reply] . identifier[select] (). identifier[where] ( identifier[TabUser2Reply] . identifier[reply_id] == identifier[uid] ): identifier[reply_arr] . identifier[append] ( identifier[reply] . identifier[reply_id] . identifier[uid] ) identifier[q_u6] = identifier[TabUser2Reply] . identifier[delete] (). identifier[where] ( identifier[TabUser2Reply] . identifier[reply_id] == identifier[uid] ) identifier[q_u6] . identifier[execute] () keyword[for] identifier[replyid] keyword[in] identifier[reply_arr] : identifier[TabReply] . identifier[delete] (). identifier[where] ( identifier[TabReply] . identifier[uid] == identifier[replyid] ). identifier[execute] () identifier[q_u7] = identifier[TabEvaluation] . identifier[delete] (). identifier[where] ( identifier[TabEvaluation] . identifier[post_id] == identifier[uid] ) identifier[q_u7] . identifier[execute] () identifier[q_u8] = identifier[TabRating] . identifier[delete] (). identifier[where] ( identifier[TabRating] . identifier[post_id] == identifier[uid] ) identifier[q_u8] . identifier[execute] () keyword[return] identifier[MHelper] . identifier[delete] ( identifier[TabPost] , identifier[uid] )
def delete(uid):
    """ Delete by uid """
    q_u1 = TabPostHist.delete().where(TabPostHist.post_id == uid)
    q_u1.execute()
    q_u2 = TabRel.delete().where((TabRel.post_f_id == uid) | (TabRel.post_t_id == uid))
    q_u2.execute()
    q_u3 = TabCollect.delete().where(TabCollect.post_id == uid)
    q_u3.execute()
    q_u4 = TabPost2Tag.delete().where(TabPost2Tag.post_id == uid)
    q_u4.execute()
    q_u5 = TabUsage.delete().where(TabUsage.post_id == uid)
    q_u5.execute()
    reply_arr = []
    for reply in TabUser2Reply.select().where(TabUser2Reply.reply_id == uid):
        reply_arr.append(reply.reply_id.uid) # depends on [control=['for'], data=['reply']]
    q_u6 = TabUser2Reply.delete().where(TabUser2Reply.reply_id == uid)
    q_u6.execute()
    for replyid in reply_arr:
        TabReply.delete().where(TabReply.uid == replyid).execute() # depends on [control=['for'], data=['replyid']]
    q_u7 = TabEvaluation.delete().where(TabEvaluation.post_id == uid)
    q_u7.execute()
    q_u8 = TabRating.delete().where(TabRating.post_id == uid)
    q_u8.execute()
    return MHelper.delete(TabPost, uid)
def cancel(self, tid, session):
    '''taobao.logistics.online.cancel: interface for cancelling a logistics order.

    Call this interface to cancel an order's shipment and re-select a
    logistics company, provided the logistics company has not yet picked
    up the goods. Orders that have not been shipped, or that have already
    been picked up by the logistics company, cannot be cancelled.'''
    request = TOPRequest('taobao.logistics.online.cancel')
    request['tid'] = tid
    self.create(self.execute(request, session), fields = ['is_success','modify_time','recreated_order_id'], models = {'modify_time':TOPDate})
    return self
def function[cancel, parameter[self, tid, session]]: constant[taobao.logistics.online.cancel: interface for cancelling a logistics order. Call this interface to cancel an order's shipment and re-select a logistics company, provided the logistics company has not yet picked up the goods. Orders that have not been shipped, or that have already been picked up by the logistics company, cannot be cancelled.] variable[request] assign[=] call[name[TOPRequest], parameter[constant[taobao.logistics.online.cancel]]] call[name[request]][constant[tid]] assign[=] name[tid] call[name[self].create, parameter[call[name[self].execute, parameter[name[request], name[session]]]]] return[name[self]]
keyword[def] identifier[cancel] ( identifier[self] , identifier[tid] , identifier[session] ): literal[string] identifier[request] = identifier[TOPRequest] ( literal[string] ) identifier[request] [ literal[string] ]= identifier[tid] identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] , identifier[session] ), identifier[fields] =[ literal[string] , literal[string] , literal[string] ], identifier[models] ={ literal[string] : identifier[TOPDate] }) keyword[return] identifier[self]
def cancel(self, tid, session):
    """taobao.logistics.online.cancel: interface for cancelling a logistics order. Call this interface to cancel an order's shipment and re-select a logistics company, provided the logistics company has not yet picked up the goods. Orders that have not been shipped, or that have already been picked up by the logistics company, cannot be cancelled."""
    request = TOPRequest('taobao.logistics.online.cancel')
    request['tid'] = tid
    self.create(self.execute(request, session), fields=['is_success', 'modify_time', 'recreated_order_id'], models={'modify_time': TOPDate})
    return self
def convolutional_layer_series(initial_size, layer_sequence):
    """
    Apply a series of convolutional layer transformations to an input size
    and return the resulting output size
    """
    size = initial_size
    for filter_size, padding, stride in layer_sequence:
        size = convolution_size_equation(size, filter_size, padding, stride)
    return size
def function[convolutional_layer_series, parameter[initial_size, layer_sequence]]: constant[ Apply a series of convolutional layer transformations to an input size and return the resulting output size ] variable[size] assign[=] name[initial_size] for taget[tuple[[<ast.Name object at 0x7da18bcca590>, <ast.Name object at 0x7da18bccb220>, <ast.Name object at 0x7da18bcc8f10>]]] in starred[name[layer_sequence]] begin[:] variable[size] assign[=] call[name[convolution_size_equation], parameter[name[size], name[filter_size], name[padding], name[stride]]] return[name[size]]
keyword[def] identifier[convolutional_layer_series] ( identifier[initial_size] , identifier[layer_sequence] ): literal[string] identifier[size] = identifier[initial_size] keyword[for] identifier[filter_size] , identifier[padding] , identifier[stride] keyword[in] identifier[layer_sequence] : identifier[size] = identifier[convolution_size_equation] ( identifier[size] , identifier[filter_size] , identifier[padding] , identifier[stride] ) keyword[return] identifier[size]
def convolutional_layer_series(initial_size, layer_sequence):
    """
    Apply a series of convolutional layer transformations to an input size
    and return the resulting output size
    """
    size = initial_size
    for (filter_size, padding, stride) in layer_sequence:
        size = convolution_size_equation(size, filter_size, padding, stride) # depends on [control=['for'], data=[]]
    return size
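A runnable sketch of convolutional_layer_series above. convolution_size_equation is not shown in this excerpt, so the standard output-size formula is assumed here.

# Assumed helper: standard conv output size, (n - f + 2p) // s + 1.
def convolution_size_equation(size, filter_size, padding, stride):
    return (size - filter_size + 2 * padding) // stride + 1

# A 28-pixel input through 5x5/pad 0/stride 1, then 3x3/pad 1/stride 2:
layers = [(5, 0, 1), (3, 1, 2)]
print(convolutional_layer_series(28, layers))  # 28 -> 24 -> 12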
def get_body_region(defined): """Return the start and end offsets of function body""" scope = defined.get_scope() pymodule = defined.get_module() lines = pymodule.lines node = defined.get_ast() start_line = node.lineno if defined.get_doc() is None: start_line = node.body[0].lineno elif len(node.body) > 1: start_line = node.body[1].lineno start = lines.get_line_start(start_line) scope_start = pymodule.logical_lines.logical_line_in(scope.start) if scope_start[1] >= start_line: # a one-liner! # XXX: what if colon appears in a string start = pymodule.source_code.index(':', start) + 1 while pymodule.source_code[start].isspace(): start += 1 end = min(lines.get_line_end(scope.end) + 1, len(pymodule.source_code)) return start, end
def function[get_body_region, parameter[defined]]: constant[Return the start and end offsets of function body] variable[scope] assign[=] call[name[defined].get_scope, parameter[]] variable[pymodule] assign[=] call[name[defined].get_module, parameter[]] variable[lines] assign[=] name[pymodule].lines variable[node] assign[=] call[name[defined].get_ast, parameter[]] variable[start_line] assign[=] name[node].lineno if compare[call[name[defined].get_doc, parameter[]] is constant[None]] begin[:] variable[start_line] assign[=] call[name[node].body][constant[0]].lineno variable[start] assign[=] call[name[lines].get_line_start, parameter[name[start_line]]] variable[scope_start] assign[=] call[name[pymodule].logical_lines.logical_line_in, parameter[name[scope].start]] if compare[call[name[scope_start]][constant[1]] greater_or_equal[>=] name[start_line]] begin[:] variable[start] assign[=] binary_operation[call[name[pymodule].source_code.index, parameter[constant[:], name[start]]] + constant[1]] while call[call[name[pymodule].source_code][name[start]].isspace, parameter[]] begin[:] <ast.AugAssign object at 0x7da1b065cca0> variable[end] assign[=] call[name[min], parameter[binary_operation[call[name[lines].get_line_end, parameter[name[scope].end]] + constant[1]], call[name[len], parameter[name[pymodule].source_code]]]] return[tuple[[<ast.Name object at 0x7da1b065e8c0>, <ast.Name object at 0x7da1b065e590>]]]
keyword[def] identifier[get_body_region] ( identifier[defined] ): literal[string] identifier[scope] = identifier[defined] . identifier[get_scope] () identifier[pymodule] = identifier[defined] . identifier[get_module] () identifier[lines] = identifier[pymodule] . identifier[lines] identifier[node] = identifier[defined] . identifier[get_ast] () identifier[start_line] = identifier[node] . identifier[lineno] keyword[if] identifier[defined] . identifier[get_doc] () keyword[is] keyword[None] : identifier[start_line] = identifier[node] . identifier[body] [ literal[int] ]. identifier[lineno] keyword[elif] identifier[len] ( identifier[node] . identifier[body] )> literal[int] : identifier[start_line] = identifier[node] . identifier[body] [ literal[int] ]. identifier[lineno] identifier[start] = identifier[lines] . identifier[get_line_start] ( identifier[start_line] ) identifier[scope_start] = identifier[pymodule] . identifier[logical_lines] . identifier[logical_line_in] ( identifier[scope] . identifier[start] ) keyword[if] identifier[scope_start] [ literal[int] ]>= identifier[start_line] : identifier[start] = identifier[pymodule] . identifier[source_code] . identifier[index] ( literal[string] , identifier[start] )+ literal[int] keyword[while] identifier[pymodule] . identifier[source_code] [ identifier[start] ]. identifier[isspace] (): identifier[start] += literal[int] identifier[end] = identifier[min] ( identifier[lines] . identifier[get_line_end] ( identifier[scope] . identifier[end] )+ literal[int] , identifier[len] ( identifier[pymodule] . identifier[source_code] )) keyword[return] identifier[start] , identifier[end]
def get_body_region(defined): """Return the start and end offsets of function body""" scope = defined.get_scope() pymodule = defined.get_module() lines = pymodule.lines node = defined.get_ast() start_line = node.lineno if defined.get_doc() is None: start_line = node.body[0].lineno # depends on [control=['if'], data=[]] elif len(node.body) > 1: start_line = node.body[1].lineno # depends on [control=['if'], data=[]] start = lines.get_line_start(start_line) scope_start = pymodule.logical_lines.logical_line_in(scope.start) if scope_start[1] >= start_line: # a one-liner! # XXX: what if colon appears in a string start = pymodule.source_code.index(':', start) + 1 while pymodule.source_code[start].isspace(): start += 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] end = min(lines.get_line_end(scope.end) + 1, len(pymodule.source_code)) return (start, end)
def getTemplates(self, workitems, template_folder=None,
                 template_names=None, keep=False, encoding="UTF-8"):
    """Get templates from a group of to-be-copied :class:`Workitems`
    and write them to files named after the names in `template_names`
    respectively.

    :param workitems: a :class:`list`/:class:`tuple`/:class:`set`
        contains the ids (integer or equivalent string) of some
        to-be-copied :class:`Workitems`
    :param template_names: a :class:`list`/:class:`tuple`/:class:`set`
        contains the template file names for copied
        :class:`Workitems`. If `None`, the new template files will
        be named after the :class:`rtcclient.workitem.Workitem` id
        with "`.template`" as a postfix
    :param template_folder: refer to
        :class:`rtcclient.template.Templater.getTemplate`
    :param keep: (default is False) refer to
        :class:`rtcclient.template.Templater.getTemplate`
    :param encoding: (default is "UTF-8") refer to
        :class:`rtcclient.template.Templater.getTemplate`
    """

    if (not workitems or
            isinstance(workitems, six.string_types) or
            isinstance(workitems, int) or
            isinstance(workitems, float) or
            not hasattr(workitems, "__iter__")):
        error_msg = "Input parameter 'workitems' is not iterable"
        self.log.error(error_msg)
        raise exception.BadValue(error_msg)

    if template_names is not None:
        if not hasattr(template_names, "__iter__"):
            error_msg = "Input parameter 'template_names' is not iterable"
            self.log.error(error_msg)
            raise exception.BadValue(error_msg)
        if len(workitems) != len(template_names):
            error_msg = "".join(["Input parameters 'workitems' and ",
                                 "'template_names' have different lengths"])
            self.log.error(error_msg)
            raise exception.BadValue(error_msg)

    for index, wk_id in enumerate(workitems):
        try:
            if template_names is not None:
                template_name = template_names[index]
            else:
                # str() so that integer workitem ids are accepted too
                template_name = ".".join([str(wk_id), "template"])
            self.getTemplate(wk_id,
                             template_name=template_name,
                             template_folder=template_folder,
                             keep=keep,
                             encoding=encoding)
        except Exception as excp:
            self.log.error("Exception occurred when fetching "
                           "template from <Workitem %s>: %s",
                           str(wk_id), excp)
            continue
    self.log.info("Successfully fetched all the templates from "
                  "workitems: %s", workitems)
def function[getTemplates, parameter[self, workitems, template_folder, template_names, keep, encoding]]: constant[Get templates from a group of to-be-copied :class:`Workitems` and write them to files named after the names in `template_names` respectively. :param workitems: a :class:`list`/:class:`tuple`/:class:`set` contains the ids (integer or equivalent string) of some to-be-copied :class:`Workitems` :param template_names: a :class:`list`/:class:`tuple`/:class:`set` contains the template file names for copied :class:`Workitems`. If `None`, the new template files will be named after the :class:`rtcclient.workitem.Workitem` id with "`.template`" as a postfix :param template_folder: refer to :class:`rtcclient.template.Templater.getTemplate` :param keep: (default is False) refer to :class:`rtcclient.template.Templater.getTemplate` :param encoding: (default is "UTF-8") refer to :class:`rtcclient.template.Templater.getTemplate` ] if <ast.BoolOp object at 0x7da20e9637f0> begin[:] variable[error_msg] assign[=] constant[Input parameter 'workitems' is not iterable] call[name[self].log.error, parameter[name[error_msg]]] <ast.Raise object at 0x7da20e960af0> if compare[name[template_names] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da20e960fd0> begin[:] variable[error_msg] assign[=] constant[Input parameter 'template_names' is not iterable] call[name[self].log.error, parameter[name[error_msg]]] <ast.Raise object at 0x7da20e963a30> if compare[call[name[len], parameter[name[workitems]]] not_equal[!=] call[name[len], parameter[name[template_names]]]] begin[:] variable[error_msg] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da20e963970>, <ast.Constant object at 0x7da20e9611b0>]]]] call[name[self].log.error, parameter[name[error_msg]]] <ast.Raise object at 0x7da1b2838ca0> for taget[tuple[[<ast.Name object at 0x7da1b283a200>, <ast.Name object at 0x7da1b2839390>]]] in starred[call[name[enumerate], parameter[name[workitems]]]] begin[:] <ast.Try object at 0x7da1b283a050> call[name[self].log.info, parameter[constant[Successfully fetch all the templates from workitems: %s], name[workitems]]]
keyword[def] identifier[getTemplates] ( identifier[self] , identifier[workitems] , identifier[template_folder] = keyword[None] , identifier[template_names] = keyword[None] , identifier[keep] = keyword[False] , identifier[encoding] = literal[string] ): literal[string] keyword[if] ( keyword[not] identifier[workitems] keyword[or] identifier[isinstance] ( identifier[workitems] , identifier[six] . identifier[string_types] ) keyword[or] identifier[isinstance] ( identifier[workitems] , identifier[int] ) keyword[or] identifier[isinstance] ( identifier[workitems] , identifier[float] ) keyword[or] keyword[not] identifier[hasattr] ( identifier[workitems] , literal[string] )): identifier[error_msg] = literal[string] identifier[self] . identifier[log] . identifier[error] ( identifier[error_msg] ) keyword[raise] identifier[exception] . identifier[BadValue] ( identifier[error_msg] ) keyword[if] identifier[template_names] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[hasattr] ( identifier[template_names] , literal[string] ): identifier[error_msg] = literal[string] identifier[self] . identifier[log] . identifier[error] ( identifier[error_msg] ) keyword[raise] identifier[exception] . identifier[BadValue] ( identifier[error_msg] ) keyword[if] identifier[len] ( identifier[workitems] )!= identifier[len] ( identifier[template_names] ): identifier[error_msg] = literal[string] . identifier[join] ([ literal[string] , literal[string] ]) identifier[self] . identifier[log] . identifier[error] ( identifier[error_msg] ) keyword[raise] identifier[exception] . identifier[BadValue] ( identifier[error_msg] ) keyword[for] identifier[index] , identifier[wk_id] keyword[in] identifier[enumerate] ( identifier[workitems] ): keyword[try] : keyword[if] identifier[template_names] keyword[is] keyword[not] keyword[None] : identifier[template_name] = identifier[template_names] [ identifier[index] ] keyword[else] : identifier[template_name] = literal[string] . identifier[join] ([ identifier[wk_id] , literal[string] ]) identifier[self] . identifier[getTemplate] ( identifier[wk_id] , identifier[template_name] = identifier[template_name] , identifier[template_folder] = identifier[template_folder] , identifier[keep] = identifier[keep] , identifier[encoding] = identifier[encoding] ) keyword[except] identifier[Exception] keyword[as] identifier[excp] : identifier[self] . identifier[log] . identifier[error] ( literal[string] literal[string] , identifier[str] ( identifier[wk_id] ), identifier[excp] ) keyword[continue] identifier[self] . identifier[log] . identifier[info] ( literal[string] literal[string] , identifier[workitems] )
def getTemplates(self, workitems, template_folder=None, template_names=None, keep=False, encoding='UTF-8'):
    """Get templates from a group of to-be-copied :class:`Workitems`
    and write them to files named after the names in `template_names`
    respectively.

    :param workitems: a :class:`list`/:class:`tuple`/:class:`set`
        contains the ids (integer or equivalent string) of some
        to-be-copied :class:`Workitems`
    :param template_names: a :class:`list`/:class:`tuple`/:class:`set`
        contains the template file names for copied
        :class:`Workitems`. If `None`, the new template files will
        be named after the :class:`rtcclient.workitem.Workitem` id
        with "`.template`" as a postfix
    :param template_folder: refer to
        :class:`rtcclient.template.Templater.getTemplate`
    :param keep: (default is False) refer to
        :class:`rtcclient.template.Templater.getTemplate`
    :param encoding: (default is "UTF-8") refer to
        :class:`rtcclient.template.Templater.getTemplate`
    """
    if not workitems or isinstance(workitems, six.string_types) or isinstance(workitems, int) or isinstance(workitems, float) or (not hasattr(workitems, '__iter__')):
        error_msg = "Input parameter 'workitems' is not iterable"
        self.log.error(error_msg)
        raise exception.BadValue(error_msg) # depends on [control=['if'], data=[]]
    if template_names is not None:
        if not hasattr(template_names, '__iter__'):
            error_msg = "Input parameter 'template_names' is not iterable"
            self.log.error(error_msg)
            raise exception.BadValue(error_msg) # depends on [control=['if'], data=[]]
        if len(workitems) != len(template_names):
            error_msg = ''.join(["Input parameters 'workitems' and ", "'template_names' have different lengths"])
            self.log.error(error_msg)
            raise exception.BadValue(error_msg) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['template_names']]
    for (index, wk_id) in enumerate(workitems):
        try:
            if template_names is not None:
                template_name = template_names[index] # depends on [control=['if'], data=['template_names']]
            else:
                template_name = '.'.join([str(wk_id), 'template'])
            self.getTemplate(wk_id, template_name=template_name, template_folder=template_folder, keep=keep, encoding=encoding) # depends on [control=['try'], data=[]]
        except Exception as excp:
            self.log.error('Exception occurred when fetching template from <Workitem %s>: %s', str(wk_id), excp)
            continue # depends on [control=['except'], data=['excp']] # depends on [control=['for'], data=[]]
    self.log.info('Successfully fetched all the templates from workitems: %s', workitems)
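A hedged usage sketch for the getTemplates method above; the Templater instance, workitem ids, and file names below are invented for illustration.

# Assumes `templater` is an already-constructed rtcclient Templater
# and 123456/123457 are ids of existing workitems (both hypothetical).
templater.getTemplates([123456, 123457],
                       template_folder="my_templates",
                       template_names=["defect.template", "task.template"],
                       keep=False,
                       encoding="UTF-8")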
def set_contents_from_file(self, fp, headers=None, replace=True,
                           cb=None, num_cb=10, policy=None, md5=None,
                           res_upload_handler=None, size=None):
    """
    Store an object in GS using the name of the Key object as the
    key in GS and the contents of the file pointed to by 'fp' as the
    contents.

    :type fp: file
    :param fp: the file whose contents are to be uploaded

    :type headers: dict
    :param headers: additional HTTP headers to be sent with the PUT request.

    :type replace: bool
    :param replace: If this parameter is False, the method will first check
        to see if an object exists in the bucket with the same key. If it
        does, it won't overwrite it. The default value is True which will
        overwrite the object.

    :type cb: function
    :param cb: a callback function that will be called to report
        progress on the upload. The callback should accept two integer
        parameters, the first representing the number of bytes that have
        been successfully transmitted to GS and the second representing the
        total number of bytes that need to be transmitted.

    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
        parameter, this parameter determines the granularity of the callback
        by defining the maximum number of times the callback will be called
        during the file transfer.

    :type policy: :class:`boto.gs.acl.CannedACLStrings`
    :param policy: A canned ACL policy that will be applied to the new key
        in GS.

    :type md5: tuple
    :param md5: A tuple containing the hexdigest version of the MD5 checksum
        of the file as the first element and the Base64-encoded version of
        the plain checksum as the second element. This is the same format
        returned by the compute_md5 method. If you need to compute the MD5
        for any reason prior to upload, it's silly to have to do it twice,
        so this param, if present, will be used as the MD5 values of the
        file. Otherwise, the checksum will be computed.

    :type res_upload_handler: ResumableUploadHandler
    :param res_upload_handler: If provided, this handler will perform the
        upload.

    :type size: int
    :param size: (optional) The maximum number of bytes to read from
        the file pointer (fp). This is useful when uploading a
        file in multiple parts where you are splitting the file up
        into different ranges to be uploaded. If not specified,
        the default behaviour is to read all bytes from the file
        pointer. Fewer bytes may be available.
        Notes:

            1. The "size" parameter currently cannot be used when
               a resumable upload handler is given but is still
               useful for uploading part of a file as implemented
               by the parent class.
            2. At present Google Cloud Storage does not support
               multipart uploads.

    TODO: At some point we should refactor the Bucket and Key classes,
    to move functionality common to all providers into a parent class,
    and provider-specific functionality into subclasses (rather than
    just overriding/sharing code the way it currently works).
    """
    provider = self.bucket.connection.provider
    if res_upload_handler and size:
        # could use size instead of file_length if provided but...
        raise BotoClientError('"size" param not supported for resumable uploads.')
    headers = headers or {}
    if policy:
        headers[provider.acl_header] = policy
    if hasattr(fp, 'name'):
        self.path = fp.name
    if self.bucket is not None:
        if not md5:
            # compute_md5() also sets self.size to the actual
            # number of bytes read while computing the md5.
            md5 = self.compute_md5(fp, size)
            # adjust size if required
            size = self.size
        elif size:
            self.size = size
        else:
            # If md5 is provided, we still need the size, so
            # calculate it based on bytes to end of content
            spos = fp.tell()
            fp.seek(0, os.SEEK_END)
            self.size = fp.tell() - spos
            fp.seek(spos)
            size = self.size
        self.md5 = md5[0]
        self.base64md5 = md5[1]
        if self.name is None:
            self.name = self.md5
        if not replace:
            if self.bucket.lookup(self.name):
                return
        if res_upload_handler:
            res_upload_handler.send_file(self, fp, headers, cb, num_cb)
        else:
            # Not a resumable transfer so use basic send_file mechanism.
            self.send_file(fp, headers, cb, num_cb, size=size)
def function[set_contents_from_file, parameter[self, fp, headers, replace, cb, num_cb, policy, md5, res_upload_handler, size]]: constant[ Store an object in GS using the name of the Key object as the key in GS and the contents of the file pointed to by 'fp' as the contents. :type fp: file :param fp: the file whose contents are to be uploaded :type headers: dict :param headers: additional HTTP headers to be sent with the PUT request. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the total number of bytes that need to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type res_upload_handler: ResumableUploadHandler :param res_upload_handler: If provided, this handler will perform the upload. :type size: int :param size: (optional) The Maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Less bytes may be available. Notes: 1. The "size" parameter currently cannot be used when a resumable upload handler is given but is still useful for uploading part of a file as implemented by the parent class. 2. At present Google Cloud Storage does not support multipart uploads. TODO: At some point we should refactor the Bucket and Key classes, to move functionality common to all providers into a parent class, and provider-specific functionality into subclasses (rather than just overriding/sharing code the way it currently works). 
] variable[provider] assign[=] name[self].bucket.connection.provider if <ast.BoolOp object at 0x7da1b2661e40> begin[:] <ast.Raise object at 0x7da1b2661390> variable[headers] assign[=] <ast.BoolOp object at 0x7da1b2661630> if name[policy] begin[:] call[name[headers]][name[provider].acl_header] assign[=] name[policy] if call[name[hasattr], parameter[name[fp], constant[name]]] begin[:] name[self].path assign[=] name[fp].name if compare[name[self].bucket not_equal[!=] constant[None]] begin[:] if <ast.UnaryOp object at 0x7da1b2660430> begin[:] variable[md5] assign[=] call[name[self].compute_md5, parameter[name[fp], name[size]]] variable[size] assign[=] name[self].size name[self].md5 assign[=] call[name[md5]][constant[0]] name[self].base64md5 assign[=] call[name[md5]][constant[1]] if compare[name[self].name equal[==] constant[None]] begin[:] name[self].name assign[=] name[self].md5 if <ast.UnaryOp object at 0x7da20c6a9cc0> begin[:] if call[name[self].bucket.lookup, parameter[name[self].name]] begin[:] return[None] if name[res_upload_handler] begin[:] call[name[res_upload_handler].send_file, parameter[name[self], name[fp], name[headers], name[cb], name[num_cb]]]
keyword[def] identifier[set_contents_from_file] ( identifier[self] , identifier[fp] , identifier[headers] = keyword[None] , identifier[replace] = keyword[True] , identifier[cb] = keyword[None] , identifier[num_cb] = literal[int] , identifier[policy] = keyword[None] , identifier[md5] = keyword[None] , identifier[res_upload_handler] = keyword[None] , identifier[size] = keyword[None] ): literal[string] identifier[provider] = identifier[self] . identifier[bucket] . identifier[connection] . identifier[provider] keyword[if] identifier[res_upload_handler] keyword[and] identifier[size] : keyword[raise] identifier[BotoClientError] ( literal[string] ) identifier[headers] = identifier[headers] keyword[or] {} keyword[if] identifier[policy] : identifier[headers] [ identifier[provider] . identifier[acl_header] ]= identifier[policy] keyword[if] identifier[hasattr] ( identifier[fp] , literal[string] ): identifier[self] . identifier[path] = identifier[fp] . identifier[name] keyword[if] identifier[self] . identifier[bucket] != keyword[None] : keyword[if] keyword[not] identifier[md5] : identifier[md5] = identifier[self] . identifier[compute_md5] ( identifier[fp] , identifier[size] ) identifier[size] = identifier[self] . identifier[size] keyword[elif] identifier[size] : identifier[self] . identifier[size] = identifier[size] keyword[else] : identifier[spos] = identifier[fp] . identifier[tell] () identifier[fp] . identifier[seek] ( literal[int] , identifier[os] . identifier[SEEK_END] ) identifier[self] . identifier[size] = identifier[fp] . identifier[tell] ()- identifier[spos] identifier[fp] . identifier[seek] ( identifier[spos] ) identifier[size] = identifier[self] . identifier[size] identifier[self] . identifier[md5] = identifier[md5] [ literal[int] ] identifier[self] . identifier[base64md5] = identifier[md5] [ literal[int] ] keyword[if] identifier[self] . identifier[name] == keyword[None] : identifier[self] . identifier[name] = identifier[self] . identifier[md5] keyword[if] keyword[not] identifier[replace] : keyword[if] identifier[self] . identifier[bucket] . identifier[lookup] ( identifier[self] . identifier[name] ): keyword[return] keyword[if] identifier[res_upload_handler] : identifier[res_upload_handler] . identifier[send_file] ( identifier[self] , identifier[fp] , identifier[headers] , identifier[cb] , identifier[num_cb] ) keyword[else] : identifier[self] . identifier[send_file] ( identifier[fp] , identifier[headers] , identifier[cb] , identifier[num_cb] , identifier[size] = identifier[size] )
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None, res_upload_handler=None, size=None): """ Store an object in GS using the name of the Key object as the key in GS and the contents of the file pointed to by 'fp' as the contents. :type fp: file :param fp: the file whose contents are to be uploaded :type headers: dict :param headers: additional HTTP headers to be sent with the PUT request. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the total number of bytes that need to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.gs.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in GS. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type res_upload_handler: ResumableUploadHandler :param res_upload_handler: If provided, this handler will perform the upload. :type size: int :param size: (optional) The Maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Less bytes may be available. Notes: 1. The "size" parameter currently cannot be used when a resumable upload handler is given but is still useful for uploading part of a file as implemented by the parent class. 2. At present Google Cloud Storage does not support multipart uploads. TODO: At some point we should refactor the Bucket and Key classes, to move functionality common to all providers into a parent class, and provider-specific functionality into subclasses (rather than just overriding/sharing code the way it currently works). """ provider = self.bucket.connection.provider if res_upload_handler and size: # could use size instead of file_length if provided but... raise BotoClientError('"size" param not supported for resumable uploads.') # depends on [control=['if'], data=[]] headers = headers or {} if policy: headers[provider.acl_header] = policy # depends on [control=['if'], data=[]] if hasattr(fp, 'name'): self.path = fp.name # depends on [control=['if'], data=[]] if self.bucket != None: if not md5: # compute_md5() and also set self.size to actual # size of the bytes read computing the md5. 
md5 = self.compute_md5(fp, size) # adjust size if required size = self.size # depends on [control=['if'], data=[]] elif size: self.size = size # depends on [control=['if'], data=[]] else: # If md5 is provided, still need to size so # calculate based on bytes to end of content spos = fp.tell() fp.seek(0, os.SEEK_END) self.size = fp.tell() - spos fp.seek(spos) size = self.size self.md5 = md5[0] self.base64md5 = md5[1] if self.name == None: self.name = self.md5 # depends on [control=['if'], data=[]] if not replace: if self.bucket.lookup(self.name): return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if res_upload_handler: res_upload_handler.send_file(self, fp, headers, cb, num_cb) # depends on [control=['if'], data=[]] else: # Not a resumable transfer so use basic send_file mechanism. self.send_file(fp, headers, cb, num_cb, size=size) # depends on [control=['if'], data=[]]
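A minimal usage sketch for set_contents_from_file, assuming legacy boto with Google Cloud Storage credentials already configured; the bucket and object names are made up.

import boto

# connect_gs(), get_bucket(), and new_key() are part of the legacy
# boto API this method belongs to; everything else is illustrative.
conn = boto.connect_gs()
bucket = conn.get_bucket('example-bucket')
key = bucket.new_key('uploads/data.bin')
with open('data.bin', 'rb') as fp:
    key.set_contents_from_file(fp, replace=True)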
def sentinel2_toa_cloud_mask(input_img):
    """Extract cloud mask from the Sentinel 2 TOA QA60 band

    Parameters
    ----------
    input_img : ee.Image
        Image from the COPERNICUS/S2 collection with a QA60 band.

    Returns
    -------
    ee.Image

    Notes
    -----
    The output image is structured to be applied directly with updateMask(),
    i.e. 0 is cloud and 1 is cloud free.

    Bits
        10: Opaque clouds present
        11: Cirrus clouds present

    The Sentinel 2 TOA and SR cloud mask functions are currently identical.

    References
    ----------
    https://sentinel.esa.int/documents/247904/685211/Sentinel-2_User_Handbook
    https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-1c/cloud-masks

    """
    qa_img = input_img.select(['QA60'])
    cloud_mask = qa_img.rightShift(10).bitwiseAnd(1).neq(0)\
        .Or(qa_img.rightShift(11).bitwiseAnd(1).neq(0))

    # Set cloudy pixels to 0 and clear to 1
    return cloud_mask.Not()
def function[sentinel2_toa_cloud_mask, parameter[input_img]]: constant[Extract cloud mask from the Sentinel 2 TOA QA60 band Parameters ---------- input_img : ee.Image Image from the COPERNICUS/S2 collection with a QA60 band. Returns ------- ee.Image Notes ----- Output image is structured to be applied directly with updateMask() i.e. 0 is cloud, 1 is cloud free Bits 10: Opaque clouds present 11: Cirrus clouds present The Sentinel 2 TOA and SR cloud masks functions are currently identical References ---------- https://sentinel.esa.int/documents/247904/685211/Sentinel-2_User_Handbook https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-1c/cloud-masks ] variable[qa_img] assign[=] call[name[input_img].select, parameter[list[[<ast.Constant object at 0x7da1b23e4d90>]]]] variable[cloud_mask] assign[=] call[call[call[call[name[qa_img].rightShift, parameter[constant[10]]].bitwiseAnd, parameter[constant[1]]].neq, parameter[constant[0]]].Or, parameter[call[call[call[name[qa_img].rightShift, parameter[constant[11]]].bitwiseAnd, parameter[constant[1]]].neq, parameter[constant[0]]]]] return[call[name[cloud_mask].Not, parameter[]]]
keyword[def] identifier[sentinel2_toa_cloud_mask] ( identifier[input_img] ): literal[string] identifier[qa_img] = identifier[input_img] . identifier[select] ([ literal[string] ]) identifier[cloud_mask] = identifier[qa_img] . identifier[rightShift] ( literal[int] ). identifier[bitwiseAnd] ( literal[int] ). identifier[neq] ( literal[int] ). identifier[Or] ( identifier[qa_img] . identifier[rightShift] ( literal[int] ). identifier[bitwiseAnd] ( literal[int] ). identifier[neq] ( literal[int] )) keyword[return] identifier[cloud_mask] . identifier[Not] ()
def sentinel2_toa_cloud_mask(input_img):
    """Extract cloud mask from the Sentinel 2 TOA QA60 band

    Parameters
    ----------
    input_img : ee.Image
        Image from the COPERNICUS/S2 collection with a QA60 band.

    Returns
    -------
    ee.Image

    Notes
    -----
    The output image is structured to be applied directly with updateMask(),
    i.e. 0 is cloud and 1 is cloud free.

    Bits
        10: Opaque clouds present
        11: Cirrus clouds present

    The Sentinel 2 TOA and SR cloud mask functions are currently identical.

    References
    ----------
    https://sentinel.esa.int/documents/247904/685211/Sentinel-2_User_Handbook
    https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-1c/cloud-masks

    """
    qa_img = input_img.select(['QA60'])
    cloud_mask = qa_img.rightShift(10).bitwiseAnd(1).neq(0).Or(qa_img.rightShift(11).bitwiseAnd(1).neq(0))
    # Set cloudy pixels to 0 and clear to 1
    return cloud_mask.Not()
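A sketch of applying the mask with the Earth Engine Python API; it assumes ee.Initialize() has already been called with valid credentials, and the date range is arbitrary.

import ee

def _mask_s2(img):
    # updateMask() keeps pixels where the mask is non-zero, which is
    # exactly the 0-is-cloud / 1-is-clear convention described above.
    return img.updateMask(sentinel2_toa_cloud_mask(img))

s2_masked = (ee.ImageCollection('COPERNICUS/S2')
             .filterDate('2018-06-01', '2018-07-01')
             .map(_mask_s2))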
def get_grade_entry(self, grade_entry_id):
    """Gets the ``GradeEntry`` specified by its ``Id``.

    arg:    grade_entry_id (osid.id.Id): ``Id`` of the
            ``GradeEntry``
    return: (osid.grading.GradeEntry) - the grade entry
    raise:  NotFound - ``grade_entry_id`` not found
    raise:  NullArgument - ``grade_entry_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resource
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('grading',
                                     collection='GradeEntry',
                                     runtime=self._runtime)
    result = collection.find_one(
        dict({'_id': ObjectId(self._get_id(grade_entry_id,
                                           'grading').get_identifier())},
             **self._view_filter()))
    return objects.GradeEntry(osid_object_map=result,
                              runtime=self._runtime,
                              proxy=self._proxy)
def function[get_grade_entry, parameter[self, grade_entry_id]]: constant[Gets the ``GradeEntry`` specified by its ``Id``. arg: grade_entry_id (osid.id.Id): ``Id`` of the ``GradeEntry`` return: (osid.grading.GradeEntry) - the grade entry raise: NotFound - ``grade_entry_id`` not found raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method is must be implemented.* ] variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[grading]]] variable[result] assign[=] call[name[collection].find_one, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da18f811a50>], [<ast.Call object at 0x7da18f8108e0>]]]]]] return[call[name[objects].GradeEntry, parameter[]]]
keyword[def] identifier[get_grade_entry] ( identifier[self] , identifier[grade_entry_id] ): literal[string] identifier[collection] = identifier[JSONClientValidated] ( literal[string] , identifier[collection] = literal[string] , identifier[runtime] = identifier[self] . identifier[_runtime] ) identifier[result] = identifier[collection] . identifier[find_one] ( identifier[dict] ({ literal[string] : identifier[ObjectId] ( identifier[self] . identifier[_get_id] ( identifier[grade_entry_id] , literal[string] ). identifier[get_identifier] ())}, ** identifier[self] . identifier[_view_filter] ())) keyword[return] identifier[objects] . identifier[GradeEntry] ( identifier[osid_object_map] = identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] )
def get_grade_entry(self, grade_entry_id):
    """Gets the ``GradeEntry`` specified by its ``Id``.

    arg:    grade_entry_id (osid.id.Id): ``Id`` of the
            ``GradeEntry``
    return: (osid.grading.GradeEntry) - the grade entry
    raise:  NotFound - ``grade_entry_id`` not found
    raise:  NullArgument - ``grade_entry_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resource
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('grading', collection='GradeEntry', runtime=self._runtime)
    result = collection.find_one(dict({'_id': ObjectId(self._get_id(grade_entry_id, 'grading').get_identifier())}, **self._view_filter()))
    return objects.GradeEntry(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
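Illustrative call for the lookup method above; the session object and the osid Id would both come from a configured dlkit runtime, so this is a sketch rather than a runnable test.

# `session` is assumed to be a GradeEntryLookupSession and
# `grade_entry_id` an osid.id.Id for an existing entry (hypothetical).
entry = session.get_grade_entry(grade_entry_id)
print(entry.get_display_name())  # standard osid accessor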
def prepare_files(self, finder): """ Prepare process. Create temp directories, download and/or unpack files. """ from pip.index import Link unnamed = list(self.unnamed_requirements) reqs = list(self.requirements.values()) while reqs or unnamed: if unnamed: req_to_install = unnamed.pop(0) else: req_to_install = reqs.pop(0) install = True best_installed = False not_found = None # ############################################# # # # Search for archive to fulfill requirement # # # ############################################# # if not self.ignore_installed and not req_to_install.editable: req_to_install.check_if_exists() if req_to_install.satisfied_by: if self.upgrade: if not self.force_reinstall and not req_to_install.url: try: url = finder.find_requirement( req_to_install, self.upgrade) except BestVersionAlreadyInstalled: best_installed = True install = False except DistributionNotFound as exc: not_found = exc else: # Avoid the need to call find_requirement again req_to_install.url = url.url if not best_installed: # don't uninstall conflict if user install and # conflict is not user install if not (self.use_user_site and not dist_in_usersite( req_to_install.satisfied_by )): req_to_install.conflicts_with = \ req_to_install.satisfied_by req_to_install.satisfied_by = None else: install = False if req_to_install.satisfied_by: if best_installed: logger.info( 'Requirement already up-to-date: %s', req_to_install, ) else: logger.info( 'Requirement already satisfied (use --upgrade to ' 'upgrade): %s', req_to_install, ) if req_to_install.editable: logger.info('Obtaining %s', req_to_install) elif install: if (req_to_install.url and req_to_install.url.lower().startswith('file:')): path = url_to_path(req_to_install.url) logger.info('Processing %s', display_path(path)) else: logger.info('Collecting %s', req_to_install) with indent_log(): # ################################ # # # vcs update or unpack archive # # # ################################ # is_wheel = False if req_to_install.editable: if req_to_install.source_dir is None: location = req_to_install.build_location(self.src_dir) req_to_install.source_dir = location else: location = req_to_install.source_dir if not os.path.exists(self.build_dir): _make_build_dir(self.build_dir) req_to_install.update_editable(not self.is_download) if self.is_download: req_to_install.run_egg_info() req_to_install.archive(self.download_dir) else: req_to_install.run_egg_info() elif install: # @@ if filesystem packages are not marked # editable in a req, a non deterministic error # occurs when the script attempts to unpack the # build directory # NB: This call can result in the creation of a temporary # build directory location = req_to_install.build_location( self.build_dir, ) unpack = True url = None # If a checkout exists, it's unwise to keep going. version # inconsistencies are logged later, but do not fail the # installation. if os.path.exists(os.path.join(location, 'setup.py')): raise PreviousBuildDirError( "pip can't proceed with requirements '%s' due to a" " pre-existing build directory (%s). This is " "likely due to a previous installation that failed" ". pip is being responsible and not assuming it " "can delete this. Please delete it and try again." % (req_to_install, location) ) else: # FIXME: this won't upgrade when there's an existing # package unpacked in `location` if req_to_install.url is None: if not_found: raise not_found url = finder.find_requirement( req_to_install, upgrade=self.upgrade, ) else: # FIXME: should req_to_install.url already be a # link? 
url = Link(req_to_install.url) assert url if url: try: if ( url.filename.endswith(wheel_ext) and self.wheel_download_dir ): # when doing 'pip wheel` download_dir = self.wheel_download_dir do_download = True else: download_dir = self.download_dir do_download = self.is_download unpack_url( url, location, download_dir, do_download, session=self.session, ) except requests.HTTPError as exc: logger.critical( 'Could not install requirement %s because ' 'of error %s', req_to_install, exc, ) raise InstallationError( 'Could not install requirement %s because ' 'of HTTP error %s for URL %s' % (req_to_install, exc, url) ) else: unpack = False if unpack: is_wheel = url and url.filename.endswith(wheel_ext) if self.is_download: req_to_install.source_dir = location if not is_wheel: # FIXME:https://github.com/pypa/pip/issues/1112 req_to_install.run_egg_info() if url and url.scheme in vcs.all_schemes: req_to_install.archive(self.download_dir) elif is_wheel: req_to_install.source_dir = location req_to_install.url = url.url else: req_to_install.source_dir = location req_to_install.run_egg_info() req_to_install.assert_source_matches_version() # req_to_install.req is only avail after unpack for URL # pkgs repeat check_if_exists to uninstall-on-upgrade # (#14) if not self.ignore_installed: req_to_install.check_if_exists() if req_to_install.satisfied_by: if self.upgrade or self.ignore_installed: # don't uninstall conflict if user install and # conflict is not user install if not (self.use_user_site and not dist_in_usersite( req_to_install.satisfied_by)): req_to_install.conflicts_with = \ req_to_install.satisfied_by req_to_install.satisfied_by = None else: logger.info( 'Requirement already satisfied (use ' '--upgrade to upgrade): %s', req_to_install, ) install = False # ###################### # # # parse dependencies # # # ###################### # if (req_to_install.extras): logger.debug( "Installing extra requirements: %r", ','.join(req_to_install.extras), ) if is_wheel: dist = list( pkg_resources.find_distributions(location) )[0] else: # sdists if req_to_install.satisfied_by: dist = req_to_install.satisfied_by else: dist = req_to_install.get_dist() # FIXME: shouldn't be globally added: if dist.has_metadata('dependency_links.txt'): finder.add_dependency_links( dist.get_metadata_lines('dependency_links.txt') ) if not self.ignore_dependencies: for subreq in dist.requires( req_to_install.extras): if self.has_requirement( subreq.project_name): # FIXME: check for conflict continue subreq = InstallRequirement( str(subreq), req_to_install, isolated=self.isolated, ) reqs.append(subreq) self.add_requirement(subreq) if not self.has_requirement(req_to_install.name): # 'unnamed' requirements will get added here self.add_requirement(req_to_install) # cleanup tmp src if (self.is_download or req_to_install._temp_build_dir is not None): self.reqs_to_cleanup.append(req_to_install) if install: self.successfully_downloaded.append(req_to_install)
def function[prepare_files, parameter[self, finder]]: constant[ Prepare process. Create temp directories, download and/or unpack files. ] from relative_module[pip.index] import module[Link] variable[unnamed] assign[=] call[name[list], parameter[name[self].unnamed_requirements]] variable[reqs] assign[=] call[name[list], parameter[call[name[self].requirements.values, parameter[]]]] while <ast.BoolOp object at 0x7da1b26ad1b0> begin[:] if name[unnamed] begin[:] variable[req_to_install] assign[=] call[name[unnamed].pop, parameter[constant[0]]] variable[install] assign[=] constant[True] variable[best_installed] assign[=] constant[False] variable[not_found] assign[=] constant[None] if <ast.BoolOp object at 0x7da1b26ad6c0> begin[:] call[name[req_to_install].check_if_exists, parameter[]] if name[req_to_install].satisfied_by begin[:] if name[self].upgrade begin[:] if <ast.BoolOp object at 0x7da1b26ad150> begin[:] <ast.Try object at 0x7da1b021ded0> if <ast.UnaryOp object at 0x7da1b021e410> begin[:] if <ast.UnaryOp object at 0x7da1b021cf70> begin[:] name[req_to_install].conflicts_with assign[=] name[req_to_install].satisfied_by name[req_to_install].satisfied_by assign[=] constant[None] if name[req_to_install].satisfied_by begin[:] if name[best_installed] begin[:] call[name[logger].info, parameter[constant[Requirement already up-to-date: %s], name[req_to_install]]] if name[req_to_install].editable begin[:] call[name[logger].info, parameter[constant[Obtaining %s], name[req_to_install]]] with call[name[indent_log], parameter[]] begin[:] variable[is_wheel] assign[=] constant[False] if name[req_to_install].editable begin[:] if compare[name[req_to_install].source_dir is constant[None]] begin[:] variable[location] assign[=] call[name[req_to_install].build_location, parameter[name[self].src_dir]] name[req_to_install].source_dir assign[=] name[location] if <ast.UnaryOp object at 0x7da1b021de40> begin[:] call[name[_make_build_dir], parameter[name[self].build_dir]] call[name[req_to_install].update_editable, parameter[<ast.UnaryOp object at 0x7da1b021e890>]] if name[self].is_download begin[:] call[name[req_to_install].run_egg_info, parameter[]] call[name[req_to_install].archive, parameter[name[self].download_dir]] if name[req_to_install].extras begin[:] call[name[logger].debug, parameter[constant[Installing extra requirements: %r], call[constant[,].join, parameter[name[req_to_install].extras]]]] if name[is_wheel] begin[:] variable[dist] assign[=] call[call[name[list], parameter[call[name[pkg_resources].find_distributions, parameter[name[location]]]]]][constant[0]] if <ast.UnaryOp object at 0x7da18f811f60> begin[:] for taget[name[subreq]] in starred[call[name[dist].requires, parameter[name[req_to_install].extras]]] begin[:] if call[name[self].has_requirement, parameter[name[subreq].project_name]] begin[:] continue variable[subreq] assign[=] call[name[InstallRequirement], parameter[call[name[str], parameter[name[subreq]]], name[req_to_install]]] call[name[reqs].append, parameter[name[subreq]]] call[name[self].add_requirement, parameter[name[subreq]]] if <ast.UnaryOp object at 0x7da18f810190> begin[:] call[name[self].add_requirement, parameter[name[req_to_install]]] if <ast.BoolOp object at 0x7da18f8127a0> begin[:] call[name[self].reqs_to_cleanup.append, parameter[name[req_to_install]]] if name[install] begin[:] call[name[self].successfully_downloaded.append, parameter[name[req_to_install]]]
keyword[def] identifier[prepare_files] ( identifier[self] , identifier[finder] ): literal[string] keyword[from] identifier[pip] . identifier[index] keyword[import] identifier[Link] identifier[unnamed] = identifier[list] ( identifier[self] . identifier[unnamed_requirements] ) identifier[reqs] = identifier[list] ( identifier[self] . identifier[requirements] . identifier[values] ()) keyword[while] identifier[reqs] keyword[or] identifier[unnamed] : keyword[if] identifier[unnamed] : identifier[req_to_install] = identifier[unnamed] . identifier[pop] ( literal[int] ) keyword[else] : identifier[req_to_install] = identifier[reqs] . identifier[pop] ( literal[int] ) identifier[install] = keyword[True] identifier[best_installed] = keyword[False] identifier[not_found] = keyword[None] keyword[if] keyword[not] identifier[self] . identifier[ignore_installed] keyword[and] keyword[not] identifier[req_to_install] . identifier[editable] : identifier[req_to_install] . identifier[check_if_exists] () keyword[if] identifier[req_to_install] . identifier[satisfied_by] : keyword[if] identifier[self] . identifier[upgrade] : keyword[if] keyword[not] identifier[self] . identifier[force_reinstall] keyword[and] keyword[not] identifier[req_to_install] . identifier[url] : keyword[try] : identifier[url] = identifier[finder] . identifier[find_requirement] ( identifier[req_to_install] , identifier[self] . identifier[upgrade] ) keyword[except] identifier[BestVersionAlreadyInstalled] : identifier[best_installed] = keyword[True] identifier[install] = keyword[False] keyword[except] identifier[DistributionNotFound] keyword[as] identifier[exc] : identifier[not_found] = identifier[exc] keyword[else] : identifier[req_to_install] . identifier[url] = identifier[url] . identifier[url] keyword[if] keyword[not] identifier[best_installed] : keyword[if] keyword[not] ( identifier[self] . identifier[use_user_site] keyword[and] keyword[not] identifier[dist_in_usersite] ( identifier[req_to_install] . identifier[satisfied_by] )): identifier[req_to_install] . identifier[conflicts_with] = identifier[req_to_install] . identifier[satisfied_by] identifier[req_to_install] . identifier[satisfied_by] = keyword[None] keyword[else] : identifier[install] = keyword[False] keyword[if] identifier[req_to_install] . identifier[satisfied_by] : keyword[if] identifier[best_installed] : identifier[logger] . identifier[info] ( literal[string] , identifier[req_to_install] , ) keyword[else] : identifier[logger] . identifier[info] ( literal[string] literal[string] , identifier[req_to_install] , ) keyword[if] identifier[req_to_install] . identifier[editable] : identifier[logger] . identifier[info] ( literal[string] , identifier[req_to_install] ) keyword[elif] identifier[install] : keyword[if] ( identifier[req_to_install] . identifier[url] keyword[and] identifier[req_to_install] . identifier[url] . identifier[lower] (). identifier[startswith] ( literal[string] )): identifier[path] = identifier[url_to_path] ( identifier[req_to_install] . identifier[url] ) identifier[logger] . identifier[info] ( literal[string] , identifier[display_path] ( identifier[path] )) keyword[else] : identifier[logger] . identifier[info] ( literal[string] , identifier[req_to_install] ) keyword[with] identifier[indent_log] (): identifier[is_wheel] = keyword[False] keyword[if] identifier[req_to_install] . identifier[editable] : keyword[if] identifier[req_to_install] . identifier[source_dir] keyword[is] keyword[None] : identifier[location] = identifier[req_to_install] . 
identifier[build_location] ( identifier[self] . identifier[src_dir] ) identifier[req_to_install] . identifier[source_dir] = identifier[location] keyword[else] : identifier[location] = identifier[req_to_install] . identifier[source_dir] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[build_dir] ): identifier[_make_build_dir] ( identifier[self] . identifier[build_dir] ) identifier[req_to_install] . identifier[update_editable] ( keyword[not] identifier[self] . identifier[is_download] ) keyword[if] identifier[self] . identifier[is_download] : identifier[req_to_install] . identifier[run_egg_info] () identifier[req_to_install] . identifier[archive] ( identifier[self] . identifier[download_dir] ) keyword[else] : identifier[req_to_install] . identifier[run_egg_info] () keyword[elif] identifier[install] : identifier[location] = identifier[req_to_install] . identifier[build_location] ( identifier[self] . identifier[build_dir] , ) identifier[unpack] = keyword[True] identifier[url] = keyword[None] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[location] , literal[string] )): keyword[raise] identifier[PreviousBuildDirError] ( literal[string] literal[string] literal[string] literal[string] literal[string] %( identifier[req_to_install] , identifier[location] ) ) keyword[else] : keyword[if] identifier[req_to_install] . identifier[url] keyword[is] keyword[None] : keyword[if] identifier[not_found] : keyword[raise] identifier[not_found] identifier[url] = identifier[finder] . identifier[find_requirement] ( identifier[req_to_install] , identifier[upgrade] = identifier[self] . identifier[upgrade] , ) keyword[else] : identifier[url] = identifier[Link] ( identifier[req_to_install] . identifier[url] ) keyword[assert] identifier[url] keyword[if] identifier[url] : keyword[try] : keyword[if] ( identifier[url] . identifier[filename] . identifier[endswith] ( identifier[wheel_ext] ) keyword[and] identifier[self] . identifier[wheel_download_dir] ): identifier[download_dir] = identifier[self] . identifier[wheel_download_dir] identifier[do_download] = keyword[True] keyword[else] : identifier[download_dir] = identifier[self] . identifier[download_dir] identifier[do_download] = identifier[self] . identifier[is_download] identifier[unpack_url] ( identifier[url] , identifier[location] , identifier[download_dir] , identifier[do_download] , identifier[session] = identifier[self] . identifier[session] , ) keyword[except] identifier[requests] . identifier[HTTPError] keyword[as] identifier[exc] : identifier[logger] . identifier[critical] ( literal[string] literal[string] , identifier[req_to_install] , identifier[exc] , ) keyword[raise] identifier[InstallationError] ( literal[string] literal[string] % ( identifier[req_to_install] , identifier[exc] , identifier[url] ) ) keyword[else] : identifier[unpack] = keyword[False] keyword[if] identifier[unpack] : identifier[is_wheel] = identifier[url] keyword[and] identifier[url] . identifier[filename] . identifier[endswith] ( identifier[wheel_ext] ) keyword[if] identifier[self] . identifier[is_download] : identifier[req_to_install] . identifier[source_dir] = identifier[location] keyword[if] keyword[not] identifier[is_wheel] : identifier[req_to_install] . identifier[run_egg_info] () keyword[if] identifier[url] keyword[and] identifier[url] . identifier[scheme] keyword[in] identifier[vcs] . identifier[all_schemes] : identifier[req_to_install] . 
identifier[archive] ( identifier[self] . identifier[download_dir] ) keyword[elif] identifier[is_wheel] : identifier[req_to_install] . identifier[source_dir] = identifier[location] identifier[req_to_install] . identifier[url] = identifier[url] . identifier[url] keyword[else] : identifier[req_to_install] . identifier[source_dir] = identifier[location] identifier[req_to_install] . identifier[run_egg_info] () identifier[req_to_install] . identifier[assert_source_matches_version] () keyword[if] keyword[not] identifier[self] . identifier[ignore_installed] : identifier[req_to_install] . identifier[check_if_exists] () keyword[if] identifier[req_to_install] . identifier[satisfied_by] : keyword[if] identifier[self] . identifier[upgrade] keyword[or] identifier[self] . identifier[ignore_installed] : keyword[if] keyword[not] ( identifier[self] . identifier[use_user_site] keyword[and] keyword[not] identifier[dist_in_usersite] ( identifier[req_to_install] . identifier[satisfied_by] )): identifier[req_to_install] . identifier[conflicts_with] = identifier[req_to_install] . identifier[satisfied_by] identifier[req_to_install] . identifier[satisfied_by] = keyword[None] keyword[else] : identifier[logger] . identifier[info] ( literal[string] literal[string] , identifier[req_to_install] , ) identifier[install] = keyword[False] keyword[if] ( identifier[req_to_install] . identifier[extras] ): identifier[logger] . identifier[debug] ( literal[string] , literal[string] . identifier[join] ( identifier[req_to_install] . identifier[extras] ), ) keyword[if] identifier[is_wheel] : identifier[dist] = identifier[list] ( identifier[pkg_resources] . identifier[find_distributions] ( identifier[location] ) )[ literal[int] ] keyword[else] : keyword[if] identifier[req_to_install] . identifier[satisfied_by] : identifier[dist] = identifier[req_to_install] . identifier[satisfied_by] keyword[else] : identifier[dist] = identifier[req_to_install] . identifier[get_dist] () keyword[if] identifier[dist] . identifier[has_metadata] ( literal[string] ): identifier[finder] . identifier[add_dependency_links] ( identifier[dist] . identifier[get_metadata_lines] ( literal[string] ) ) keyword[if] keyword[not] identifier[self] . identifier[ignore_dependencies] : keyword[for] identifier[subreq] keyword[in] identifier[dist] . identifier[requires] ( identifier[req_to_install] . identifier[extras] ): keyword[if] identifier[self] . identifier[has_requirement] ( identifier[subreq] . identifier[project_name] ): keyword[continue] identifier[subreq] = identifier[InstallRequirement] ( identifier[str] ( identifier[subreq] ), identifier[req_to_install] , identifier[isolated] = identifier[self] . identifier[isolated] , ) identifier[reqs] . identifier[append] ( identifier[subreq] ) identifier[self] . identifier[add_requirement] ( identifier[subreq] ) keyword[if] keyword[not] identifier[self] . identifier[has_requirement] ( identifier[req_to_install] . identifier[name] ): identifier[self] . identifier[add_requirement] ( identifier[req_to_install] ) keyword[if] ( identifier[self] . identifier[is_download] keyword[or] identifier[req_to_install] . identifier[_temp_build_dir] keyword[is] keyword[not] keyword[None] ): identifier[self] . identifier[reqs_to_cleanup] . identifier[append] ( identifier[req_to_install] ) keyword[if] identifier[install] : identifier[self] . identifier[successfully_downloaded] . identifier[append] ( identifier[req_to_install] )
def prepare_files(self, finder): """ Prepare process. Create temp directories, download and/or unpack files. """ from pip.index import Link unnamed = list(self.unnamed_requirements) reqs = list(self.requirements.values()) while reqs or unnamed: if unnamed: req_to_install = unnamed.pop(0) # depends on [control=['if'], data=[]] else: req_to_install = reqs.pop(0) install = True best_installed = False not_found = None # ############################################# # # # Search for archive to fulfill requirement # # # ############################################# # if not self.ignore_installed and (not req_to_install.editable): req_to_install.check_if_exists() if req_to_install.satisfied_by: if self.upgrade: if not self.force_reinstall and (not req_to_install.url): try: url = finder.find_requirement(req_to_install, self.upgrade) # depends on [control=['try'], data=[]] except BestVersionAlreadyInstalled: best_installed = True install = False # depends on [control=['except'], data=[]] except DistributionNotFound as exc: not_found = exc # depends on [control=['except'], data=['exc']] else: # Avoid the need to call find_requirement again req_to_install.url = url.url # depends on [control=['if'], data=[]] if not best_installed: # don't uninstall conflict if user install and # conflict is not user install if not (self.use_user_site and (not dist_in_usersite(req_to_install.satisfied_by))): req_to_install.conflicts_with = req_to_install.satisfied_by # depends on [control=['if'], data=[]] req_to_install.satisfied_by = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: install = False # depends on [control=['if'], data=[]] if req_to_install.satisfied_by: if best_installed: logger.info('Requirement already up-to-date: %s', req_to_install) # depends on [control=['if'], data=[]] else: logger.info('Requirement already satisfied (use --upgrade to upgrade): %s', req_to_install) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if req_to_install.editable: logger.info('Obtaining %s', req_to_install) # depends on [control=['if'], data=[]] elif install: if req_to_install.url and req_to_install.url.lower().startswith('file:'): path = url_to_path(req_to_install.url) logger.info('Processing %s', display_path(path)) # depends on [control=['if'], data=[]] else: logger.info('Collecting %s', req_to_install) # depends on [control=['if'], data=[]] with indent_log(): # ################################ # # # vcs update or unpack archive # # # ################################ # is_wheel = False if req_to_install.editable: if req_to_install.source_dir is None: location = req_to_install.build_location(self.src_dir) req_to_install.source_dir = location # depends on [control=['if'], data=[]] else: location = req_to_install.source_dir if not os.path.exists(self.build_dir): _make_build_dir(self.build_dir) # depends on [control=['if'], data=[]] req_to_install.update_editable(not self.is_download) if self.is_download: req_to_install.run_egg_info() req_to_install.archive(self.download_dir) # depends on [control=['if'], data=[]] else: req_to_install.run_egg_info() # depends on [control=['if'], data=[]] elif install: # @@ if filesystem packages are not marked # editable in a req, a non deterministic error # occurs when the script attempts to unpack the # build directory # NB: This call can result in the creation of a temporary # build directory location = req_to_install.build_location(self.build_dir) unpack = True url = None # If a checkout exists, it's unwise to keep 
going. version # inconsistencies are logged later, but do not fail the # installation. if os.path.exists(os.path.join(location, 'setup.py')): raise PreviousBuildDirError("pip can't proceed with requirements '%s' due to a pre-existing build directory (%s). This is likely due to a previous installation that failed. pip is being responsible and not assuming it can delete this. Please delete it and try again." % (req_to_install, location)) # depends on [control=['if'], data=[]] else: # FIXME: this won't upgrade when there's an existing # package unpacked in `location` if req_to_install.url is None: if not_found: raise not_found # depends on [control=['if'], data=[]] url = finder.find_requirement(req_to_install, upgrade=self.upgrade) # depends on [control=['if'], data=[]] else: # FIXME: should req_to_install.url already be a # link? url = Link(req_to_install.url) assert url if url: try: if url.filename.endswith(wheel_ext) and self.wheel_download_dir: # when doing 'pip wheel` download_dir = self.wheel_download_dir do_download = True # depends on [control=['if'], data=[]] else: download_dir = self.download_dir do_download = self.is_download unpack_url(url, location, download_dir, do_download, session=self.session) # depends on [control=['try'], data=[]] except requests.HTTPError as exc: logger.critical('Could not install requirement %s because of error %s', req_to_install, exc) raise InstallationError('Could not install requirement %s because of HTTP error %s for URL %s' % (req_to_install, exc, url)) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]] else: unpack = False if unpack: is_wheel = url and url.filename.endswith(wheel_ext) if self.is_download: req_to_install.source_dir = location if not is_wheel: # FIXME:https://github.com/pypa/pip/issues/1112 req_to_install.run_egg_info() # depends on [control=['if'], data=[]] if url and url.scheme in vcs.all_schemes: req_to_install.archive(self.download_dir) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif is_wheel: req_to_install.source_dir = location req_to_install.url = url.url # depends on [control=['if'], data=[]] else: req_to_install.source_dir = location req_to_install.run_egg_info() req_to_install.assert_source_matches_version() # req_to_install.req is only avail after unpack for URL # pkgs repeat check_if_exists to uninstall-on-upgrade # (#14) if not self.ignore_installed: req_to_install.check_if_exists() # depends on [control=['if'], data=[]] if req_to_install.satisfied_by: if self.upgrade or self.ignore_installed: # don't uninstall conflict if user install and # conflict is not user install if not (self.use_user_site and (not dist_in_usersite(req_to_install.satisfied_by))): req_to_install.conflicts_with = req_to_install.satisfied_by # depends on [control=['if'], data=[]] req_to_install.satisfied_by = None # depends on [control=['if'], data=[]] else: logger.info('Requirement already satisfied (use --upgrade to upgrade): %s', req_to_install) install = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # ###################### # # # parse dependencies # # # ###################### # if req_to_install.extras: logger.debug('Installing extra requirements: %r', ','.join(req_to_install.extras)) # depends on [control=['if'], data=[]] if is_wheel: dist = list(pkg_resources.find_distributions(location))[0] # depends on [control=['if'], data=[]] else: # sdists if req_to_install.satisfied_by: dist = 
req_to_install.satisfied_by # depends on [control=['if'], data=[]] else: dist = req_to_install.get_dist() # FIXME: shouldn't be globally added: if dist.has_metadata('dependency_links.txt'): finder.add_dependency_links(dist.get_metadata_lines('dependency_links.txt')) # depends on [control=['if'], data=[]] if not self.ignore_dependencies: for subreq in dist.requires(req_to_install.extras): if self.has_requirement(subreq.project_name): # FIXME: check for conflict continue # depends on [control=['if'], data=[]] subreq = InstallRequirement(str(subreq), req_to_install, isolated=self.isolated) reqs.append(subreq) self.add_requirement(subreq) # depends on [control=['for'], data=['subreq']] # depends on [control=['if'], data=[]] if not self.has_requirement(req_to_install.name): # 'unnamed' requirements will get added here self.add_requirement(req_to_install) # depends on [control=['if'], data=[]] # cleanup tmp src if self.is_download or req_to_install._temp_build_dir is not None: self.reqs_to_cleanup.append(req_to_install) # depends on [control=['if'], data=[]] if install: self.successfully_downloaded.append(req_to_install) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['while'], data=[]]
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): """Generate samples of the encoded frames with possible extra data. By default this function just encodes the numpy array returned as "frame" from `self.generate_samples` into a PNG image. Override this function to get other encodings on disk. Args: data_dir: final data directory. Typically only used in this method to copy over user-supplied vocab files if there are extra fields needing them. tmp_dir: temporary directory that you can use for downloading and scratch. dataset_split: problem.DatasetSplit, which data split to generate samples for (for example, training and evaluation). Yields: Sample: dict<str feature_name, feature value> which is in disk encoding. Raises: ValueError: if the frame has a different number of channels than required. """ writer = None with tf.Graph().as_default(): image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None)) encoded_image_t = tf.image.encode_png(image_t) with tf.Session() as sess: for features in self.generate_samples(data_dir, tmp_dir, dataset_split): unencoded_frame = features.pop("frame") self.validate_frame(unencoded_frame) height, width, _ = unencoded_frame.shape encoded_frame = sess.run( encoded_image_t, feed_dict={image_t: unencoded_frame}) features["image/encoded"] = [encoded_frame] features["image/format"] = ["png"] features["image/height"] = [height] features["image/width"] = [width] has_debug_image = "image/debug" in features if has_debug_image: unencoded_debug = features.pop("image/debug") encoded_debug = sess.run( encoded_image_t, feed_dict={image_t: unencoded_debug}) features["image/encoded_debug"] = [encoded_debug] if self.debug_dump_frames_path: # Defer creating debug writer until we know debug_dump_frames_path. if writer is None: if not tf.gfile.Exists(self.debug_dump_frames_path): tf.gfile.MkDir(self.debug_dump_frames_path) writer = debug_video_writer_factory(self.debug_dump_frames_path) img = unencoded_debug if has_debug_image else unencoded_frame encoded_img = encoded_debug if has_debug_image else encoded_frame writer.write(img, encoded_img) yield features if self.debug_dump_frames_path: writer.finish_to_disk()
def function[generate_encoded_samples, parameter[self, data_dir, tmp_dir, dataset_split]]: constant[Generate samples of the encoded frames with possible extra data. By default this function just encodes the numpy array returned as "frame" from `self.generate_samples` into a PNG image. Override this function to get other encodings on disk. Args: data_dir: final data directory. Typically only used in this method to copy over user-supplied vocab files if there are extra fields needing them. tmp_dir: temporary directory that you can use for downloading and scratch. dataset_split: problem.DatasetSplit, which data split to generate samples for (for example, training and evaluation). Yields: Sample: dict<str feature_name, feature value> which is in disk encoding. Raises: ValueError: if the frame has a different number of channels than required. ] variable[writer] assign[=] constant[None] with call[call[name[tf].Graph, parameter[]].as_default, parameter[]] begin[:] variable[image_t] assign[=] call[name[tf].placeholder, parameter[]] variable[encoded_image_t] assign[=] call[name[tf].image.encode_png, parameter[name[image_t]]] with call[name[tf].Session, parameter[]] begin[:] for taget[name[features]] in starred[call[name[self].generate_samples, parameter[name[data_dir], name[tmp_dir], name[dataset_split]]]] begin[:] variable[unencoded_frame] assign[=] call[name[features].pop, parameter[constant[frame]]] call[name[self].validate_frame, parameter[name[unencoded_frame]]] <ast.Tuple object at 0x7da204346c80> assign[=] name[unencoded_frame].shape variable[encoded_frame] assign[=] call[name[sess].run, parameter[name[encoded_image_t]]] call[name[features]][constant[image/encoded]] assign[=] list[[<ast.Name object at 0x7da2043450c0>]] call[name[features]][constant[image/format]] assign[=] list[[<ast.Constant object at 0x7da204345a80>]] call[name[features]][constant[image/height]] assign[=] list[[<ast.Name object at 0x7da204346770>]] call[name[features]][constant[image/width]] assign[=] list[[<ast.Name object at 0x7da204345840>]] variable[has_debug_image] assign[=] compare[constant[image/debug] in name[features]] if name[has_debug_image] begin[:] variable[unencoded_debug] assign[=] call[name[features].pop, parameter[constant[image/debug]]] variable[encoded_debug] assign[=] call[name[sess].run, parameter[name[encoded_image_t]]] call[name[features]][constant[image/encoded_debug]] assign[=] list[[<ast.Name object at 0x7da204344490>]] if name[self].debug_dump_frames_path begin[:] if compare[name[writer] is constant[None]] begin[:] if <ast.UnaryOp object at 0x7da204347fd0> begin[:] call[name[tf].gfile.MkDir, parameter[name[self].debug_dump_frames_path]] variable[writer] assign[=] call[name[debug_video_writer_factory], parameter[name[self].debug_dump_frames_path]] variable[img] assign[=] <ast.IfExp object at 0x7da2043460b0> variable[encoded_img] assign[=] <ast.IfExp object at 0x7da204346950> call[name[writer].write, parameter[name[img], name[encoded_img]]] <ast.Yield object at 0x7da204345f30> if name[self].debug_dump_frames_path begin[:] call[name[writer].finish_to_disk, parameter[]]
keyword[def] identifier[generate_encoded_samples] ( identifier[self] , identifier[data_dir] , identifier[tmp_dir] , identifier[dataset_split] ): literal[string] identifier[writer] = keyword[None] keyword[with] identifier[tf] . identifier[Graph] (). identifier[as_default] (): identifier[image_t] = identifier[tf] . identifier[placeholder] ( identifier[dtype] = identifier[tf] . identifier[uint8] , identifier[shape] =( keyword[None] , keyword[None] , keyword[None] )) identifier[encoded_image_t] = identifier[tf] . identifier[image] . identifier[encode_png] ( identifier[image_t] ) keyword[with] identifier[tf] . identifier[Session] () keyword[as] identifier[sess] : keyword[for] identifier[features] keyword[in] identifier[self] . identifier[generate_samples] ( identifier[data_dir] , identifier[tmp_dir] , identifier[dataset_split] ): identifier[unencoded_frame] = identifier[features] . identifier[pop] ( literal[string] ) identifier[self] . identifier[validate_frame] ( identifier[unencoded_frame] ) identifier[height] , identifier[width] , identifier[_] = identifier[unencoded_frame] . identifier[shape] identifier[encoded_frame] = identifier[sess] . identifier[run] ( identifier[encoded_image_t] , identifier[feed_dict] ={ identifier[image_t] : identifier[unencoded_frame] }) identifier[features] [ literal[string] ]=[ identifier[encoded_frame] ] identifier[features] [ literal[string] ]=[ literal[string] ] identifier[features] [ literal[string] ]=[ identifier[height] ] identifier[features] [ literal[string] ]=[ identifier[width] ] identifier[has_debug_image] = literal[string] keyword[in] identifier[features] keyword[if] identifier[has_debug_image] : identifier[unencoded_debug] = identifier[features] . identifier[pop] ( literal[string] ) identifier[encoded_debug] = identifier[sess] . identifier[run] ( identifier[encoded_image_t] , identifier[feed_dict] ={ identifier[image_t] : identifier[unencoded_debug] }) identifier[features] [ literal[string] ]=[ identifier[encoded_debug] ] keyword[if] identifier[self] . identifier[debug_dump_frames_path] : keyword[if] identifier[writer] keyword[is] keyword[None] : keyword[if] keyword[not] identifier[tf] . identifier[gfile] . identifier[Exists] ( identifier[self] . identifier[debug_dump_frames_path] ): identifier[tf] . identifier[gfile] . identifier[MkDir] ( identifier[self] . identifier[debug_dump_frames_path] ) identifier[writer] = identifier[debug_video_writer_factory] ( identifier[self] . identifier[debug_dump_frames_path] ) identifier[img] = identifier[unencoded_debug] keyword[if] identifier[has_debug_image] keyword[else] identifier[unencoded_frame] identifier[encoded_img] = identifier[encoded_debug] keyword[if] identifier[has_debug_image] keyword[else] identifier[encoded_frame] identifier[writer] . identifier[write] ( identifier[img] , identifier[encoded_img] ) keyword[yield] identifier[features] keyword[if] identifier[self] . identifier[debug_dump_frames_path] : identifier[writer] . identifier[finish_to_disk] ()
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): """Generate samples of the encoded frames with possible extra data. By default this function just encodes the numpy array returned as "frame" from `self.generate_samples` into a PNG image. Override this function to get other encodings on disk. Args: data_dir: final data directory. Typically only used in this method to copy over user-supplied vocab files if there are extra fields needing them. tmp_dir: temporary directory that you can use for downloading and scratch. dataset_split: problem.DatasetSplit, which data split to generate samples for (for example, training and evaluation). Yields: Sample: dict<str feature_name, feature value> which is in disk encoding. Raises: ValueError: if the frame has a different number of channels than required. """ writer = None with tf.Graph().as_default(): image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None)) encoded_image_t = tf.image.encode_png(image_t) with tf.Session() as sess: for features in self.generate_samples(data_dir, tmp_dir, dataset_split): unencoded_frame = features.pop('frame') self.validate_frame(unencoded_frame) (height, width, _) = unencoded_frame.shape encoded_frame = sess.run(encoded_image_t, feed_dict={image_t: unencoded_frame}) features['image/encoded'] = [encoded_frame] features['image/format'] = ['png'] features['image/height'] = [height] features['image/width'] = [width] has_debug_image = 'image/debug' in features if has_debug_image: unencoded_debug = features.pop('image/debug') encoded_debug = sess.run(encoded_image_t, feed_dict={image_t: unencoded_debug}) features['image/encoded_debug'] = [encoded_debug] # depends on [control=['if'], data=[]] if self.debug_dump_frames_path: # Defer creating debug writer until we know debug_dump_frames_path. if writer is None: if not tf.gfile.Exists(self.debug_dump_frames_path): tf.gfile.MkDir(self.debug_dump_frames_path) # depends on [control=['if'], data=[]] writer = debug_video_writer_factory(self.debug_dump_frames_path) # depends on [control=['if'], data=['writer']] img = unencoded_debug if has_debug_image else unencoded_frame encoded_img = encoded_debug if has_debug_image else encoded_frame writer.write(img, encoded_img) # depends on [control=['if'], data=[]] yield features # depends on [control=['for'], data=['features']] # depends on [control=['with'], data=['sess']] # depends on [control=['with'], data=[]] if self.debug_dump_frames_path: writer.finish_to_disk() # depends on [control=['if'], data=[]]
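A standalone sketch of the encoding pattern the entry above relies on: build one `tf.image.encode_png` op fed by a placeholder, then run frames through it in a session. This assumes the TensorFlow 1.x API (`tf.placeholder`/`tf.Session`); the random frame stands in for whatever `generate_samples` would yield.

import numpy as np
import tensorflow as tf  # TF 1.x API assumed

# Dummy uint8 RGB frame in place of one produced by generate_samples().
frame = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

with tf.Graph().as_default():
    image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
    encoded_t = tf.image.encode_png(image_t)
    with tf.Session() as sess:
        png_bytes = sess.run(encoded_t, feed_dict={image_t: frame})

print(type(png_bytes), len(png_bytes))  # PNG-encoded bytes; size varies with content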
def compile_rename_column(self, blueprint, command, connection):
        """
        Compile a rename column command.

        :param blueprint: The blueprint
        :type blueprint: Blueprint

        :param command: The command
        :type command: Fluent

        :param connection: The connection
        :type connection: orator.connections.Connection

        :rtype: str
        """
        table = self.get_table_prefix() + blueprint.get_table()

        column = self.wrap(command.from_)

        return "ALTER TABLE %s RENAME COLUMN %s TO %s" % (
            table,
            column,
            self.wrap(command.to),
        )
def function[compile_rename_column, parameter[self, blueprint, command, connection]]:
    constant[
        Compile a rename column command.

        :param blueprint: The blueprint
        :type blueprint: Blueprint

        :param command: The command
        :type command: Fluent

        :param connection: The connection
        :type connection: orator.connections.Connection

        :rtype: str
        ]
    variable[table] assign[=] binary_operation[call[name[self].get_table_prefix, parameter[]] + call[name[blueprint].get_table, parameter[]]]
    variable[column] assign[=] call[name[self].wrap, parameter[name[command].from_]]
    return[binary_operation[constant[ALTER TABLE %s RENAME COLUMN %s TO %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18eb56200>, <ast.Name object at 0x7da18eb555d0>, <ast.Call object at 0x7da18eb57ac0>]]]]
keyword[def] identifier[compile_rename_column] ( identifier[self] , identifier[blueprint] , identifier[command] , identifier[connection] ): literal[string] identifier[table] = identifier[self] . identifier[get_table_prefix] ()+ identifier[blueprint] . identifier[get_table] () identifier[column] = identifier[self] . identifier[wrap] ( identifier[command] . identifier[from_] ) keyword[return] literal[string] %( identifier[table] , identifier[column] , identifier[self] . identifier[wrap] ( identifier[command] . identifier[to] ), )
def compile_rename_column(self, blueprint, command, connection):
    """
    Compile a rename column command.

    :param blueprint: The blueprint
    :type blueprint: Blueprint

    :param command: The command
    :type command: Fluent

    :param connection: The connection
    :type connection: orator.connections.Connection

    :rtype: str
    """
    table = self.get_table_prefix() + blueprint.get_table()
    column = self.wrap(command.from_)
    return 'ALTER TABLE %s RENAME COLUMN %s TO %s' % (table, column, self.wrap(command.to))
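To see the SQL this grammar method produces, here is a hedged driver with hypothetical stand-ins for Orator's grammar, Blueprint, and Fluent command objects, stubbing only the attributes the method actually reads (the real classes live in Orator):

class StubGrammar:
    def get_table_prefix(self):
        return ""

    def wrap(self, value):
        return '"%s"' % value


class StubBlueprint:
    def get_table(self):
        return "users"


class StubCommand:
    from_ = "login"
    to = "username"


# Attach the function from the entry above as a method of the stub grammar.
StubGrammar.compile_rename_column = compile_rename_column

print(StubGrammar().compile_rename_column(StubBlueprint(), StubCommand(), None))
# ALTER TABLE users RENAME COLUMN "login" TO "username"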
def remove_timezone(cls, timestr):
        """Completely remove timezone information, if any.

        :return: the new string if a timezone offset was stripped, `None` if no
            trailing ``HH:MM`` pattern is found; a trailing match without a
            preceding space is returned unchanged
        """
        if re.match(r".*[\-+]?\d{2}:\d{2}$", timestr):
            return re.sub(
                r"(.*)(\s[\+-]?\d\d:\d\d)$",
                r"\1",
                timestr
            )
def function[remove_timezone, parameter[cls, timestr]]:
    constant[Completely remove timezone information, if any.

        :return: the new string if a timezone offset was stripped, `None` if no
            trailing ``HH:MM`` pattern is found; a trailing match without a
            preceding space is returned unchanged
        ]
    if call[name[re].match, parameter[constant[.*[\-+]?\d{2}:\d{2}$], name[timestr]]] begin[:]
        return[call[name[re].sub, parameter[constant[(.*)(\s[\+-]?\d\d:\d\d)$], constant[\1], name[timestr]]]]
keyword[def] identifier[remove_timezone] ( identifier[cls] , identifier[timestr] ): literal[string] keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[timestr] ): keyword[return] identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[timestr] )
def remove_timezone(cls, timestr):
    """Completely remove timezone information, if any.

    :return: the new string if a timezone offset was stripped, `None` if no
        trailing ``HH:MM`` pattern is found; a trailing match without a
        preceding space is returned unchanged
    """
    if re.match('.*[\\-+]?\\d{2}:\\d{2}$', timestr):
        return re.sub('(.*)(\\s[\\+-]?\\d\\d:\\d\\d)$', '\\1', timestr) # depends on [control=['if'], data=[]]
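A quick behavioral check, assuming the function above is in scope with `re` imported. Note the asymmetry between the two patterns: the search accepts any trailing `HH:MM`, but the substitution additionally requires leading whitespace, so a string ending in `...12:00:00` matches the search yet comes back unchanged. Since `cls` is unused, the bare function can be exercised with `None`:

print(remove_timezone(None, "2019-01-01 12:00 +02:00"))  # '2019-01-01 12:00'
print(remove_timezone(None, "no timestamp here"))        # None (no trailing HH:MM)
print(remove_timezone(None, "2019-01-01 12:00:00"))      # unchanged: '00:00' matches the
                                                         # search, but no space precedes it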
def imread(filename, masked = False): """Convenience function that uses the QImage_ constructor to read an image from the given file and return an `rgb_view` of the result. This is intentionally similar to scipy.ndimage.imread (which uses PIL), scipy.misc.imread, or matplotlib.pyplot.imread (using PIL for non-PNGs). For grayscale images, return 2D array (even if it comes from a 32-bit representation; this is a consequence of the QImage API). For images with an alpha channel, the resulting number of channels will be 2 (grayscale+alpha) or 4 (RGB+alpha). Alternatively, one may pass `masked = True` in order to get `masked arrays`_ back. Note that only fully transparent pixels are masked (and that masked arrays only support binary masks). The value of `masked` is ignored when the loaded image has no alpha channel (i.e., one would not get a masked array in that case). This function has been added in version 1.3. """ qImage = _qt.QImage(filename) if qImage.isNull(): raise IOError('loading %r failed' % filename) isGray = qImage.isGrayscale() if isGray and qImage.depth() == 8: return byte_view(qImage)[...,0] hasAlpha = qImage.hasAlphaChannel() if hasAlpha: targetFormat = _qt.QImage.Format_ARGB32 else: targetFormat = _qt.QImage.Format_RGB32 if qImage.format() != targetFormat: qImage = qImage.convertToFormat(targetFormat) result = rgb_view(qImage) if isGray: result = result[...,0] if hasAlpha: if masked: mask = (alpha_view(qImage) == 0) if _np.ndim(result) == 3: mask = _np.repeat(mask[...,None], 3, axis = 2) result = _np.ma.masked_array(result, mask) else: result = _np.dstack((result, alpha_view(qImage))) return result
def function[imread, parameter[filename, masked]]: constant[Convenience function that uses the QImage_ constructor to read an image from the given file and return an `rgb_view` of the result. This is intentionally similar to scipy.ndimage.imread (which uses PIL), scipy.misc.imread, or matplotlib.pyplot.imread (using PIL for non-PNGs). For grayscale images, return 2D array (even if it comes from a 32-bit representation; this is a consequence of the QImage API). For images with an alpha channel, the resulting number of channels will be 2 (grayscale+alpha) or 4 (RGB+alpha). Alternatively, one may pass `masked = True` in order to get `masked arrays`_ back. Note that only fully transparent pixels are masked (and that masked arrays only support binary masks). The value of `masked` is ignored when the loaded image has no alpha channel (i.e., one would not get a masked array in that case). This function has been added in version 1.3. ] variable[qImage] assign[=] call[name[_qt].QImage, parameter[name[filename]]] if call[name[qImage].isNull, parameter[]] begin[:] <ast.Raise object at 0x7da18f58c580> variable[isGray] assign[=] call[name[qImage].isGrayscale, parameter[]] if <ast.BoolOp object at 0x7da18f58f610> begin[:] return[call[call[name[byte_view], parameter[name[qImage]]]][tuple[[<ast.Constant object at 0x7da18f58e9b0>, <ast.Constant object at 0x7da18f58ec20>]]]] variable[hasAlpha] assign[=] call[name[qImage].hasAlphaChannel, parameter[]] if name[hasAlpha] begin[:] variable[targetFormat] assign[=] name[_qt].QImage.Format_ARGB32 if compare[call[name[qImage].format, parameter[]] not_equal[!=] name[targetFormat]] begin[:] variable[qImage] assign[=] call[name[qImage].convertToFormat, parameter[name[targetFormat]]] variable[result] assign[=] call[name[rgb_view], parameter[name[qImage]]] if name[isGray] begin[:] variable[result] assign[=] call[name[result]][tuple[[<ast.Constant object at 0x7da18f8133a0>, <ast.Constant object at 0x7da18f8111b0>]]] if name[hasAlpha] begin[:] if name[masked] begin[:] variable[mask] assign[=] compare[call[name[alpha_view], parameter[name[qImage]]] equal[==] constant[0]] if compare[call[name[_np].ndim, parameter[name[result]]] equal[==] constant[3]] begin[:] variable[mask] assign[=] call[name[_np].repeat, parameter[call[name[mask]][tuple[[<ast.Constant object at 0x7da20e9560e0>, <ast.Constant object at 0x7da20e955240>]]], constant[3]]] variable[result] assign[=] call[name[_np].ma.masked_array, parameter[name[result], name[mask]]] return[name[result]]
keyword[def] identifier[imread] ( identifier[filename] , identifier[masked] = keyword[False] ): literal[string] identifier[qImage] = identifier[_qt] . identifier[QImage] ( identifier[filename] ) keyword[if] identifier[qImage] . identifier[isNull] (): keyword[raise] identifier[IOError] ( literal[string] % identifier[filename] ) identifier[isGray] = identifier[qImage] . identifier[isGrayscale] () keyword[if] identifier[isGray] keyword[and] identifier[qImage] . identifier[depth] ()== literal[int] : keyword[return] identifier[byte_view] ( identifier[qImage] )[..., literal[int] ] identifier[hasAlpha] = identifier[qImage] . identifier[hasAlphaChannel] () keyword[if] identifier[hasAlpha] : identifier[targetFormat] = identifier[_qt] . identifier[QImage] . identifier[Format_ARGB32] keyword[else] : identifier[targetFormat] = identifier[_qt] . identifier[QImage] . identifier[Format_RGB32] keyword[if] identifier[qImage] . identifier[format] ()!= identifier[targetFormat] : identifier[qImage] = identifier[qImage] . identifier[convertToFormat] ( identifier[targetFormat] ) identifier[result] = identifier[rgb_view] ( identifier[qImage] ) keyword[if] identifier[isGray] : identifier[result] = identifier[result] [..., literal[int] ] keyword[if] identifier[hasAlpha] : keyword[if] identifier[masked] : identifier[mask] =( identifier[alpha_view] ( identifier[qImage] )== literal[int] ) keyword[if] identifier[_np] . identifier[ndim] ( identifier[result] )== literal[int] : identifier[mask] = identifier[_np] . identifier[repeat] ( identifier[mask] [..., keyword[None] ], literal[int] , identifier[axis] = literal[int] ) identifier[result] = identifier[_np] . identifier[ma] . identifier[masked_array] ( identifier[result] , identifier[mask] ) keyword[else] : identifier[result] = identifier[_np] . identifier[dstack] (( identifier[result] , identifier[alpha_view] ( identifier[qImage] ))) keyword[return] identifier[result]
def imread(filename, masked=False): """Convenience function that uses the QImage_ constructor to read an image from the given file and return an `rgb_view` of the result. This is intentionally similar to scipy.ndimage.imread (which uses PIL), scipy.misc.imread, or matplotlib.pyplot.imread (using PIL for non-PNGs). For grayscale images, return 2D array (even if it comes from a 32-bit representation; this is a consequence of the QImage API). For images with an alpha channel, the resulting number of channels will be 2 (grayscale+alpha) or 4 (RGB+alpha). Alternatively, one may pass `masked = True` in order to get `masked arrays`_ back. Note that only fully transparent pixels are masked (and that masked arrays only support binary masks). The value of `masked` is ignored when the loaded image has no alpha channel (i.e., one would not get a masked array in that case). This function has been added in version 1.3. """ qImage = _qt.QImage(filename) if qImage.isNull(): raise IOError('loading %r failed' % filename) # depends on [control=['if'], data=[]] isGray = qImage.isGrayscale() if isGray and qImage.depth() == 8: return byte_view(qImage)[..., 0] # depends on [control=['if'], data=[]] hasAlpha = qImage.hasAlphaChannel() if hasAlpha: targetFormat = _qt.QImage.Format_ARGB32 # depends on [control=['if'], data=[]] else: targetFormat = _qt.QImage.Format_RGB32 if qImage.format() != targetFormat: qImage = qImage.convertToFormat(targetFormat) # depends on [control=['if'], data=['targetFormat']] result = rgb_view(qImage) if isGray: result = result[..., 0] # depends on [control=['if'], data=[]] if hasAlpha: if masked: mask = alpha_view(qImage) == 0 if _np.ndim(result) == 3: mask = _np.repeat(mask[..., None], 3, axis=2) # depends on [control=['if'], data=[]] result = _np.ma.masked_array(result, mask) # depends on [control=['if'], data=[]] else: result = _np.dstack((result, alpha_view(qImage))) # depends on [control=['if'], data=[]] return result
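A hedged usage sketch for `imread`, assuming the function and its `_qt`/`_np` module aliases are importable, and that `photo.png` and `sprite.png` are hypothetical files on disk:

import numpy as np

rgb = imread("photo.png")
print(rgb.shape, rgb.dtype)  # e.g. (480, 640, 3) uint8 for a plain RGB image

sprite = imread("sprite.png", masked=True)
# A masked array is only produced when the file actually has an alpha channel.
print(np.ma.isMaskedArray(sprite))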
def setup(self):
        """
        We need to use socket._fileobject because SSL.Connection
        doesn't have a 'dup' method. Exactly why is unclear, but
        this is backed up by comments in socket.py and SSL/connection.c
        """
        self.connection = self.request # for doPOST
        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
def function[setup, parameter[self]]:
    constant[
        We need to use socket._fileobject because SSL.Connection
        doesn't have a 'dup' method. Exactly why is unclear, but
        this is backed up by comments in socket.py and SSL/connection.c
        ]
    name[self].connection assign[=] name[self].request
    name[self].rfile assign[=] call[name[socket]._fileobject, parameter[name[self].request, constant[rb], name[self].rbufsize]]
    name[self].wfile assign[=] call[name[socket]._fileobject, parameter[name[self].request, constant[wb], name[self].wbufsize]]
keyword[def] identifier[setup] ( identifier[self] ): literal[string] identifier[self] . identifier[connection] = identifier[self] . identifier[request] identifier[self] . identifier[rfile] = identifier[socket] . identifier[_fileobject] ( identifier[self] . identifier[request] , literal[string] , identifier[self] . identifier[rbufsize] ) identifier[self] . identifier[wfile] = identifier[socket] . identifier[_fileobject] ( identifier[self] . identifier[request] , literal[string] , identifier[self] . identifier[wbufsize] )
def setup(self):
    """
    We need to use socket._fileobject because SSL.Connection
    doesn't have a 'dup' method. Exactly why is unclear, but
    this is backed up by comments in socket.py and SSL/connection.c
    """
    self.connection = self.request # for doPOST
    self.rfile = socket._fileobject(self.request, 'rb', self.rbufsize)
    self.wfile = socket._fileobject(self.request, 'wb', self.wbufsize)
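A hypothetical Python 2-era handler wiring the override in. The stock `SocketServer.StreamRequestHandler.setup` builds `rfile`/`wfile` via `connection.makefile()`, which an `SSL.Connection` cannot support without `dup()`, hence the direct `socket._fileobject` wrapping:

import socket
from BaseHTTPServer import BaseHTTPRequestHandler  # Python 2 stdlib

class SecureHTTPRequestHandler(BaseHTTPRequestHandler):
    def setup(self):
        # As in the entry above: SSL.Connection has no dup(), so wrap it directly.
        self.connection = self.request
        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write("ok\n")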
def add_rect(img, box, color=None, thickness=1):
    """
    Draws a bounding box inside the image.

    :param img: Input image
    :param box: Box object that defines the bounding box.
    :param color: Color of the box
    :param thickness: Thickness of line
    :return: None; the rectangle is drawn on ``img`` in place
    """
    if color is None:
        color = COL_GRAY
    box = box.to_int()
    cv.rectangle(img, box.top_left(), box.bottom_right(), color, thickness)
def function[add_rect, parameter[img, box, color, thickness]]:
    constant[
    Draws a bounding box inside the image.

    :param img: Input image
    :param box: Box object that defines the bounding box.
    :param color: Color of the box
    :param thickness: Thickness of line
    :return: None; the rectangle is drawn on ``img`` in place
    ]
    if compare[name[color] is constant[None]] begin[:]
        variable[color] assign[=] name[COL_GRAY]
    variable[box] assign[=] call[name[box].to_int, parameter[]]
    call[name[cv].rectangle, parameter[name[img], call[name[box].top_left, parameter[]], call[name[box].bottom_right, parameter[]], name[color], name[thickness]]]
keyword[def] identifier[add_rect] ( identifier[img] , identifier[box] , identifier[color] = keyword[None] , identifier[thickness] = literal[int] ): literal[string] keyword[if] identifier[color] keyword[is] keyword[None] : identifier[color] = identifier[COL_GRAY] identifier[box] = identifier[box] . identifier[to_int] () identifier[cv] . identifier[rectangle] ( identifier[img] , identifier[box] . identifier[top_left] (), identifier[box] . identifier[bottom_right] (), identifier[color] , identifier[thickness] )
def add_rect(img, box, color=None, thickness=1):
    """
    Draws a bounding box inside the image.

    :param img: Input image
    :param box: Box object that defines the bounding box.
    :param color: Color of the box
    :param thickness: Thickness of line
    :return: None; the rectangle is drawn on ``img`` in place
    """
    if color is None:
        color = COL_GRAY # depends on [control=['if'], data=['color']]
    box = box.to_int()
    cv.rectangle(img, box.top_left(), box.bottom_right(), color, thickness)
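Driving `add_rect` with a minimal hypothetical `Box` stub (only the three methods the function calls) and OpenCV bound to the `cv` alias used above; the real `Box` and `COL_GRAY` come from the surrounding library:

import numpy as np
import cv2 as cv  # assumed binding for the cv alias used above

class Box:
    # Hypothetical stand-in exposing just what add_rect needs.
    def __init__(self, x0, y0, x1, y1):
        self._c = (x0, y0, x1, y1)

    def to_int(self):
        return Box(*(int(v) for v in self._c))

    def top_left(self):
        return self._c[:2]

    def bottom_right(self):
        return self._c[2:]

img = np.zeros((100, 100, 3), dtype=np.uint8)
# Pass an explicit color so the COL_GRAY module constant is not needed.
add_rect(img, Box(10.4, 10.9, 60.0, 80.0), color=(0, 255, 0), thickness=2)
print(img.any())  # True: the rectangle was drawn on img in place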
def reset_attr_by_path(self, field_path): """ It restores original values for fields looked up by field path. Field path is dot-formatted string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as wildcard. :type field_path: str """ fields, next_field = self._get_fields_by_path(field_path) for field in fields: if next_field: try: self.get_field_value(field).reset_attr_by_path(next_field) except AttributeError: pass else: self.reset_field_value(field)
def function[reset_attr_by_path, parameter[self, field_path]]: constant[ It restores original values for fields looked up by field path. Field path is dot-formatted string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as wildcard. :type field_path: str ] <ast.Tuple object at 0x7da1b0aa7910> assign[=] call[name[self]._get_fields_by_path, parameter[name[field_path]]] for taget[name[field]] in starred[name[fields]] begin[:] if name[next_field] begin[:] <ast.Try object at 0x7da1b0aa4a30>
keyword[def] identifier[reset_attr_by_path] ( identifier[self] , identifier[field_path] ): literal[string] identifier[fields] , identifier[next_field] = identifier[self] . identifier[_get_fields_by_path] ( identifier[field_path] ) keyword[for] identifier[field] keyword[in] identifier[fields] : keyword[if] identifier[next_field] : keyword[try] : identifier[self] . identifier[get_field_value] ( identifier[field] ). identifier[reset_attr_by_path] ( identifier[next_field] ) keyword[except] identifier[AttributeError] : keyword[pass] keyword[else] : identifier[self] . identifier[reset_field_value] ( identifier[field] )
def reset_attr_by_path(self, field_path): """ It restores original values for fields looked up by field path. Field path is dot-formatted string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as wildcard. :type field_path: str """ (fields, next_field) = self._get_fields_by_path(field_path) for field in fields: if next_field: try: self.get_field_value(field).reset_attr_by_path(next_field) # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: self.reset_field_value(field) # depends on [control=['for'], data=['field']]
def _find_executable_task_instances(self, simple_dag_bag, states, session=None): """ Finds TIs that are ready for execution with respect to pool limits, dag concurrency, executor state, and priority. :param simple_dag_bag: TaskInstances associated with DAGs in the simple_dag_bag will be fetched from the DB and executed :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag :param executor: the executor that runs task instances :type executor: BaseExecutor :param states: Execute TaskInstances in these states :type states: tuple[airflow.utils.state.State] :return: list[airflow.models.TaskInstance] """ executable_tis = [] # Get all task instances associated with scheduled # DagRuns which are not backfilled, in the given states, # and the dag is not paused TI = models.TaskInstance DR = models.DagRun DM = models.DagModel ti_query = ( session .query(TI) .filter(TI.dag_id.in_(simple_dag_bag.dag_ids)) .outerjoin( DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date) ) .filter(or_(DR.run_id == None, # noqa: E711 not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%')))) .outerjoin(DM, DM.dag_id == TI.dag_id) .filter(or_(DM.dag_id == None, # noqa: E711 not_(DM.is_paused))) ) # Additional filters on task instance state if None in states: ti_query = ti_query.filter( or_(TI.state == None, TI.state.in_(states)) # noqa: E711 ) else: ti_query = ti_query.filter(TI.state.in_(states)) task_instances_to_examine = ti_query.all() if len(task_instances_to_examine) == 0: self.log.debug("No tasks to consider for execution.") return executable_tis # Put one task instance on each line task_instance_str = "\n\t".join( [repr(x) for x in task_instances_to_examine]) self.log.info( "%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str ) # Get the pool settings pools = {p.pool: p for p in session.query(models.Pool).all()} pool_to_task_instances = defaultdict(list) for task_instance in task_instances_to_examine: pool_to_task_instances[task_instance.pool].append(task_instance) states_to_count_as_running = [State.RUNNING, State.QUEUED] # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks. dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps( states=states_to_count_as_running, session=session) # Go through each pool, and queue up a task for execution if there are # any open slots in the pool. for pool, task_instances in pool_to_task_instances.items(): pool_name = pool if not pool: # Arbitrary: # If queued outside of a pool, trigger no more than # non_pooled_task_slot_count open_slots = models.Pool.default_pool_open_slots() pool_name = models.Pool.default_pool_name else: if pool not in pools: self.log.warning( "Tasks using non-existent pool '%s' will not be scheduled", pool ) open_slots = 0 else: open_slots = pools[pool].open_slots(session=session) num_ready = len(task_instances) self.log.info( "Figuring out tasks to run in Pool(name=%s) with %s open slots " "and %s task instances ready to be queued", pool, open_slots, num_ready ) priority_sorted_task_instances = sorted( task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)) # Number of tasks that cannot be scheduled because of no open slot in pool num_starving_tasks = 0 for current_index, task_instance in enumerate(priority_sorted_task_instances): if open_slots <= 0: self.log.info( "Not scheduling since there are %s open slots in pool %s", open_slots, pool ) # Can't schedule any more since there are no more open slots. 
num_starving_tasks = len(priority_sorted_task_instances) - current_index break # Check to make sure that the task concurrency of the DAG hasn't been # reached. dag_id = task_instance.dag_id simple_dag = simple_dag_bag.get_dag(dag_id) current_dag_concurrency = dag_concurrency_map[dag_id] dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency self.log.info( "DAG %s has %s/%s running and queued tasks", dag_id, current_dag_concurrency, dag_concurrency_limit ) if current_dag_concurrency >= dag_concurrency_limit: self.log.info( "Not executing %s since the number of tasks running or queued " "from DAG %s is >= to the DAG's task concurrency limit of %s", task_instance, dag_id, dag_concurrency_limit ) continue task_concurrency_limit = simple_dag.get_task_special_arg( task_instance.task_id, 'task_concurrency') if task_concurrency_limit is not None: current_task_concurrency = task_concurrency_map[ (task_instance.dag_id, task_instance.task_id) ] if current_task_concurrency >= task_concurrency_limit: self.log.info("Not executing %s since the task concurrency for" " this task has been reached.", task_instance) continue if self.executor.has_task(task_instance): self.log.debug( "Not handling task %s as the executor reports it is running", task_instance.key ) continue executable_tis.append(task_instance) open_slots -= 1 dag_concurrency_map[dag_id] += 1 task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1 Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name), num_starving_tasks) task_instance_str = "\n\t".join( [repr(x) for x in executable_tis]) self.log.info( "Setting the following tasks to queued state:\n\t%s", task_instance_str) # so these dont expire on commit for ti in executable_tis: copy_dag_id = ti.dag_id copy_execution_date = ti.execution_date copy_task_id = ti.task_id make_transient(ti) ti.dag_id = copy_dag_id ti.execution_date = copy_execution_date ti.task_id = copy_task_id return executable_tis
def function[_find_executable_task_instances, parameter[self, simple_dag_bag, states, session]]: constant[ Finds TIs that are ready for execution with respect to pool limits, dag concurrency, executor state, and priority. :param simple_dag_bag: TaskInstances associated with DAGs in the simple_dag_bag will be fetched from the DB and executed :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag :param executor: the executor that runs task instances :type executor: BaseExecutor :param states: Execute TaskInstances in these states :type states: tuple[airflow.utils.state.State] :return: list[airflow.models.TaskInstance] ] variable[executable_tis] assign[=] list[[]] variable[TI] assign[=] name[models].TaskInstance variable[DR] assign[=] name[models].DagRun variable[DM] assign[=] name[models].DagModel variable[ti_query] assign[=] call[call[call[call[call[call[name[session].query, parameter[name[TI]]].filter, parameter[call[name[TI].dag_id.in_, parameter[name[simple_dag_bag].dag_ids]]]].outerjoin, parameter[name[DR], call[name[and_], parameter[compare[name[DR].dag_id equal[==] name[TI].dag_id], compare[name[DR].execution_date equal[==] name[TI].execution_date]]]]].filter, parameter[call[name[or_], parameter[compare[name[DR].run_id equal[==] constant[None]], call[name[not_], parameter[call[name[DR].run_id.like, parameter[binary_operation[name[BackfillJob].ID_PREFIX + constant[%]]]]]]]]]].outerjoin, parameter[name[DM], compare[name[DM].dag_id equal[==] name[TI].dag_id]]].filter, parameter[call[name[or_], parameter[compare[name[DM].dag_id equal[==] constant[None]], call[name[not_], parameter[name[DM].is_paused]]]]]] if compare[constant[None] in name[states]] begin[:] variable[ti_query] assign[=] call[name[ti_query].filter, parameter[call[name[or_], parameter[compare[name[TI].state equal[==] constant[None]], call[name[TI].state.in_, parameter[name[states]]]]]]] variable[task_instances_to_examine] assign[=] call[name[ti_query].all, parameter[]] if compare[call[name[len], parameter[name[task_instances_to_examine]]] equal[==] constant[0]] begin[:] call[name[self].log.debug, parameter[constant[No tasks to consider for execution.]]] return[name[executable_tis]] variable[task_instance_str] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b0371db0>]] call[name[self].log.info, parameter[constant[%s tasks up for execution: %s], call[name[len], parameter[name[task_instances_to_examine]]], name[task_instance_str]]] variable[pools] assign[=] <ast.DictComp object at 0x7da1b03719c0> variable[pool_to_task_instances] assign[=] call[name[defaultdict], parameter[name[list]]] for taget[name[task_instance]] in starred[name[task_instances_to_examine]] begin[:] call[call[name[pool_to_task_instances]][name[task_instance].pool].append, parameter[name[task_instance]]] variable[states_to_count_as_running] assign[=] list[[<ast.Attribute object at 0x7da1b0371360>, <ast.Attribute object at 0x7da1b0371300>]] <ast.Tuple object at 0x7da1b0371270> assign[=] call[name[self].__get_concurrency_maps, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b0371030>, <ast.Name object at 0x7da1b0371000>]]] in starred[call[name[pool_to_task_instances].items, parameter[]]] begin[:] variable[pool_name] assign[=] name[pool] if <ast.UnaryOp object at 0x7da1b0370e80> begin[:] variable[open_slots] assign[=] call[name[models].Pool.default_pool_open_slots, parameter[]] variable[pool_name] assign[=] name[models].Pool.default_pool_name variable[num_ready] assign[=] call[name[len], parameter[name[task_instances]]] 
call[name[self].log.info, parameter[constant[Figuring out tasks to run in Pool(name=%s) with %s open slots and %s task instances ready to be queued], name[pool], name[open_slots], name[num_ready]]] variable[priority_sorted_task_instances] assign[=] call[name[sorted], parameter[name[task_instances]]] variable[num_starving_tasks] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da1b0370100>, <ast.Name object at 0x7da1b03700d0>]]] in starred[call[name[enumerate], parameter[name[priority_sorted_task_instances]]]] begin[:] if compare[name[open_slots] less_or_equal[<=] constant[0]] begin[:] call[name[self].log.info, parameter[constant[Not scheduling since there are %s open slots in pool %s], name[open_slots], name[pool]]] variable[num_starving_tasks] assign[=] binary_operation[call[name[len], parameter[name[priority_sorted_task_instances]]] - name[current_index]] break variable[dag_id] assign[=] name[task_instance].dag_id variable[simple_dag] assign[=] call[name[simple_dag_bag].get_dag, parameter[name[dag_id]]] variable[current_dag_concurrency] assign[=] call[name[dag_concurrency_map]][name[dag_id]] variable[dag_concurrency_limit] assign[=] call[name[simple_dag_bag].get_dag, parameter[name[dag_id]]].concurrency call[name[self].log.info, parameter[constant[DAG %s has %s/%s running and queued tasks], name[dag_id], name[current_dag_concurrency], name[dag_concurrency_limit]]] if compare[name[current_dag_concurrency] greater_or_equal[>=] name[dag_concurrency_limit]] begin[:] call[name[self].log.info, parameter[constant[Not executing %s since the number of tasks running or queued from DAG %s is >= to the DAG's task concurrency limit of %s], name[task_instance], name[dag_id], name[dag_concurrency_limit]]] continue variable[task_concurrency_limit] assign[=] call[name[simple_dag].get_task_special_arg, parameter[name[task_instance].task_id, constant[task_concurrency]]] if compare[name[task_concurrency_limit] is_not constant[None]] begin[:] variable[current_task_concurrency] assign[=] call[name[task_concurrency_map]][tuple[[<ast.Attribute object at 0x7da18bcc8a60>, <ast.Attribute object at 0x7da18bcc8970>]]] if compare[name[current_task_concurrency] greater_or_equal[>=] name[task_concurrency_limit]] begin[:] call[name[self].log.info, parameter[constant[Not executing %s since the task concurrency for this task has been reached.], name[task_instance]]] continue if call[name[self].executor.has_task, parameter[name[task_instance]]] begin[:] call[name[self].log.debug, parameter[constant[Not handling task %s as the executor reports it is running], name[task_instance].key]] continue call[name[executable_tis].append, parameter[name[task_instance]]] <ast.AugAssign object at 0x7da204344520> <ast.AugAssign object at 0x7da2043448e0> <ast.AugAssign object at 0x7da204346f20> call[name[Stats].gauge, parameter[call[constant[pool.starving_tasks.{pool_name}].format, parameter[]], name[num_starving_tasks]]] variable[task_instance_str] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b034baf0>]] call[name[self].log.info, parameter[constant[Setting the following tasks to queued state: %s], name[task_instance_str]]] for taget[name[ti]] in starred[name[executable_tis]] begin[:] variable[copy_dag_id] assign[=] name[ti].dag_id variable[copy_execution_date] assign[=] name[ti].execution_date variable[copy_task_id] assign[=] name[ti].task_id call[name[make_transient], parameter[name[ti]]] name[ti].dag_id assign[=] name[copy_dag_id] name[ti].execution_date assign[=] name[copy_execution_date] 
name[ti].task_id assign[=] name[copy_task_id] return[name[executable_tis]]
keyword[def] identifier[_find_executable_task_instances] ( identifier[self] , identifier[simple_dag_bag] , identifier[states] , identifier[session] = keyword[None] ): literal[string] identifier[executable_tis] =[] identifier[TI] = identifier[models] . identifier[TaskInstance] identifier[DR] = identifier[models] . identifier[DagRun] identifier[DM] = identifier[models] . identifier[DagModel] identifier[ti_query] =( identifier[session] . identifier[query] ( identifier[TI] ) . identifier[filter] ( identifier[TI] . identifier[dag_id] . identifier[in_] ( identifier[simple_dag_bag] . identifier[dag_ids] )) . identifier[outerjoin] ( identifier[DR] , identifier[and_] ( identifier[DR] . identifier[dag_id] == identifier[TI] . identifier[dag_id] , identifier[DR] . identifier[execution_date] == identifier[TI] . identifier[execution_date] ) ) . identifier[filter] ( identifier[or_] ( identifier[DR] . identifier[run_id] == keyword[None] , identifier[not_] ( identifier[DR] . identifier[run_id] . identifier[like] ( identifier[BackfillJob] . identifier[ID_PREFIX] + literal[string] )))) . identifier[outerjoin] ( identifier[DM] , identifier[DM] . identifier[dag_id] == identifier[TI] . identifier[dag_id] ) . identifier[filter] ( identifier[or_] ( identifier[DM] . identifier[dag_id] == keyword[None] , identifier[not_] ( identifier[DM] . identifier[is_paused] ))) ) keyword[if] keyword[None] keyword[in] identifier[states] : identifier[ti_query] = identifier[ti_query] . identifier[filter] ( identifier[or_] ( identifier[TI] . identifier[state] == keyword[None] , identifier[TI] . identifier[state] . identifier[in_] ( identifier[states] )) ) keyword[else] : identifier[ti_query] = identifier[ti_query] . identifier[filter] ( identifier[TI] . identifier[state] . identifier[in_] ( identifier[states] )) identifier[task_instances_to_examine] = identifier[ti_query] . identifier[all] () keyword[if] identifier[len] ( identifier[task_instances_to_examine] )== literal[int] : identifier[self] . identifier[log] . identifier[debug] ( literal[string] ) keyword[return] identifier[executable_tis] identifier[task_instance_str] = literal[string] . identifier[join] ( [ identifier[repr] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[task_instances_to_examine] ]) identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[len] ( identifier[task_instances_to_examine] ), identifier[task_instance_str] ) identifier[pools] ={ identifier[p] . identifier[pool] : identifier[p] keyword[for] identifier[p] keyword[in] identifier[session] . identifier[query] ( identifier[models] . identifier[Pool] ). identifier[all] ()} identifier[pool_to_task_instances] = identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[task_instance] keyword[in] identifier[task_instances_to_examine] : identifier[pool_to_task_instances] [ identifier[task_instance] . identifier[pool] ]. identifier[append] ( identifier[task_instance] ) identifier[states_to_count_as_running] =[ identifier[State] . identifier[RUNNING] , identifier[State] . identifier[QUEUED] ] identifier[dag_concurrency_map] , identifier[task_concurrency_map] = identifier[self] . identifier[__get_concurrency_maps] ( identifier[states] = identifier[states_to_count_as_running] , identifier[session] = identifier[session] ) keyword[for] identifier[pool] , identifier[task_instances] keyword[in] identifier[pool_to_task_instances] . 
identifier[items] (): identifier[pool_name] = identifier[pool] keyword[if] keyword[not] identifier[pool] : identifier[open_slots] = identifier[models] . identifier[Pool] . identifier[default_pool_open_slots] () identifier[pool_name] = identifier[models] . identifier[Pool] . identifier[default_pool_name] keyword[else] : keyword[if] identifier[pool] keyword[not] keyword[in] identifier[pools] : identifier[self] . identifier[log] . identifier[warning] ( literal[string] , identifier[pool] ) identifier[open_slots] = literal[int] keyword[else] : identifier[open_slots] = identifier[pools] [ identifier[pool] ]. identifier[open_slots] ( identifier[session] = identifier[session] ) identifier[num_ready] = identifier[len] ( identifier[task_instances] ) identifier[self] . identifier[log] . identifier[info] ( literal[string] literal[string] , identifier[pool] , identifier[open_slots] , identifier[num_ready] ) identifier[priority_sorted_task_instances] = identifier[sorted] ( identifier[task_instances] , identifier[key] = keyword[lambda] identifier[ti] :(- identifier[ti] . identifier[priority_weight] , identifier[ti] . identifier[execution_date] )) identifier[num_starving_tasks] = literal[int] keyword[for] identifier[current_index] , identifier[task_instance] keyword[in] identifier[enumerate] ( identifier[priority_sorted_task_instances] ): keyword[if] identifier[open_slots] <= literal[int] : identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[open_slots] , identifier[pool] ) identifier[num_starving_tasks] = identifier[len] ( identifier[priority_sorted_task_instances] )- identifier[current_index] keyword[break] identifier[dag_id] = identifier[task_instance] . identifier[dag_id] identifier[simple_dag] = identifier[simple_dag_bag] . identifier[get_dag] ( identifier[dag_id] ) identifier[current_dag_concurrency] = identifier[dag_concurrency_map] [ identifier[dag_id] ] identifier[dag_concurrency_limit] = identifier[simple_dag_bag] . identifier[get_dag] ( identifier[dag_id] ). identifier[concurrency] identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[dag_id] , identifier[current_dag_concurrency] , identifier[dag_concurrency_limit] ) keyword[if] identifier[current_dag_concurrency] >= identifier[dag_concurrency_limit] : identifier[self] . identifier[log] . identifier[info] ( literal[string] literal[string] , identifier[task_instance] , identifier[dag_id] , identifier[dag_concurrency_limit] ) keyword[continue] identifier[task_concurrency_limit] = identifier[simple_dag] . identifier[get_task_special_arg] ( identifier[task_instance] . identifier[task_id] , literal[string] ) keyword[if] identifier[task_concurrency_limit] keyword[is] keyword[not] keyword[None] : identifier[current_task_concurrency] = identifier[task_concurrency_map] [ ( identifier[task_instance] . identifier[dag_id] , identifier[task_instance] . identifier[task_id] ) ] keyword[if] identifier[current_task_concurrency] >= identifier[task_concurrency_limit] : identifier[self] . identifier[log] . identifier[info] ( literal[string] literal[string] , identifier[task_instance] ) keyword[continue] keyword[if] identifier[self] . identifier[executor] . identifier[has_task] ( identifier[task_instance] ): identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[task_instance] . identifier[key] ) keyword[continue] identifier[executable_tis] . 
identifier[append] ( identifier[task_instance] ) identifier[open_slots] -= literal[int] identifier[dag_concurrency_map] [ identifier[dag_id] ]+= literal[int] identifier[task_concurrency_map] [( identifier[task_instance] . identifier[dag_id] , identifier[task_instance] . identifier[task_id] )]+= literal[int] identifier[Stats] . identifier[gauge] ( literal[string] . identifier[format] ( identifier[pool_name] = identifier[pool_name] ), identifier[num_starving_tasks] ) identifier[task_instance_str] = literal[string] . identifier[join] ( [ identifier[repr] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[executable_tis] ]) identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[task_instance_str] ) keyword[for] identifier[ti] keyword[in] identifier[executable_tis] : identifier[copy_dag_id] = identifier[ti] . identifier[dag_id] identifier[copy_execution_date] = identifier[ti] . identifier[execution_date] identifier[copy_task_id] = identifier[ti] . identifier[task_id] identifier[make_transient] ( identifier[ti] ) identifier[ti] . identifier[dag_id] = identifier[copy_dag_id] identifier[ti] . identifier[execution_date] = identifier[copy_execution_date] identifier[ti] . identifier[task_id] = identifier[copy_task_id] keyword[return] identifier[executable_tis]
def _find_executable_task_instances(self, simple_dag_bag, states, session=None): """ Finds TIs that are ready for execution with respect to pool limits, dag concurrency, executor state, and priority. :param simple_dag_bag: TaskInstances associated with DAGs in the simple_dag_bag will be fetched from the DB and executed :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag :param executor: the executor that runs task instances :type executor: BaseExecutor :param states: Execute TaskInstances in these states :type states: tuple[airflow.utils.state.State] :return: list[airflow.models.TaskInstance] """ executable_tis = [] # Get all task instances associated with scheduled # DagRuns which are not backfilled, in the given states, # and the dag is not paused TI = models.TaskInstance DR = models.DagRun DM = models.DagModel # noqa: E711 # noqa: E711 ti_query = session.query(TI).filter(TI.dag_id.in_(simple_dag_bag.dag_ids)).outerjoin(DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)).filter(or_(DR.run_id == None, not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%')))).outerjoin(DM, DM.dag_id == TI.dag_id).filter(or_(DM.dag_id == None, not_(DM.is_paused))) # Additional filters on task instance state if None in states: # noqa: E711 ti_query = ti_query.filter(or_(TI.state == None, TI.state.in_(states))) # depends on [control=['if'], data=['states']] else: ti_query = ti_query.filter(TI.state.in_(states)) task_instances_to_examine = ti_query.all() if len(task_instances_to_examine) == 0: self.log.debug('No tasks to consider for execution.') return executable_tis # depends on [control=['if'], data=[]] # Put one task instance on each line task_instance_str = '\n\t'.join([repr(x) for x in task_instances_to_examine]) self.log.info('%s tasks up for execution:\n\t%s', len(task_instances_to_examine), task_instance_str) # Get the pool settings pools = {p.pool: p for p in session.query(models.Pool).all()} pool_to_task_instances = defaultdict(list) for task_instance in task_instances_to_examine: pool_to_task_instances[task_instance.pool].append(task_instance) # depends on [control=['for'], data=['task_instance']] states_to_count_as_running = [State.RUNNING, State.QUEUED] # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks. (dag_concurrency_map, task_concurrency_map) = self.__get_concurrency_maps(states=states_to_count_as_running, session=session) # Go through each pool, and queue up a task for execution if there are # any open slots in the pool. 
for (pool, task_instances) in pool_to_task_instances.items(): pool_name = pool if not pool: # Arbitrary: # If queued outside of a pool, trigger no more than # non_pooled_task_slot_count open_slots = models.Pool.default_pool_open_slots() pool_name = models.Pool.default_pool_name # depends on [control=['if'], data=[]] elif pool not in pools: self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool) open_slots = 0 # depends on [control=['if'], data=['pool']] else: open_slots = pools[pool].open_slots(session=session) num_ready = len(task_instances) self.log.info('Figuring out tasks to run in Pool(name=%s) with %s open slots and %s task instances ready to be queued', pool, open_slots, num_ready) priority_sorted_task_instances = sorted(task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)) # Number of tasks that cannot be scheduled because of no open slot in pool num_starving_tasks = 0 for (current_index, task_instance) in enumerate(priority_sorted_task_instances): if open_slots <= 0: self.log.info('Not scheduling since there are %s open slots in pool %s', open_slots, pool) # Can't schedule any more since there are no more open slots. num_starving_tasks = len(priority_sorted_task_instances) - current_index break # depends on [control=['if'], data=['open_slots']] # Check to make sure that the task concurrency of the DAG hasn't been # reached. dag_id = task_instance.dag_id simple_dag = simple_dag_bag.get_dag(dag_id) current_dag_concurrency = dag_concurrency_map[dag_id] dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency self.log.info('DAG %s has %s/%s running and queued tasks', dag_id, current_dag_concurrency, dag_concurrency_limit) if current_dag_concurrency >= dag_concurrency_limit: self.log.info("Not executing %s since the number of tasks running or queued from DAG %s is >= to the DAG's task concurrency limit of %s", task_instance, dag_id, dag_concurrency_limit) continue # depends on [control=['if'], data=['dag_concurrency_limit']] task_concurrency_limit = simple_dag.get_task_special_arg(task_instance.task_id, 'task_concurrency') if task_concurrency_limit is not None: current_task_concurrency = task_concurrency_map[task_instance.dag_id, task_instance.task_id] if current_task_concurrency >= task_concurrency_limit: self.log.info('Not executing %s since the task concurrency for this task has been reached.', task_instance) continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['task_concurrency_limit']] if self.executor.has_task(task_instance): self.log.debug('Not handling task %s as the executor reports it is running', task_instance.key) continue # depends on [control=['if'], data=[]] executable_tis.append(task_instance) open_slots -= 1 dag_concurrency_map[dag_id] += 1 task_concurrency_map[task_instance.dag_id, task_instance.task_id] += 1 # depends on [control=['for'], data=[]] Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name), num_starving_tasks) # depends on [control=['for'], data=[]] task_instance_str = '\n\t'.join([repr(x) for x in executable_tis]) self.log.info('Setting the following tasks to queued state:\n\t%s', task_instance_str) # so these dont expire on commit for ti in executable_tis: copy_dag_id = ti.dag_id copy_execution_date = ti.execution_date copy_task_id = ti.task_id make_transient(ti) ti.dag_id = copy_dag_id ti.execution_date = copy_execution_date ti.task_id = copy_task_id # depends on [control=['for'], data=['ti']] return executable_tis
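The pool loop above orders candidates with the tuple key `(-priority_weight, execution_date)`: highest priority first and, within equal priorities, earliest execution date first. A self-contained illustration of that ordering, with a namedtuple standing in for a real TaskInstance:

from collections import namedtuple
from datetime import datetime

TI = namedtuple("TI", "task_id priority_weight execution_date")

tis = [
    TI("a", 1, datetime(2019, 1, 2)),
    TI("b", 5, datetime(2019, 1, 3)),
    TI("c", 5, datetime(2019, 1, 1)),
]

ordered = sorted(tis, key=lambda ti: (-ti.priority_weight, ti.execution_date))
print([ti.task_id for ti in ordered])  # ['c', 'b', 'a']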
def move_edge_target(self, edge_id, node_a): """Moves an edge so that it targets node_a.""" # Grab the edge edge = self.get_edge(edge_id) # Alter the vertices edge['vertices'] = (edge['vertices'][0], node_a)
def function[move_edge_target, parameter[self, edge_id, node_a]]: constant[Moves an edge so that it targets node_a.] variable[edge] assign[=] call[name[self].get_edge, parameter[name[edge_id]]] call[name[edge]][constant[vertices]] assign[=] tuple[[<ast.Subscript object at 0x7da1b28f3c10>, <ast.Name object at 0x7da1b28f10f0>]]
keyword[def] identifier[move_edge_target] ( identifier[self] , identifier[edge_id] , identifier[node_a] ): literal[string] identifier[edge] = identifier[self] . identifier[get_edge] ( identifier[edge_id] ) identifier[edge] [ literal[string] ]=( identifier[edge] [ literal[string] ][ literal[int] ], identifier[node_a] )
def move_edge_target(self, edge_id, node_a): """Moves an edge so that it targets node_a.""" # Grab the edge edge = self.get_edge(edge_id) # Alter the vertices edge['vertices'] = (edge['vertices'][0], node_a)
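A quick usage sketch for the row above. The container class here is hypothetical: the method body implies a store whose get_edge() returns edge dicts carrying a (source, target) 'vertices' tuple, but the source does not show the surrounding class.

class Graph:
    """Hypothetical edge store: edges are dicts keyed by id."""

    def __init__(self):
        self.edges = {}

    def get_edge(self, edge_id):
        return self.edges[edge_id]

    def move_edge_target(self, edge_id, node_a):
        """Moves an edge so that it targets node_a."""
        edge = self.get_edge(edge_id)
        edge['vertices'] = (edge['vertices'][0], node_a)

g = Graph()
g.edges['e1'] = {'vertices': ('a', 'b')}
g.move_edge_target('e1', 'c')
assert g.edges['e1']['vertices'] == ('a', 'c')  # source kept, target moved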
def defaults(self): """Return the defaults, with their values interpolated (with the defaults dict itself) Mainly to support defaults using values such as %(here)s """ defaults = ConfigParser.defaults(self).copy() for key, val in iteritems(defaults): defaults[key] = self.get('DEFAULT', key) or val return defaults
def function[defaults, parameter[self]]: constant[Return the defaults, with their values interpolated (with the defaults dict itself) Mainly to support defaults using values such as %(here)s ] variable[defaults] assign[=] call[call[name[ConfigParser].defaults, parameter[name[self]]].copy, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b053b0d0>, <ast.Name object at 0x7da1b05382b0>]]] in starred[call[name[iteritems], parameter[name[defaults]]]] begin[:] call[name[defaults]][name[key]] assign[=] <ast.BoolOp object at 0x7da1b0538760> return[name[defaults]]
keyword[def] identifier[defaults] ( identifier[self] ): literal[string] identifier[defaults] = identifier[ConfigParser] . identifier[defaults] ( identifier[self] ). identifier[copy] () keyword[for] identifier[key] , identifier[val] keyword[in] identifier[iteritems] ( identifier[defaults] ): identifier[defaults] [ identifier[key] ]= identifier[self] . identifier[get] ( literal[string] , identifier[key] ) keyword[or] identifier[val] keyword[return] identifier[defaults]
def defaults(self): """Return the defaults, with their values interpolated (with the defaults dict itself) Mainly to support defaults using values such as %(here)s """ defaults = ConfigParser.defaults(self).copy() for (key, val) in iteritems(defaults): defaults[key] = self.get('DEFAULT', key) or val # depends on [control=['for'], data=[]] return defaults
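A minimal sketch of why the override matters, using a stand-in subclass (the real host class is not shown in the source): the base ConfigParser.defaults() returns the raw template strings, while routing each key back through get() expands values such as %(here)s.

from configparser import ConfigParser

class InterpolatingConfigParser(ConfigParser):
    def defaults(self):
        # items() here; the original uses six.iteritems for py2/py3 support
        defaults = ConfigParser.defaults(self).copy()
        for key, val in defaults.items():
            defaults[key] = self.get('DEFAULT', key) or val
        return defaults

parser = InterpolatingConfigParser(
    defaults={'here': '/srv/app', 'log_dir': '%(here)s/logs'})
print(parser.defaults()['log_dir'])              # /srv/app/logs (interpolated)
print(ConfigParser.defaults(parser)['log_dir'])  # %(here)s/logs (raw template)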
def transition_cycle(n_states, prob):
    '''Construct a cyclic transition matrix over `n_states`.

    The transition matrix will have the following properties:

        - `transition[i, i] = p`
        - `transition[i, i + 1] = (1 - p)`

    This type of transition matrix is appropriate for state spaces
    with cyclical structure, such as metrical position within a bar.
    For example, a song in 4/4 time has state transitions of the form

        1->{1, 2}, 2->{2, 3}, 3->{3, 4}, 4->{4, 1}.

    Parameters
    ----------
    n_states : int > 1
        The number of states

    prob : float in [0, 1] or iterable, length=n_states
        If a scalar, this is the probability of a self-transition.

        If a vector of length `n_states`, `p[i]` is the probability of
        state `i`'s self-transition.

    Returns
    -------
    transition : np.ndarray [shape=(n_states, n_states)]
        The transition matrix

    Examples
    --------

    >>> librosa.sequence.transition_cycle(4, 0.9)
    array([[0.9, 0.1, 0. , 0. ],
           [0. , 0.9, 0.1, 0. ],
           [0. , 0. , 0.9, 0.1],
           [0.1, 0. , 0. , 0.9]])
    '''

    if not isinstance(n_states, int) or n_states <= 1:
        raise ParameterError('n_states={} must be a positive integer > 1'.format(n_states))

    transition = np.zeros((n_states, n_states), dtype=np.float)

    # if it's a float, make it a vector
    prob = np.asarray(prob, dtype=np.float)

    if prob.ndim == 0:
        prob = np.tile(prob, n_states)

    if prob.shape != (n_states,):
        raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))

    if np.any(prob < 0) or np.any(prob > 1):
        raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))

    for i, prob_i in enumerate(prob):
        transition[i, np.mod(i + 1, n_states)] = 1. - prob_i
        transition[i, i] = prob_i

    return transition
def function[transition_cycle, parameter[n_states, prob]]: constant[Construct a cyclic transition matrix over `n_states`. The transition matrix will have the following properties: - `transition[i, i] = p` - `transition[i, i + 1] = (1 - p)` This type of transition matrix is appropriate for state spaces with cyclical structure, such as metrical position within a bar. For example, a song in 4/4 time has state transitions of the form 1->{1, 2}, 2->{2, 3}, 3->{3, 4}, 4->{4, 1}. Parameters ---------- n_states : int > 1 The number of states prob : float in [0, 1] or iterable, length=n_states If a scalar, this is the probability of a self-transition. If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition. Returns ------- transition : np.ndarray [shape=(n_states, n_states)] The transition matrix Examples -------- >>> librosa.sequence.transition_cycle(4, 0.9) array([[0.9, 0.1, 0. , 0. ], [0. , 0.9, 0.1, 0. ], [0. , 0. , 0.9, 0.1], [0.1, 0. , 0. , 0.9]]) ] if <ast.BoolOp object at 0x7da20c991b10> begin[:] <ast.Raise object at 0x7da20c993400> variable[transition] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c993040>, <ast.Name object at 0x7da20c992c20>]]]] variable[prob] assign[=] call[name[np].asarray, parameter[name[prob]]] if compare[name[prob].ndim equal[==] constant[0]] begin[:] variable[prob] assign[=] call[name[np].tile, parameter[name[prob], name[n_states]]] if compare[name[prob].shape not_equal[!=] tuple[[<ast.Name object at 0x7da20c991180>]]] begin[:] <ast.Raise object at 0x7da20c991750> if <ast.BoolOp object at 0x7da20c9926e0> begin[:] <ast.Raise object at 0x7da20c993ca0> for taget[tuple[[<ast.Name object at 0x7da20c991cc0>, <ast.Name object at 0x7da20c990730>]]] in starred[call[name[enumerate], parameter[name[prob]]]] begin[:] call[name[transition]][tuple[[<ast.Name object at 0x7da20c990400>, <ast.Call object at 0x7da20c990c40>]]] assign[=] binary_operation[constant[1.0] - name[prob_i]] call[name[transition]][tuple[[<ast.Name object at 0x7da18f09f4f0>, <ast.Name object at 0x7da18f09ee90>]]] assign[=] name[prob_i] return[name[transition]]
keyword[def] identifier[transition_cycle] ( identifier[n_states] , identifier[prob] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[n_states] , identifier[int] ) keyword[or] identifier[n_states] <= literal[int] : keyword[raise] identifier[ParameterError] ( literal[string] ) identifier[transition] = identifier[np] . identifier[zeros] (( identifier[n_states] , identifier[n_states] ), identifier[dtype] = identifier[np] . identifier[float] ) identifier[prob] = identifier[np] . identifier[asarray] ( identifier[prob] , identifier[dtype] = identifier[np] . identifier[float] ) keyword[if] identifier[prob] . identifier[ndim] == literal[int] : identifier[prob] = identifier[np] . identifier[tile] ( identifier[prob] , identifier[n_states] ) keyword[if] identifier[prob] . identifier[shape] !=( identifier[n_states] ,): keyword[raise] identifier[ParameterError] ( literal[string] . identifier[format] ( identifier[prob] , identifier[n_states] )) keyword[if] identifier[np] . identifier[any] ( identifier[prob] < literal[int] ) keyword[or] identifier[np] . identifier[any] ( identifier[prob] > literal[int] ): keyword[raise] identifier[ParameterError] ( literal[string] . identifier[format] ( identifier[prob] )) keyword[for] identifier[i] , identifier[prob_i] keyword[in] identifier[enumerate] ( identifier[prob] ): identifier[transition] [ identifier[i] , identifier[np] . identifier[mod] ( identifier[i] + literal[int] , identifier[n_states] )]= literal[int] - identifier[prob_i] identifier[transition] [ identifier[i] , identifier[i] ]= identifier[prob_i] keyword[return] identifier[transition]
def transition_cycle(n_states, prob):
    """Construct a cyclic transition matrix over `n_states`.

    The transition matrix will have the following properties:

        - `transition[i, i] = p`
        - `transition[i, i + 1] = (1 - p)`

    This type of transition matrix is appropriate for state spaces
    with cyclical structure, such as metrical position within a bar.
    For example, a song in 4/4 time has state transitions of the form

        1->{1, 2}, 2->{2, 3}, 3->{3, 4}, 4->{4, 1}.

    Parameters
    ----------
    n_states : int > 1
        The number of states

    prob : float in [0, 1] or iterable, length=n_states
        If a scalar, this is the probability of a self-transition.

        If a vector of length `n_states`, `p[i]` is the probability of
        state `i`'s self-transition.

    Returns
    -------
    transition : np.ndarray [shape=(n_states, n_states)]
        The transition matrix

    Examples
    --------

    >>> librosa.sequence.transition_cycle(4, 0.9)
    array([[0.9, 0.1, 0. , 0. ],
           [0. , 0.9, 0.1, 0. ],
           [0. , 0. , 0.9, 0.1],
           [0.1, 0. , 0. , 0.9]])
    """
    if not isinstance(n_states, int) or n_states <= 1:
        raise ParameterError('n_states={} must be a positive integer > 1'.format(n_states)) # depends on [control=['if'], data=[]]
    transition = np.zeros((n_states, n_states), dtype=np.float)
    # if it's a float, make it a vector
    prob = np.asarray(prob, dtype=np.float)
    if prob.ndim == 0:
        prob = np.tile(prob, n_states) # depends on [control=['if'], data=[]]
    if prob.shape != (n_states,):
        raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states)) # depends on [control=['if'], data=[]]
    if np.any(prob < 0) or np.any(prob > 1):
        raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob)) # depends on [control=['if'], data=[]]
    for (i, prob_i) in enumerate(prob):
        transition[i, np.mod(i + 1, n_states)] = 1.0 - prob_i
        transition[i, i] = prob_i # depends on [control=['for'], data=[]]
    return transition
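The docstring example covers only the scalar form of prob; here is a short sketch of the per-state form described in the Parameters section (librosa must be installed for this to run).

import numpy as np
import librosa

# Per-state self-loop probabilities: state 0 is sticky (0.99),
# state 3 advances half the time (0.5).
T = librosa.sequence.transition_cycle(4, [0.99, 0.9, 0.9, 0.5])
print(np.round(T, 2))
assert np.allclose(T.sum(axis=1), 1.0)  # each row is a probability distribution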
def timeit(func):
    """
    Decorator that logs the execution time of a function.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        start = timer()
        result = func(*args, **kwargs)
        cost = timer() - start
        logger.debug('<method: %s> finished in %2.2f sec' % (func.__name__, cost))
        return result
    return wrapped_func
def function[timeit, parameter[func]]: constant[ Decorator that logs the cost time of a function. ] def function[wrapped_func, parameter[]]: variable[start] assign[=] call[name[timer], parameter[]] variable[result] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b085c1c0>]] variable[cost] assign[=] binary_operation[call[name[timer], parameter[]] - name[start]] call[name[logger].debug, parameter[binary_operation[constant[<method: %s> finished in %2.2f sec] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b085fee0>, <ast.Name object at 0x7da1b085e4a0>]]]]] return[name[result]] return[name[wrapped_func]]
keyword[def] identifier[timeit] ( identifier[func] ): literal[string] @ identifier[wraps] ( identifier[func] ) keyword[def] identifier[wrapped_func] (* identifier[args] ,** identifier[kwargs] ): identifier[start] = identifier[timer] () identifier[result] = identifier[func] (* identifier[args] ,** identifier[kwargs] ) identifier[cost] = identifier[timer] ()- identifier[start] identifier[logger] . identifier[debug] ( literal[string] %( identifier[func] . identifier[__name__] , identifier[cost] )) keyword[return] identifier[result] keyword[return] identifier[wrapped_func]
def timeit(func):
    """
    Decorator that logs the execution time of a function.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        start = timer()
        result = func(*args, **kwargs)
        cost = timer() - start
        logger.debug('<method: %s> finished in %2.2f sec' % (func.__name__, cost))
        return result
    return wrapped_func
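A usage sketch. `timer` and `logger` are module-level names in the original; here they are assumed to be timeit.default_timer and a standard logging.Logger, which is a guess consistent with how they are used.

import logging
from functools import wraps
from timeit import default_timer as timer

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def timeit(func):
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        start = timer()
        result = func(*args, **kwargs)
        cost = timer() - start
        logger.debug('<method: %s> finished in %2.2f sec' % (func.__name__, cost))
        return result
    return wrapped_func

@timeit
def slow_sum(n):
    return sum(range(n))

slow_sum(10_000_000)  # logs: <method: slow_sum> finished in 0.xx sec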
def verify_integrity(self):
    """Verifies that all required functions have been injected."""

    if not self.__integrity_check:
        if not self.__appid:
            raise Exception('U2F_APPID was not defined! Please define it in configuration file.')

        if self.__facets_enabled and not len(self.__facets_list):
            raise Exception("""U2F facets have been enabled, but the U2F facet list is empty.
                Please either disable facets by setting U2F_FACETS_ENABLED to False,
                or add a facet list by assigning it to U2F_FACETS_LIST.
            """)

        # Injection
        undefined_message = 'U2F {name} handler is not defined! Please import {name} through {method}!'

        if not self.__get_u2f_devices:
            raise Exception(undefined_message.format(name='Read', method='@u2f.read'))

        if not self.__save_u2f_devices:
            raise Exception(undefined_message.format(name='Save', method='@u2f.save'))

        if not self.__call_success_enroll:
            raise Exception(undefined_message.format(name='enroll onSuccess', method='@u2f.enroll_on_success'))

        if not self.__call_success_sign:
            raise Exception(undefined_message.format(name='sign onSuccess', method='@u2f.sign_on_success'))

        self.__integrity_check = True

    return True
def function[verify_integrity, parameter[self]]: constant[Verifies that all required functions been injected.] if <ast.UnaryOp object at 0x7da20e9b0fd0> begin[:] if <ast.UnaryOp object at 0x7da20e9b1ab0> begin[:] <ast.Raise object at 0x7da20e9b1f30> if <ast.BoolOp object at 0x7da20e9b13c0> begin[:] <ast.Raise object at 0x7da20e9b2650> variable[undefined_message] assign[=] constant[U2F {name} handler is not defined! Please import {name} through {method}!] if <ast.UnaryOp object at 0x7da20e9b2f20> begin[:] <ast.Raise object at 0x7da20e9b0490> if <ast.UnaryOp object at 0x7da20e9b16c0> begin[:] <ast.Raise object at 0x7da20e9b0a00> if <ast.UnaryOp object at 0x7da2041d81c0> begin[:] <ast.Raise object at 0x7da2041d9780> if <ast.UnaryOp object at 0x7da2041dabc0> begin[:] <ast.Raise object at 0x7da2041db790> name[self].__integrity_check assign[=] constant[True] return[constant[True]]
keyword[def] identifier[verify_integrity] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[__integrity_check] : keyword[if] keyword[not] identifier[self] . identifier[__appid] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[self] . identifier[__facets_enabled] keyword[and] keyword[not] identifier[len] ( identifier[self] . identifier[__facets_list] ): keyword[raise] identifier[Exception] ( literal[string] ) identifier[undefined_message] = literal[string] keyword[if] keyword[not] identifier[self] . identifier[__get_u2f_devices] : keyword[raise] identifier[Exception] ( identifier[undefined_message] . identifier[format] ( identifier[name] = literal[string] , identifier[method] = literal[string] )) keyword[if] keyword[not] identifier[self] . identifier[__save_u2f_devices] : keyword[raise] identifier[Exception] ( identifier[undefined_message] . identifier[format] ( identifier[name] = literal[string] , identifier[method] = literal[string] )) keyword[if] keyword[not] identifier[self] . identifier[__call_success_enroll] : keyword[raise] identifier[Exception] ( identifier[undefined_message] . identifier[format] ( identifier[name] = literal[string] , identifier[method] = literal[string] )) keyword[if] keyword[not] identifier[self] . identifier[__call_success_sign] : keyword[raise] identifier[Exception] ( identifier[undefined_message] . identifier[format] ( identifier[name] = literal[string] , identifier[method] = literal[string] )) identifier[self] . identifier[__integrity_check] = keyword[True] keyword[return] keyword[True]
def verify_integrity(self):
    """Verifies that all required functions have been injected."""
    if not self.__integrity_check:
        if not self.__appid:
            raise Exception('U2F_APPID was not defined! Please define it in configuration file.') # depends on [control=['if'], data=[]]
        if self.__facets_enabled and (not len(self.__facets_list)):
            raise Exception('U2F facets have been enabled, but the U2F facet list is empty.\n                Please either disable facets by setting U2F_FACETS_ENABLED to False,\n                or add a facet list by assigning it to U2F_FACETS_LIST.\n            ') # depends on [control=['if'], data=[]]
        # Injection
        undefined_message = 'U2F {name} handler is not defined! Please import {name} through {method}!'
        if not self.__get_u2f_devices:
            raise Exception(undefined_message.format(name='Read', method='@u2f.read')) # depends on [control=['if'], data=[]]
        if not self.__save_u2f_devices:
            raise Exception(undefined_message.format(name='Save', method='@u2f.save')) # depends on [control=['if'], data=[]]
        if not self.__call_success_enroll:
            raise Exception(undefined_message.format(name='enroll onSuccess', method='@u2f.enroll_on_success')) # depends on [control=['if'], data=[]]
        if not self.__call_success_sign:
            raise Exception(undefined_message.format(name='sign onSuccess', method='@u2f.sign_on_success')) # depends on [control=['if'], data=[]]
        self.__integrity_check = True # depends on [control=['if'], data=[]]
    return True
def replace_coord(arr, old_dim, new_dim, new_coord):
    """Replace a coordinate with a new one; new and old must have the same shape."""
    new_arr = arr.rename({old_dim: new_dim})
    new_arr[new_dim] = new_coord
    return new_arr
def function[replace_coord, parameter[arr, old_dim, new_dim, new_coord]]: constant[Replace a coordinate with new one; new and old must have same shape.] variable[new_arr] assign[=] call[name[arr].rename, parameter[dictionary[[<ast.Name object at 0x7da1b0464880>], [<ast.Name object at 0x7da1b0464850>]]]] call[name[new_arr]][name[new_dim]] assign[=] name[new_coord] return[name[new_arr]]
keyword[def] identifier[replace_coord] ( identifier[arr] , identifier[old_dim] , identifier[new_dim] , identifier[new_coord] ): literal[string] identifier[new_arr] = identifier[arr] . identifier[rename] ({ identifier[old_dim] : identifier[new_dim] }) identifier[new_arr] [ identifier[new_dim] ]= identifier[new_coord] keyword[return] identifier[new_arr]
def replace_coord(arr, old_dim, new_dim, new_coord):
    """Replace a coordinate with a new one; new and old must have the same shape."""
    new_arr = arr.rename({old_dim: new_dim})
    new_arr[new_dim] = new_coord
    return new_arr
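A sketch with xarray, which the rename/coordinate-assignment pattern suggests (the source does not name the array library explicitly, so treat the choice of xarray as an assumption).

import numpy as np
import xarray as xr

def replace_coord(arr, old_dim, new_dim, new_coord):
    """Replace a coordinate with a new one; new and old must have the same shape."""
    new_arr = arr.rename({old_dim: new_dim})
    new_arr[new_dim] = new_coord
    return new_arr

arr = xr.DataArray(np.arange(3.0), dims=['sigma'],
                   coords={'sigma': [0.1, 0.5, 0.9]})
# Swap the sigma levels for pressure values of the same length.
out = replace_coord(arr, 'sigma', 'p', [100.0, 500.0, 900.0])
print(out.coords['p'].values)  # [100. 500. 900.]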
def _display_stream(normalized_data, stream):
    """
    Print a stream message from a docker-py stream.
    """
    try:
        stream.write(normalized_data['stream'])
    except UnicodeEncodeError:
        stream.write(normalized_data['stream'].encode("utf-8"))
def function[_display_stream, parameter[normalized_data, stream]]: constant[ print stream message from docker-py stream. ] <ast.Try object at 0x7da18f813730>
keyword[def] identifier[_display_stream] ( identifier[normalized_data] , identifier[stream] ): literal[string] keyword[try] : identifier[stream] . identifier[write] ( identifier[normalized_data] [ literal[string] ]) keyword[except] identifier[UnicodeEncodeError] : identifier[stream] . identifier[write] ( identifier[normalized_data] [ literal[string] ]. identifier[encode] ( literal[string] ))
def _display_stream(normalized_data, stream): """ Print a stream message from a docker-py stream. """ try: stream.write(normalized_data['stream']) # depends on [control=['try'], data=[]] except UnicodeEncodeError: stream.write(normalized_data['stream'].encode('utf-8')) # depends on [control=['except'], data=[]]
def formMarkup(self, realm, return_to=None, immediate=False, form_tag_attrs=None): """Get html for a form to submit this request to the IDP. @param form_tag_attrs: Dictionary of attributes to be added to the form tag. 'accept-charset' and 'enctype' have defaults that can be overridden. If a value is supplied for 'action' or 'method', it will be replaced. @type form_tag_attrs: {unicode: unicode} """ message = self.getMessage(realm, return_to, immediate) return message.toFormMarkup(self.endpoint.server_url, form_tag_attrs)
def function[formMarkup, parameter[self, realm, return_to, immediate, form_tag_attrs]]: constant[Get html for a form to submit this request to the IDP. @param form_tag_attrs: Dictionary of attributes to be added to the form tag. 'accept-charset' and 'enctype' have defaults that can be overridden. If a value is supplied for 'action' or 'method', it will be replaced. @type form_tag_attrs: {unicode: unicode} ] variable[message] assign[=] call[name[self].getMessage, parameter[name[realm], name[return_to], name[immediate]]] return[call[name[message].toFormMarkup, parameter[name[self].endpoint.server_url, name[form_tag_attrs]]]]
keyword[def] identifier[formMarkup] ( identifier[self] , identifier[realm] , identifier[return_to] = keyword[None] , identifier[immediate] = keyword[False] , identifier[form_tag_attrs] = keyword[None] ): literal[string] identifier[message] = identifier[self] . identifier[getMessage] ( identifier[realm] , identifier[return_to] , identifier[immediate] ) keyword[return] identifier[message] . identifier[toFormMarkup] ( identifier[self] . identifier[endpoint] . identifier[server_url] , identifier[form_tag_attrs] )
def formMarkup(self, realm, return_to=None, immediate=False, form_tag_attrs=None): """Get html for a form to submit this request to the IDP. @param form_tag_attrs: Dictionary of attributes to be added to the form tag. 'accept-charset' and 'enctype' have defaults that can be overridden. If a value is supplied for 'action' or 'method', it will be replaced. @type form_tag_attrs: {unicode: unicode} """ message = self.getMessage(realm, return_to, immediate) return message.toFormMarkup(self.endpoint.server_url, form_tag_attrs)
def _parse_extra(self, fp): """ Parse and store the config comments and create maps for dot notation lookup """ comment = '' section = '' fp.seek(0) for line in fp: line = line.rstrip() if not line: if comment: comment += '\n' continue if line.startswith('#'): # Comment comment += line + '\n' continue if line.startswith('['): # Section section = line.strip('[]') self._add_dot_key(section) if comment: self._comments[section] = comment.rstrip() elif CONFIG_KEY_RE.match(line): # Config key = line.split('=', 1)[0].strip() self._add_dot_key(section, key) if comment: self._comments[(section, key)] = comment.rstrip() comment = '' if comment: self._comments[self.LAST_COMMENT_KEY] = comment
def function[_parse_extra, parameter[self, fp]]: constant[ Parse and store the config comments and create maps for dot notion lookup ] variable[comment] assign[=] constant[] variable[section] assign[=] constant[] call[name[fp].seek, parameter[constant[0]]] for taget[name[line]] in starred[name[fp]] begin[:] variable[line] assign[=] call[name[line].rstrip, parameter[]] if <ast.UnaryOp object at 0x7da20c795f30> begin[:] if name[comment] begin[:] <ast.AugAssign object at 0x7da20c7958d0> continue if call[name[line].startswith, parameter[constant[#]]] begin[:] <ast.AugAssign object at 0x7da20c7963e0> continue if call[name[line].startswith, parameter[constant[[]]] begin[:] variable[section] assign[=] call[name[line].strip, parameter[constant[[]]]] call[name[self]._add_dot_key, parameter[name[section]]] if name[comment] begin[:] call[name[self]._comments][name[section]] assign[=] call[name[comment].rstrip, parameter[]] variable[comment] assign[=] constant[] if name[comment] begin[:] call[name[self]._comments][name[self].LAST_COMMENT_KEY] assign[=] name[comment]
keyword[def] identifier[_parse_extra] ( identifier[self] , identifier[fp] ): literal[string] identifier[comment] = literal[string] identifier[section] = literal[string] identifier[fp] . identifier[seek] ( literal[int] ) keyword[for] identifier[line] keyword[in] identifier[fp] : identifier[line] = identifier[line] . identifier[rstrip] () keyword[if] keyword[not] identifier[line] : keyword[if] identifier[comment] : identifier[comment] += literal[string] keyword[continue] keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[comment] += identifier[line] + literal[string] keyword[continue] keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[section] = identifier[line] . identifier[strip] ( literal[string] ) identifier[self] . identifier[_add_dot_key] ( identifier[section] ) keyword[if] identifier[comment] : identifier[self] . identifier[_comments] [ identifier[section] ]= identifier[comment] . identifier[rstrip] () keyword[elif] identifier[CONFIG_KEY_RE] . identifier[match] ( identifier[line] ): identifier[key] = identifier[line] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]. identifier[strip] () identifier[self] . identifier[_add_dot_key] ( identifier[section] , identifier[key] ) keyword[if] identifier[comment] : identifier[self] . identifier[_comments] [( identifier[section] , identifier[key] )]= identifier[comment] . identifier[rstrip] () identifier[comment] = literal[string] keyword[if] identifier[comment] : identifier[self] . identifier[_comments] [ identifier[self] . identifier[LAST_COMMENT_KEY] ]= identifier[comment]
def _parse_extra(self, fp):
    """ Parse and store the config comments and create maps for dot notation lookup """
    comment = ''
    section = ''
    fp.seek(0)
    for line in fp:
        line = line.rstrip()
        if not line:
            if comment:
                comment += '\n' # depends on [control=['if'], data=[]]
            continue # depends on [control=['if'], data=[]]
        if line.startswith('#'):
            # Comment
            comment += line + '\n'
            continue # depends on [control=['if'], data=[]]
        if line.startswith('['):
            # Section
            section = line.strip('[]')
            self._add_dot_key(section)
            if comment:
                self._comments[section] = comment.rstrip() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
        elif CONFIG_KEY_RE.match(line):
            # Config
            key = line.split('=', 1)[0].strip()
            self._add_dot_key(section, key)
            if comment:
                self._comments[section, key] = comment.rstrip() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
        comment = '' # depends on [control=['for'], data=['line']]
    if comment:
        self._comments[self.LAST_COMMENT_KEY] = comment # depends on [control=['if'], data=[]]
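An illustrative harness for the parser above. CONFIG_KEY_RE, _add_dot_key, _comments and LAST_COMMENT_KEY all live in the surrounding module/class, which the source does not show; the regex shape and the stub class below are assumptions, and the sketch assumes the `_parse_extra` function from the row above is defined at module scope.

import io
import re

CONFIG_KEY_RE = re.compile(r'^[\w.-]+\s*=')  # assumed shape of the module-level regex

class CommentTrackingParser:
    """Stand-in host class; the real one presumably also parses the config itself."""
    LAST_COMMENT_KEY = '__last_comment__'

    def __init__(self):
        self._comments = {}

    def _add_dot_key(self, section, key=None):
        pass  # the real implementation records keys for dot-notation lookup

    _parse_extra = _parse_extra  # bind the function defined above as a method

fp = io.StringIO(
    '# Web server settings\n'
    '[web]\n'
    '# Port to listen on\n'
    'port = 8080\n')
p = CommentTrackingParser()
p._parse_extra(fp)
print(p._comments['web'])            # '# Web server settings'
print(p._comments[('web', 'port')])  # '# Port to listen on'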
def set_trial_config(experiment_config, port, config_file_name): '''set trial configuration''' request_data = dict() request_data['trial_config'] = experiment_config['trial'] response = rest_put(cluster_metadata_url(port), json.dumps(request_data), REST_TIME_OUT) if check_response(response): return True else: print('Error message is {}'.format(response.text)) _, stderr_full_path = get_log_path(config_file_name) if response: with open(stderr_full_path, 'a+') as fout: fout.write(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) return False
def function[set_trial_config, parameter[experiment_config, port, config_file_name]]: constant[set trial configuration] variable[request_data] assign[=] call[name[dict], parameter[]] call[name[request_data]][constant[trial_config]] assign[=] call[name[experiment_config]][constant[trial]] variable[response] assign[=] call[name[rest_put], parameter[call[name[cluster_metadata_url], parameter[name[port]]], call[name[json].dumps, parameter[name[request_data]]], name[REST_TIME_OUT]]] if call[name[check_response], parameter[name[response]]] begin[:] return[constant[True]]
keyword[def] identifier[set_trial_config] ( identifier[experiment_config] , identifier[port] , identifier[config_file_name] ): literal[string] identifier[request_data] = identifier[dict] () identifier[request_data] [ literal[string] ]= identifier[experiment_config] [ literal[string] ] identifier[response] = identifier[rest_put] ( identifier[cluster_metadata_url] ( identifier[port] ), identifier[json] . identifier[dumps] ( identifier[request_data] ), identifier[REST_TIME_OUT] ) keyword[if] identifier[check_response] ( identifier[response] ): keyword[return] keyword[True] keyword[else] : identifier[print] ( literal[string] . identifier[format] ( identifier[response] . identifier[text] )) identifier[_] , identifier[stderr_full_path] = identifier[get_log_path] ( identifier[config_file_name] ) keyword[if] identifier[response] : keyword[with] identifier[open] ( identifier[stderr_full_path] , literal[string] ) keyword[as] identifier[fout] : identifier[fout] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[json] . identifier[loads] ( identifier[response] . identifier[text] ), identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] , identifier[separators] =( literal[string] , literal[string] ))) keyword[return] keyword[False]
def set_trial_config(experiment_config, port, config_file_name): """set trial configuration""" request_data = dict() request_data['trial_config'] = experiment_config['trial'] response = rest_put(cluster_metadata_url(port), json.dumps(request_data), REST_TIME_OUT) if check_response(response): return True # depends on [control=['if'], data=[]] else: print('Error message is {}'.format(response.text)) (_, stderr_full_path) = get_log_path(config_file_name) if response: with open(stderr_full_path, 'a+') as fout: fout.write(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) # depends on [control=['with'], data=['fout']] # depends on [control=['if'], data=[]] return False
def to_numpy(self, dtype=None, copy=False):
    """Convert the DataFrame to a NumPy array.

    Args:
        dtype: The dtype to pass to numpy.asarray()
        copy: Whether to ensure that the returned value is not a view on
            another array.

    Returns:
        A numpy array.
    """
    return self._default_to_pandas("to_numpy", dtype=dtype, copy=copy)
def function[to_numpy, parameter[self, dtype, copy]]: constant[Convert the DataFrame to a NumPy array. Args: dtype: The dtype to pass to numpy.asarray() copy: Whether to ensure that the returned value is a not a view on another array. Returns: A numpy array. ] return[call[name[self]._default_to_pandas, parameter[constant[to_numpy]]]]
keyword[def] identifier[to_numpy] ( identifier[self] , identifier[dtype] = keyword[None] , identifier[copy] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[_default_to_pandas] ( literal[string] , identifier[dtype] = identifier[dtype] , identifier[copy] = identifier[copy] )
def to_numpy(self, dtype=None, copy=False):
    """Convert the DataFrame to a NumPy array.

    Args:
        dtype: The dtype to pass to numpy.asarray()
        copy: Whether to ensure that the returned value is not a view on
            another array.

    Returns:
        A numpy array.
    """
    return self._default_to_pandas('to_numpy', dtype=dtype, copy=copy)
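The `_default_to_pandas` dispatch suggests this is Modin's pandas-compatible DataFrame, which simply defers to the pandas implementation here; a quick sketch (requires modin, and naming the library is an inference, not stated in the row).

import modin.pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3.5, 4.5]})
arr = df.to_numpy(dtype=float)  # falls back to pandas' to_numpy
print(arr)        # [[1.  3.5]
                  #  [2.  4.5]]
print(arr.dtype)  # float64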
def delete(self, key: str) -> None: """ Deletes a setting from this object's storage. The write to the database is performed immediately and the cache in the cache backend is flushed. The cache within this object will be updated correctly. """ if key in self._write_cache(): self._write_cache()[key].delete() del self._write_cache()[key] if key in self._cache(): del self._cache()[key] self._flush_external_cache()
def function[delete, parameter[self, key]]: constant[ Deletes a setting from this object's storage. The write to the database is performed immediately and the cache in the cache backend is flushed. The cache within this object will be updated correctly. ] if compare[name[key] in call[name[self]._write_cache, parameter[]]] begin[:] call[call[call[name[self]._write_cache, parameter[]]][name[key]].delete, parameter[]] <ast.Delete object at 0x7da20e9b3c70> if compare[name[key] in call[name[self]._cache, parameter[]]] begin[:] <ast.Delete object at 0x7da20e9b0700> call[name[self]._flush_external_cache, parameter[]]
keyword[def] identifier[delete] ( identifier[self] , identifier[key] : identifier[str] )-> keyword[None] : literal[string] keyword[if] identifier[key] keyword[in] identifier[self] . identifier[_write_cache] (): identifier[self] . identifier[_write_cache] ()[ identifier[key] ]. identifier[delete] () keyword[del] identifier[self] . identifier[_write_cache] ()[ identifier[key] ] keyword[if] identifier[key] keyword[in] identifier[self] . identifier[_cache] (): keyword[del] identifier[self] . identifier[_cache] ()[ identifier[key] ] identifier[self] . identifier[_flush_external_cache] ()
def delete(self, key: str) -> None: """ Deletes a setting from this object's storage. The write to the database is performed immediately and the cache in the cache backend is flushed. The cache within this object will be updated correctly. """ if key in self._write_cache(): self._write_cache()[key].delete() del self._write_cache()[key] # depends on [control=['if'], data=['key']] if key in self._cache(): del self._cache()[key] # depends on [control=['if'], data=['key']] self._flush_external_cache()
def abort(self, frame): """ Handles ABORT command: Rolls back specified transaction. """ if not frame.transaction: raise ProtocolError("Missing transaction for ABORT command.") if not frame.transaction in self.engine.transactions: raise ProtocolError("Invalid transaction: %s" % frame.transaction) self.engine.queue_manager.resend_transaction_frames( self.engine.connection, frame.transaction) del self.engine.transactions[frame.transaction]
def function[abort, parameter[self, frame]]: constant[ Handles ABORT command: Rolls back specified transaction. ] if <ast.UnaryOp object at 0x7da1b19383d0> begin[:] <ast.Raise object at 0x7da1b193ada0> if <ast.UnaryOp object at 0x7da1b193ab60> begin[:] <ast.Raise object at 0x7da1b1939db0> call[name[self].engine.queue_manager.resend_transaction_frames, parameter[name[self].engine.connection, name[frame].transaction]] <ast.Delete object at 0x7da1b193b400>
keyword[def] identifier[abort] ( identifier[self] , identifier[frame] ): literal[string] keyword[if] keyword[not] identifier[frame] . identifier[transaction] : keyword[raise] identifier[ProtocolError] ( literal[string] ) keyword[if] keyword[not] identifier[frame] . identifier[transaction] keyword[in] identifier[self] . identifier[engine] . identifier[transactions] : keyword[raise] identifier[ProtocolError] ( literal[string] % identifier[frame] . identifier[transaction] ) identifier[self] . identifier[engine] . identifier[queue_manager] . identifier[resend_transaction_frames] ( identifier[self] . identifier[engine] . identifier[connection] , identifier[frame] . identifier[transaction] ) keyword[del] identifier[self] . identifier[engine] . identifier[transactions] [ identifier[frame] . identifier[transaction] ]
def abort(self, frame): """ Handles ABORT command: Rolls back specified transaction. """ if not frame.transaction: raise ProtocolError('Missing transaction for ABORT command.') # depends on [control=['if'], data=[]] if not frame.transaction in self.engine.transactions: raise ProtocolError('Invalid transaction: %s' % frame.transaction) # depends on [control=['if'], data=[]] self.engine.queue_manager.resend_transaction_frames(self.engine.connection, frame.transaction) del self.engine.transactions[frame.transaction]
def find_file(self, path, saltenv, back=None):
    '''
    Find the path and return the fnd structure; this structure is passed
    to other backend interfaces.
    '''
    path = salt.utils.stringutils.to_unicode(path)
    saltenv = salt.utils.stringutils.to_unicode(saltenv)
    back = self.backends(back)
    kwargs = {}
    fnd = {'path': '', 'rel': ''}
    if os.path.isabs(path):
        return fnd
    if '../' in path:
        return fnd
    if salt.utils.url.is_escaped(path):
        # don't attempt to find URL query arguments in the path
        path = salt.utils.url.unescape(path)
    else:
        if '?' in path:
            hcomps = path.split('?')
            path = hcomps[0]
            comps = hcomps[1].split('&')
            for comp in comps:
                if '=' not in comp:
                    # Invalid option, skip it
                    continue
                args = comp.split('=', 1)
                kwargs[args[0]] = args[1]
    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')
    if 'saltenv' in kwargs:
        saltenv = kwargs.pop('saltenv')
    if not isinstance(saltenv, six.string_types):
        saltenv = six.text_type(saltenv)
    for fsb in back:
        fstr = '{0}.find_file'.format(fsb)
        if fstr in self.servers:
            fnd = self.servers[fstr](path, saltenv, **kwargs)
            if fnd.get('path'):
                fnd['back'] = fsb
                return fnd
    return fnd
def function[find_file, parameter[self, path, saltenv, back]]: constant[ Find the path and return the fnd structure, this structure is passed to other backend interfaces. ] variable[path] assign[=] call[name[salt].utils.stringutils.to_unicode, parameter[name[path]]] variable[saltenv] assign[=] call[name[salt].utils.stringutils.to_unicode, parameter[name[saltenv]]] variable[back] assign[=] call[name[self].backends, parameter[name[back]]] variable[kwargs] assign[=] dictionary[[], []] variable[fnd] assign[=] dictionary[[<ast.Constant object at 0x7da2041dba90>, <ast.Constant object at 0x7da2041d8a90>], [<ast.Constant object at 0x7da2041d9e40>, <ast.Constant object at 0x7da2041da620>]] if call[name[os].path.isabs, parameter[name[path]]] begin[:] return[name[fnd]] if compare[constant[../] in name[path]] begin[:] return[name[fnd]] if call[name[salt].utils.url.is_escaped, parameter[name[path]]] begin[:] variable[path] assign[=] call[name[salt].utils.url.unescape, parameter[name[path]]] if compare[constant[env] in name[kwargs]] begin[:] call[name[kwargs].pop, parameter[constant[env]]] if compare[constant[saltenv] in name[kwargs]] begin[:] variable[saltenv] assign[=] call[name[kwargs].pop, parameter[constant[saltenv]]] if <ast.UnaryOp object at 0x7da18c4cd720> begin[:] variable[saltenv] assign[=] call[name[six].text_type, parameter[name[saltenv]]] for taget[name[fsb]] in starred[name[back]] begin[:] variable[fstr] assign[=] call[constant[{0}.find_file].format, parameter[name[fsb]]] if compare[name[fstr] in name[self].servers] begin[:] variable[fnd] assign[=] call[call[name[self].servers][name[fstr]], parameter[name[path], name[saltenv]]] if call[name[fnd].get, parameter[constant[path]]] begin[:] call[name[fnd]][constant[back]] assign[=] name[fsb] return[name[fnd]] return[name[fnd]]
keyword[def] identifier[find_file] ( identifier[self] , identifier[path] , identifier[saltenv] , identifier[back] = keyword[None] ): literal[string] identifier[path] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[path] ) identifier[saltenv] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[saltenv] ) identifier[back] = identifier[self] . identifier[backends] ( identifier[back] ) identifier[kwargs] ={} identifier[fnd] ={ literal[string] : literal[string] , literal[string] : literal[string] } keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[path] ): keyword[return] identifier[fnd] keyword[if] literal[string] keyword[in] identifier[path] : keyword[return] identifier[fnd] keyword[if] identifier[salt] . identifier[utils] . identifier[url] . identifier[is_escaped] ( identifier[path] ): identifier[path] = identifier[salt] . identifier[utils] . identifier[url] . identifier[unescape] ( identifier[path] ) keyword[else] : keyword[if] literal[string] keyword[in] identifier[path] : identifier[hcomps] = identifier[path] . identifier[split] ( literal[string] ) identifier[path] = identifier[hcomps] [ literal[int] ] identifier[comps] = identifier[hcomps] [ literal[int] ]. identifier[split] ( literal[string] ) keyword[for] identifier[comp] keyword[in] identifier[comps] : keyword[if] literal[string] keyword[not] keyword[in] identifier[comp] : keyword[continue] identifier[args] = identifier[comp] . identifier[split] ( literal[string] , literal[int] ) identifier[kwargs] [ identifier[args] [ literal[int] ]]= identifier[args] [ literal[int] ] keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[kwargs] . identifier[pop] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[saltenv] = identifier[kwargs] . identifier[pop] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[saltenv] , identifier[six] . identifier[string_types] ): identifier[saltenv] = identifier[six] . identifier[text_type] ( identifier[saltenv] ) keyword[for] identifier[fsb] keyword[in] identifier[back] : identifier[fstr] = literal[string] . identifier[format] ( identifier[fsb] ) keyword[if] identifier[fstr] keyword[in] identifier[self] . identifier[servers] : identifier[fnd] = identifier[self] . identifier[servers] [ identifier[fstr] ]( identifier[path] , identifier[saltenv] ,** identifier[kwargs] ) keyword[if] identifier[fnd] . identifier[get] ( literal[string] ): identifier[fnd] [ literal[string] ]= identifier[fsb] keyword[return] identifier[fnd] keyword[return] identifier[fnd]
def find_file(self, path, saltenv, back=None):
    """
    Find the path and return the fnd structure; this structure is passed
    to other backend interfaces.
    """
    path = salt.utils.stringutils.to_unicode(path)
    saltenv = salt.utils.stringutils.to_unicode(saltenv)
    back = self.backends(back)
    kwargs = {}
    fnd = {'path': '', 'rel': ''}
    if os.path.isabs(path):
        return fnd # depends on [control=['if'], data=[]]
    if '../' in path:
        return fnd # depends on [control=['if'], data=[]]
    if salt.utils.url.is_escaped(path):
        # don't attempt to find URL query arguments in the path
        path = salt.utils.url.unescape(path) # depends on [control=['if'], data=[]]
    elif '?' in path:
        hcomps = path.split('?')
        path = hcomps[0]
        comps = hcomps[1].split('&')
        for comp in comps:
            if '=' not in comp:
                # Invalid option, skip it
                continue # depends on [control=['if'], data=[]]
            args = comp.split('=', 1)
            kwargs[args[0]] = args[1] # depends on [control=['for'], data=['comp']] # depends on [control=['if'], data=['path']]
    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env') # depends on [control=['if'], data=['kwargs']]
    if 'saltenv' in kwargs:
        saltenv = kwargs.pop('saltenv') # depends on [control=['if'], data=['kwargs']]
    if not isinstance(saltenv, six.string_types):
        saltenv = six.text_type(saltenv) # depends on [control=['if'], data=[]]
    for fsb in back:
        fstr = '{0}.find_file'.format(fsb)
        if fstr in self.servers:
            fnd = self.servers[fstr](path, saltenv, **kwargs)
            if fnd.get('path'):
                fnd['back'] = fsb
                return fnd # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['fstr']] # depends on [control=['for'], data=['fsb']]
    return fnd
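The query-argument handling above, extracted into a standalone sketch for illustration; the function name is mine, not salt's.

def split_path_args(path):
    """Split 'path?k=v&...' into (path, kwargs), skipping invalid options."""
    kwargs = {}
    if '?' in path:
        path, _, query = path.partition('?')
        for comp in query.split('&'):
            if '=' not in comp:
                continue  # invalid option, skip it
            key, val = comp.split('=', 1)
            kwargs[key] = val
    return path, kwargs

print(split_path_args('top.sls?saltenv=dev&gzip=1'))
# ('top.sls', {'saltenv': 'dev', 'gzip': '1'})
print(split_path_args('files/app.conf'))
# ('files/app.conf', {})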
async def send_and_receive(self, message, generate_identifier=True, timeout=5):
    """Send a message and wait for a response."""
    await self._connect_and_encrypt()

    # Some messages will respond with the same identifier as used in the
    # corresponding request. Others will not and one example is the crypto
    # message (for pairing). They will never include an identifier, but it
    # is in turn only possible to have one of those messages outstanding
    # at one time (i.e. it's not possible to mix up the responses). In
    # those cases, a "fake" identifier is used that includes the message
    # type instead.
    if generate_identifier:
        identifier = str(uuid.uuid4())
        message.identifier = identifier
    else:
        identifier = 'type_' + str(message.type)

    self.connection.send(message)
    return await self._receive(identifier, timeout)
<ast.AsyncFunctionDef object at 0x7da2054a5540>
keyword[async] keyword[def] identifier[send_and_receive] ( identifier[self] , identifier[message] , identifier[generate_identifier] = keyword[True] , identifier[timeout] = literal[int] ): literal[string] keyword[await] identifier[self] . identifier[_connect_and_encrypt] () keyword[if] identifier[generate_identifier] : identifier[identifier] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()) identifier[message] . identifier[identifier] = identifier[identifier] keyword[else] : identifier[identifier] = literal[string] + identifier[str] ( identifier[message] . identifier[type] ) identifier[self] . identifier[connection] . identifier[send] ( identifier[message] ) keyword[return] keyword[await] identifier[self] . identifier[_receive] ( identifier[identifier] , identifier[timeout] )
async def send_and_receive(self, message, generate_identifier=True, timeout=5):
    """Send a message and wait for a response."""
    await self._connect_and_encrypt()
    # Some messages will respond with the same identifier as used in the
    # corresponding request. Others will not and one example is the crypto
    # message (for pairing). They will never include an identifier, but it
    # is in turn only possible to have one of those messages outstanding
    # at one time (i.e. it's not possible to mix up the responses). In
    # those cases, a "fake" identifier is used that includes the message
    # type instead.
    if generate_identifier:
        identifier = str(uuid.uuid4())
        message.identifier = identifier # depends on [control=['if'], data=[]]
    else:
        identifier = 'type_' + str(message.type)
    self.connection.send(message)
    return await self._receive(identifier, timeout)
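A hedged usage sketch of the two identifier modes; the protocol object and the message arguments are stand-ins for whatever the surrounding library provides.

async def fetch(protocol, info_message, crypto_message):
    # Normal request: a fresh UUID is stamped on the message and the
    # response is matched on that identifier.
    info = await protocol.send_and_receive(info_message)

    # Crypto/pairing request: no identifier goes on the wire, so the
    # reply is matched on a synthetic 'type_<N>' key instead -- which is
    # why only one such message may be outstanding at a time.
    paired = await protocol.send_and_receive(
        crypto_message, generate_identifier=False, timeout=10)
    return info, paired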
def works(self, ids = None, query = None, filter = None, offset = None,
          limit = None, sample = None, sort = None,
          order = None, facet = None, select = None, cursor = None,
          cursor_max = 5000, **kwargs):
    '''
    Search Crossref works

    :param ids: [Array] DOIs (digital object identifier) or other identifiers
    :param query: [String] A query string
    :param filter: [Hash] Filter options. See examples for usage.
        Accepts a dict, with filter names and their values. For repeating filter names
        pass in a list of the values to that filter name, e.g.,
        `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
        See https://github.com/CrossRef/rest-api-doc#filter-names
        for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
        and :func:`~habanero.Crossref.filter_details`
    :param offset: [Fixnum] Record number to start at, from 1 to 10000
    :param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
    :param sample: [Fixnum] Number of random results to return. When you use the sample parameter, the limit and offset parameters are ignored. Max: 100
    :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values.
    :param order: [String] Sort order, one of 'asc' or 'desc'
    :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`. See Facets_ for options.
    :param select: [String/list(Strings)] Crossref metadata records can be quite large. Sometimes you just want a few elements from the schema. You can "select" a subset of elements to return. This can make your API calls much more efficient. Not clear yet which fields are allowed here.
    :param cursor: [String] Cursor character string to do deep paging. Default is None. Pass in '*' to start deep paging. Any combination of query, filters and facets may be used with deep paging cursors. While rows may be specified along with cursor, offset and sample cannot be used. See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
    :param cursor_max: [Fixnum] Max records to retrieve. Only used when the cursor param is used. Because deep paging can result in continuous requests until all are retrieved, use this parameter to set a maximum number of records. Of course, if there are fewer records found than this value, you will get only those found.
    :param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_)

    :return: A dict

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.works()
        cr.works(ids = '10.1371/journal.pone.0033693')
        dois = ['10.1371/journal.pone.0033693', ]
        cr.works(ids = dois)
        x = cr.works(query = "ecology")
        x['status']
        x['message-type']
        x['message-version']
        x['message']
        x['message']['total-results']
        x['message']['items-per-page']
        x['message']['query']
        x['message']['items']

        # Get full text links
        x = cr.works(filter = {'has_full_text': True})
        x

        # Parse output to various data pieces
        x = cr.works(filter = {'has_full_text': True})
        ## get doi for each item
        [ z['DOI'] for z in x['message']['items'] ]
        ## get doi and url for each item
        [ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
        ### print every doi
        for i in x['message']['items']:
             print(i['DOI'])

        # filters - pass in as a dict
        ## see https://github.com/CrossRef/rest-api-doc#filter-names
        cr.works(filter = {'has_full_text': True})
        cr.works(filter = {'has_funder': True, 'has_full_text': True})
        cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
        ## to repeat a filter name, pass in a list
        x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
        map(lambda z:z['funder'][0]['DOI'], x['message']['items'])

        # Deep paging, using the cursor parameter
        ## this search should lead to only ~215 results
        cr.works(query = "widget", cursor = "*", cursor_max = 100)
        ## this search should lead to only ~2500 results, in chunks of 500
        res = cr.works(query = "octopus", cursor = "*", limit = 500)
        sum([ len(z['message']['items']) for z in res ])
        ## about 167 results
        res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
        sum([ len(z['message']['items']) for z in res ])
        ## cursor_max to get back only a maximum set of results
        res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
        sum([ len(z['message']['items']) for z in res ])
        ## cursor_max - especially useful when a request could be very large
        ### e.g., "ecology" results in ~275K records, let's max at 10,000
        ### with 1000 at a time
        res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
        sum([ len(z['message']['items']) for z in res ])
        items = [ z['message']['items'] for z in res ]
        items = [ item for sublist in items for item in sublist ]
        [ z['DOI'] for z in items ][0:50]

        # field queries
        res = cr.works(query = "ecology", query_author = 'carl boettiger')
        [ x['author'][0]['family'] for x in res['message']['items'] ]

        # select certain fields to return
        ## as a comma separated string
        cr.works(query = "ecology", select = "DOI,title")
        ## or as a list
        cr.works(query = "ecology", select = ["DOI","title"])
    '''
    if ids.__class__.__name__ != 'NoneType':
        return request(self.mailto, self.base_url, "/works/", ids,
            query, filter, offset, limit, sample, sort,
            order, facet, select, None, None, None, None, **kwargs)
    else:
        return Request(self.mailto, self.base_url, "/works/",
            query, filter, offset, limit, sample, sort,
            order, facet, select, cursor, cursor_max, None, **kwargs).do_request()
def function[works, parameter[self, ids, query, filter, offset, limit, sample, sort, order, facet, select, cursor, cursor_max]]: constant[ Search Crossref works :param ids: [Array] DOIs (digital object identifier) or other identifiers :param query: [String] A query string :param filter: [Hash] Filter options. See examples for usage. Accepts a dict, with filter names and their values. For repeating filter names pass in a list of the values to that filter name, e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`. See https://github.com/CrossRef/rest-api-doc#filter-names for filter names and their descriptions and :func:`~habanero.Crossref.filter_names` and :func:`~habanero.Crossref.filter_details` :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relavant when searching with specific dois. Default: 20. Max: 1000 :param sample: [Fixnum] Number of random results to return. when you use the sample parameter, the limit and offset parameters are ignored. Max: 100 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`. See Facets_ for options. :param select: [String/list(Strings)] Crossref metadata records can be quite large. Sometimes you just want a few elements from the schema. You can "select" a subset of elements to return. This can make your API calls much more efficient. Not clear yet which fields are allowed here. :param cursor: [String] Cursor character string to do deep paging. Default is None. Pass in '*' to start deep paging. Any combination of query, filters and facets may be used with deep paging cursors. While rows may be specified along with cursor, offset and sample cannot be used. See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors :param cursor_max: [Fixnum] Max records to retrieve. Only used when cursor param used. Because deep paging can result in continuous requests until all are retrieved, use this parameter to set a maximum number of records. Of course, if there are less records found than this value, you will get only those found. 
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.works() cr.works(ids = '10.1371/journal.pone.0033693') dois = ['10.1371/journal.pone.0033693', ] cr.works(ids = dois) x = cr.works(query = "ecology") x['status'] x['message-type'] x['message-version'] x['message'] x['message']['total-results'] x['message']['items-per-page'] x['message']['query'] x['message']['items'] # Get full text links x = cr.works(filter = {'has_full_text': True}) x # Parse output to various data pieces x = cr.works(filter = {'has_full_text': True}) ## get doi for each item [ z['DOI'] for z in x['message']['items'] ] ## get doi and url for each item [ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ] ### print every doi for i in x['message']['items']: print i['DOI'] # filters - pass in as a dict ## see https://github.com/CrossRef/rest-api-doc#filter-names cr.works(filter = {'has_full_text': True}) cr.works(filter = {'has_funder': True, 'has_full_text': True}) cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'}) ## to repeat a filter name, pass in a list x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100) map(lambda z:z['funder'][0]['DOI'], x['message']['items']) # Deep paging, using the cursor parameter ## this search should lead to only ~215 results cr.works(query = "widget", cursor = "*", cursor_max = 100) ## this search should lead to only ~2500 results, in chunks of 500 res = cr.works(query = "octopus", cursor = "*", limit = 500) sum([ len(z['message']['items']) for z in res ]) ## about 167 results res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500) sum([ len(z['message']['items']) for z in res ]) ## cursor_max to get back only a maximum set of results res = cr.works(query = "widget", cursor = "*", cursor_max = 100) sum([ len(z['message']['items']) for z in res ]) ## cursor_max - especially useful when a request could be very large ### e.g., "ecology" results in ~275K records, lets max at 10,000 ### with 1000 at a time res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000) sum([ len(z['message']['items']) for z in res ]) items = [ z['message']['items'] for z in res ] items = [ item for sublist in items for item in sublist ] [ z['DOI'] for z in items ][0:50] # field queries res = cr.works(query = "ecology", query_author = 'carl boettiger') [ x['author'][0]['family'] for x in res['message']['items'] ] # select certain fields to return ## as a comma separated string cr.works(query = "ecology", select = "DOI,title") ## or as a list cr.works(query = "ecology", select = ["DOI","title"]) ] if compare[name[ids].__class__.__name__ not_equal[!=] constant[NoneType]] begin[:] return[call[name[request], parameter[name[self].mailto, name[self].base_url, constant[/works/], name[ids], name[query], name[filter], name[offset], name[limit], name[sample], name[sort], name[order], name[facet], name[select], constant[None], constant[None], constant[None], constant[None]]]]
keyword[def] identifier[works] ( identifier[self] , identifier[ids] = keyword[None] , identifier[query] = keyword[None] , identifier[filter] = keyword[None] , identifier[offset] = keyword[None] , identifier[limit] = keyword[None] , identifier[sample] = keyword[None] , identifier[sort] = keyword[None] , identifier[order] = keyword[None] , identifier[facet] = keyword[None] , identifier[select] = keyword[None] , identifier[cursor] = keyword[None] , identifier[cursor_max] = literal[int] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[ids] . identifier[__class__] . identifier[__name__] != literal[string] : keyword[return] identifier[request] ( identifier[self] . identifier[mailto] , identifier[self] . identifier[base_url] , literal[string] , identifier[ids] , identifier[query] , identifier[filter] , identifier[offset] , identifier[limit] , identifier[sample] , identifier[sort] , identifier[order] , identifier[facet] , identifier[select] , keyword[None] , keyword[None] , keyword[None] , keyword[None] ,** identifier[kwargs] ) keyword[else] : keyword[return] identifier[Request] ( identifier[self] . identifier[mailto] , identifier[self] . identifier[base_url] , literal[string] , identifier[query] , identifier[filter] , identifier[offset] , identifier[limit] , identifier[sample] , identifier[sort] , identifier[order] , identifier[facet] , identifier[select] , identifier[cursor] , identifier[cursor_max] , keyword[None] ,** identifier[kwargs] ). identifier[do_request] ()
def works(self, ids=None, query=None, filter=None, offset=None, limit=None, sample=None, sort=None, order=None, facet=None, select=None, cursor=None, cursor_max=5000, **kwargs): """ Search Crossref works :param ids: [Array] DOIs (digital object identifier) or other identifiers :param query: [String] A query string :param filter: [Hash] Filter options. See examples for usage. Accepts a dict, with filter names and their values. For repeating filter names pass in a list of the values to that filter name, e.g., `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`. See https://github.com/CrossRef/rest-api-doc#filter-names for filter names and their descriptions and :func:`~habanero.Crossref.filter_names` and :func:`~habanero.Crossref.filter_details` :param offset: [Fixnum] Number of record to start at, from 1 to 10000 :param limit: [Fixnum] Number of results to return. Not relavant when searching with specific dois. Default: 20. Max: 1000 :param sample: [Fixnum] Number of random results to return. when you use the sample parameter, the limit and offset parameters are ignored. Max: 100 :param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort order will be by the relevance score. If no query is included, then the sort order will be by DOI update date. See sorting_ for possible values. :param order: [String] Sort order, one of 'asc' or 'desc' :param facet: [Boolean/String] Set to `true` to include facet results (default: false). Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`. See Facets_ for options. :param select: [String/list(Strings)] Crossref metadata records can be quite large. Sometimes you just want a few elements from the schema. You can "select" a subset of elements to return. This can make your API calls much more efficient. Not clear yet which fields are allowed here. :param cursor: [String] Cursor character string to do deep paging. Default is None. Pass in '*' to start deep paging. Any combination of query, filters and facets may be used with deep paging cursors. While rows may be specified along with cursor, offset and sample cannot be used. See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors :param cursor_max: [Fixnum] Max records to retrieve. Only used when cursor param used. Because deep paging can result in continuous requests until all are retrieved, use this parameter to set a maximum number of records. Of course, if there are less records found than this value, you will get only those found. 
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field queries (see examples and FieldQueries_) :return: A dict Usage:: from habanero import Crossref cr = Crossref() cr.works() cr.works(ids = '10.1371/journal.pone.0033693') dois = ['10.1371/journal.pone.0033693', ] cr.works(ids = dois) x = cr.works(query = "ecology") x['status'] x['message-type'] x['message-version'] x['message'] x['message']['total-results'] x['message']['items-per-page'] x['message']['query'] x['message']['items'] # Get full text links x = cr.works(filter = {'has_full_text': True}) x # Parse output to various data pieces x = cr.works(filter = {'has_full_text': True}) ## get doi for each item [ z['DOI'] for z in x['message']['items'] ] ## get doi and url for each item [ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ] ### print every doi for i in x['message']['items']: print(i['DOI']) # filters - pass in as a dict ## see https://github.com/CrossRef/rest-api-doc#filter-names cr.works(filter = {'has_full_text': True}) cr.works(filter = {'has_funder': True, 'has_full_text': True}) cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'}) ## to repeat a filter name, pass in a list x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100) map(lambda z:z['funder'][0]['DOI'], x['message']['items']) # Deep paging, using the cursor parameter ## this search should lead to only ~215 results cr.works(query = "widget", cursor = "*", cursor_max = 100) ## this search should lead to only ~2500 results, in chunks of 500 res = cr.works(query = "octopus", cursor = "*", limit = 500) sum([ len(z['message']['items']) for z in res ]) ## about 167 results res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500) sum([ len(z['message']['items']) for z in res ]) ## cursor_max to get back only a maximum set of results res = cr.works(query = "widget", cursor = "*", cursor_max = 100) sum([ len(z['message']['items']) for z in res ]) ## cursor_max - especially useful when a request could be very large ### e.g., "ecology" results in ~275K records, let's max at 10,000 ### with 1000 at a time res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000) sum([ len(z['message']['items']) for z in res ]) items = [ z['message']['items'] for z in res ] items = [ item for sublist in items for item in sublist ] [ z['DOI'] for z in items ][0:50] # field queries res = cr.works(query = "ecology", query_author = 'carl boettiger') [ x['author'][0]['family'] for x in res['message']['items'] ] # select certain fields to return ## as a comma separated string cr.works(query = "ecology", select = "DOI,title") ## or as a list cr.works(query = "ecology", select = ["DOI","title"]) """ if ids.__class__.__name__ != 'NoneType': return request(self.mailto, self.base_url, '/works/', ids, query, filter, offset, limit, sample, sort, order, facet, select, None, None, None, None, **kwargs) # depends on [control=['if'], data=[]] else: return Request(self.mailto, self.base_url, '/works/', query, filter, offset, limit, sample, sort, order, facet, select, cursor, cursor_max, None, **kwargs).do_request()
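A compact usage sketch distilled from the docstring above. It assumes the habanero package is installed and the Crossref API is reachable over the network; the mailto address is a placeholder.

from habanero import Crossref

cr = Crossref(mailto="you@example.org")  # placeholder contact address for the polite pool

# With cursor='*', works() returns a list of response pages rather than a single dict.
pages = cr.works(query="octopus", cursor="*", limit=100, cursor_max=300)

# Flatten the per-page items and collect their DOIs.
dois = [item["DOI"] for page in pages for item in page["message"]["items"]]
print(len(dois), dois[:3])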
def del_rules(cls, *names, attr=None): """Delete algebraic rules used by :meth:`create` Remove the rules with the given `names`, or all rules if no names are given Args: names (str): Names of rules to delete attr (None or str): Name of the class attribute from which to delete the rules. Cf. :meth:`add_rule`. Raises: KeyError: If any rule in `names` does not exist AttributeError: If invalid `attr` """ if attr is None: attr = cls._rules_attr() if len(names) == 0: getattr(cls, attr) # raise AttributeError if wrong attr setattr(cls, attr, OrderedDict()) else: for name in names: del getattr(cls, attr)[name]
def function[del_rules, parameter[cls]]: constant[Delete algebraic rules used by :meth:`create` Remove the rules with the given `names`, or all rules if no names are given Args: names (str): Names of rules to delete attr (None or str): Name of the class attribute from which to delete the rules. Cf. :meth:`add_rule`. Raises: KeyError: If any rule in `names` does not exist AttributeError: If invalid `attr` ] if compare[name[attr] is constant[None]] begin[:] variable[attr] assign[=] call[name[cls]._rules_attr, parameter[]] if compare[call[name[len], parameter[name[names]]] equal[==] constant[0]] begin[:] call[name[getattr], parameter[name[cls], name[attr]]] call[name[setattr], parameter[name[cls], name[attr], call[name[OrderedDict], parameter[]]]]
keyword[def] identifier[del_rules] ( identifier[cls] ,* identifier[names] , identifier[attr] = keyword[None] ): literal[string] keyword[if] identifier[attr] keyword[is] keyword[None] : identifier[attr] = identifier[cls] . identifier[_rules_attr] () keyword[if] identifier[len] ( identifier[names] )== literal[int] : identifier[getattr] ( identifier[cls] , identifier[attr] ) identifier[setattr] ( identifier[cls] , identifier[attr] , identifier[OrderedDict] ()) keyword[else] : keyword[for] identifier[name] keyword[in] identifier[names] : keyword[del] identifier[getattr] ( identifier[cls] , identifier[attr] )[ identifier[name] ]
def del_rules(cls, *names, attr=None): """Delete algebraic rules used by :meth:`create` Remove the rules with the given `names`, or all rules if no names are given Args: names (str): Names of rules to delete attr (None or str): Name of the class attribute from which to delete the rules. Cf. :meth:`add_rule`. Raises: KeyError: If any rule in `names` does not exist AttributeError: If invalid `attr` """ if attr is None: attr = cls._rules_attr() # depends on [control=['if'], data=['attr']] if len(names) == 0: getattr(cls, attr) # raise AttributeError if wrong attr setattr(cls, attr, OrderedDict()) # depends on [control=['if'], data=[]] else: for name in names: del getattr(cls, attr)[name] # depends on [control=['for'], data=['name']]
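A minimal self-contained sketch of the calling pattern, wiring the function above onto a toy class. The ToyAlgebra name, its _rules attribute, and the sample rules are assumptions for illustration.

from collections import OrderedDict


class ToyAlgebra:
    # assumed rule storage; the real class resolves this name via _rules_attr()
    _rules = OrderedDict(double=lambda x: 2 * x, square=lambda x: x * x)

    @classmethod
    def _rules_attr(cls):
        return '_rules'

    # reuse the module-level function above as a classmethod
    del_rules = classmethod(del_rules)


ToyAlgebra.del_rules('double')            # delete one rule by name
assert 'double' not in ToyAlgebra._rules
ToyAlgebra.del_rules()                    # no names given: reset to an empty OrderedDict
assert len(ToyAlgebra._rules) == 0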
def dictfetchone(cursor: Cursor) -> Optional[Dict[str, Any]]: """ Return the next row from a cursor as an :class:`OrderedDict`, or ``None``. """ columns = get_fieldnames_from_cursor(cursor) row = cursor.fetchone() if not row: return None return OrderedDict(zip(columns, row))
def function[dictfetchone, parameter[cursor]]: constant[ Return the next row from a cursor as an :class:`OrderedDict`, or ``None``. ] variable[columns] assign[=] call[name[get_fieldnames_from_cursor], parameter[name[cursor]]] variable[row] assign[=] call[name[cursor].fetchone, parameter[]] if <ast.UnaryOp object at 0x7da1b172a2c0> begin[:] return[constant[None]] return[call[name[OrderedDict], parameter[call[name[zip], parameter[name[columns], name[row]]]]]]
keyword[def] identifier[dictfetchone] ( identifier[cursor] : identifier[Cursor] )-> identifier[Optional] [ identifier[Dict] [ identifier[str] , identifier[Any] ]]: literal[string] identifier[columns] = identifier[get_fieldnames_from_cursor] ( identifier[cursor] ) identifier[row] = identifier[cursor] . identifier[fetchone] () keyword[if] keyword[not] identifier[row] : keyword[return] keyword[None] keyword[return] identifier[OrderedDict] ( identifier[zip] ( identifier[columns] , identifier[row] ))
def dictfetchone(cursor: Cursor) -> Optional[Dict[str, Any]]: """ Return the next row from a cursor as an :class:`OrderedDict`, or ``None``. """ columns = get_fieldnames_from_cursor(cursor) row = cursor.fetchone() if not row: return None # depends on [control=['if'], data=[]] return OrderedDict(zip(columns, row))
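A runnable sqlite3 check of dictfetchone. The helper get_fieldnames_from_cursor lives elsewhere in the original module, so a plausible stand-in based on cursor.description is supplied here as an assumption, along with the imports the annotations above rely on.

import sqlite3
from collections import OrderedDict
from sqlite3 import Cursor            # satisfies the Cursor annotation above
from typing import Any, Dict, Optional


def get_fieldnames_from_cursor(cursor):
    # assumed stand-in: column names from the DB-API cursor description
    return [col[0] for col in cursor.description]


conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (id INTEGER, name TEXT)')
conn.execute("INSERT INTO t VALUES (1, 'alpha')")
cur = conn.execute('SELECT id, name FROM t')
print(dictfetchone(cur))   # OrderedDict([('id', 1), ('name', 'alpha')])
print(dictfetchone(cur))   # None once the rows are exhausted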
def _prepare_row(task, full, summary): """return a dict with the task's info (more if "full" is set).""" # Would like to include the Job ID in the default set of columns, but # it is a long value and would leave little room for status and update time. row_spec = collections.namedtuple('row_spec', ['key', 'required', 'default_value']) # pyformat: disable default_columns = [ row_spec('job-name', True, None), row_spec('task-id', False, None), row_spec('last-update', True, None), row_spec('status-message', True, None) ] full_columns = default_columns + [ row_spec('job-id', True, None), row_spec('user-id', True, None), row_spec('status', True, None), row_spec('status-detail', True, None), row_spec('task-attempt', False, None), row_spec('create-time', True, None), row_spec('start-time', True, None), row_spec('end-time', True, None), row_spec('internal-id', True, None), row_spec('logging', True, None), row_spec('labels', True, {}), row_spec('envs', True, {}), row_spec('inputs', True, {}), row_spec('input-recursives', False, {}), row_spec('outputs', True, {}), row_spec('output-recursives', False, {}), row_spec('mounts', True, {}), row_spec('provider', True, None), row_spec('provider-attributes', True, {}), row_spec('events', True, []), row_spec('user-project', False, None), row_spec('dsub-version', False, None), row_spec('script-name', False, None), row_spec('script', False, None), ] summary_columns = default_columns + [ row_spec('job-id', True, None), row_spec('user-id', True, None), row_spec('status', True, None), ] # pyformat: enable assert not (full and summary), 'Full and summary cannot both be enabled' if full: columns = full_columns elif summary: columns = summary_columns else: columns = default_columns row = {} for col in columns: key, required, default = col value = task.get_field(key, default) if required or value is not None: row[key] = value return row
def function[_prepare_row, parameter[task, full, summary]]: constant[return a dict with the task's info (more if "full" is set).] variable[row_spec] assign[=] call[name[collections].namedtuple, parameter[constant[row_spec], list[[<ast.Constant object at 0x7da1b0127070>, <ast.Constant object at 0x7da1b0127e80>, <ast.Constant object at 0x7da1b01247c0>]]]] variable[default_columns] assign[=] list[[<ast.Call object at 0x7da1b0124250>, <ast.Call object at 0x7da1b0125630>, <ast.Call object at 0x7da1b0124490>, <ast.Call object at 0x7da1b0124400>]] variable[full_columns] assign[=] binary_operation[name[default_columns] + list[[<ast.Call object at 0x7da1b0126800>, <ast.Call object at 0x7da1b0124f70>, <ast.Call object at 0x7da1b0127730>, <ast.Call object at 0x7da1b0125fc0>, <ast.Call object at 0x7da1b013c2e0>, <ast.Call object at 0x7da1b013c7f0>, <ast.Call object at 0x7da1b013e140>, <ast.Call object at 0x7da1b013ea70>, <ast.Call object at 0x7da1b013efb0>, <ast.Call object at 0x7da1b013d030>, <ast.Call object at 0x7da1b013fa90>, <ast.Call object at 0x7da1b013e9e0>, <ast.Call object at 0x7da1b013dc00>, <ast.Call object at 0x7da1b013c070>, <ast.Call object at 0x7da1b013cbb0>, <ast.Call object at 0x7da1b013f9d0>, <ast.Call object at 0x7da1b013ca30>, <ast.Call object at 0x7da1b013c5b0>, <ast.Call object at 0x7da1b013d5a0>, <ast.Call object at 0x7da1b013de10>, <ast.Call object at 0x7da1b013c0d0>, <ast.Call object at 0x7da1b013c700>, <ast.Call object at 0x7da1b013f550>, <ast.Call object at 0x7da1b013f160>]]] variable[summary_columns] assign[=] binary_operation[name[default_columns] + list[[<ast.Call object at 0x7da1b013fb80>, <ast.Call object at 0x7da18f09cfd0>, <ast.Call object at 0x7da18f09d810>]]] assert[<ast.UnaryOp object at 0x7da18f09ffa0>] if name[full] begin[:] variable[columns] assign[=] name[full_columns] variable[row] assign[=] dictionary[[], []] for taget[name[col]] in starred[name[columns]] begin[:] <ast.Tuple object at 0x7da18f09d6f0> assign[=] name[col] variable[value] assign[=] call[name[task].get_field, parameter[name[key], name[default]]] if <ast.BoolOp object at 0x7da18f09e2f0> begin[:] call[name[row]][name[key]] assign[=] name[value] return[name[row]]
keyword[def] identifier[_prepare_row] ( identifier[task] , identifier[full] , identifier[summary] ): literal[string] identifier[row_spec] = identifier[collections] . identifier[namedtuple] ( literal[string] , [ literal[string] , literal[string] , literal[string] ]) identifier[default_columns] =[ identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[False] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ) ] identifier[full_columns] = identifier[default_columns] +[ identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[False] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] ,{}), identifier[row_spec] ( literal[string] , keyword[True] ,{}), identifier[row_spec] ( literal[string] , keyword[True] ,{}), identifier[row_spec] ( literal[string] , keyword[False] ,{}), identifier[row_spec] ( literal[string] , keyword[True] ,{}), identifier[row_spec] ( literal[string] , keyword[False] ,{}), identifier[row_spec] ( literal[string] , keyword[True] ,{}), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] ,{}), identifier[row_spec] ( literal[string] , keyword[True] ,[]), identifier[row_spec] ( literal[string] , keyword[False] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[False] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[False] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[False] , keyword[None] ), ] identifier[summary_columns] = identifier[default_columns] +[ identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), identifier[row_spec] ( literal[string] , keyword[True] , keyword[None] ), ] keyword[assert] keyword[not] ( identifier[full] keyword[and] identifier[summary] ), literal[string] keyword[if] identifier[full] : identifier[columns] = identifier[full_columns] keyword[elif] identifier[summary] : identifier[columns] = identifier[summary_columns] keyword[else] : identifier[columns] = identifier[default_columns] identifier[row] ={} keyword[for] identifier[col] keyword[in] identifier[columns] : identifier[key] , identifier[required] , identifier[default] = identifier[col] identifier[value] = identifier[task] . identifier[get_field] ( identifier[key] , identifier[default] ) keyword[if] identifier[required] keyword[or] identifier[value] keyword[is] keyword[not] keyword[None] : identifier[row] [ identifier[key] ]= identifier[value] keyword[return] identifier[row]
def _prepare_row(task, full, summary): """return a dict with the task's info (more if "full" is set).""" # Would like to include the Job ID in the default set of columns, but # it is a long value and would leave little room for status and update time. row_spec = collections.namedtuple('row_spec', ['key', 'required', 'default_value']) # pyformat: disable default_columns = [row_spec('job-name', True, None), row_spec('task-id', False, None), row_spec('last-update', True, None), row_spec('status-message', True, None)] full_columns = default_columns + [row_spec('job-id', True, None), row_spec('user-id', True, None), row_spec('status', True, None), row_spec('status-detail', True, None), row_spec('task-attempt', False, None), row_spec('create-time', True, None), row_spec('start-time', True, None), row_spec('end-time', True, None), row_spec('internal-id', True, None), row_spec('logging', True, None), row_spec('labels', True, {}), row_spec('envs', True, {}), row_spec('inputs', True, {}), row_spec('input-recursives', False, {}), row_spec('outputs', True, {}), row_spec('output-recursives', False, {}), row_spec('mounts', True, {}), row_spec('provider', True, None), row_spec('provider-attributes', True, {}), row_spec('events', True, []), row_spec('user-project', False, None), row_spec('dsub-version', False, None), row_spec('script-name', False, None), row_spec('script', False, None)] summary_columns = default_columns + [row_spec('job-id', True, None), row_spec('user-id', True, None), row_spec('status', True, None)] # pyformat: enable assert not (full and summary), 'Full and summary cannot both be enabled' if full: columns = full_columns # depends on [control=['if'], data=[]] elif summary: columns = summary_columns # depends on [control=['if'], data=[]] else: columns = default_columns row = {} for col in columns: (key, required, default) = col value = task.get_field(key, default) if required or value is not None: row[key] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['col']] return row
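A hedged demonstration using a stand-in task object; the function only relies on get_field(key, default), so FakeTask and its field values are inventions for illustration. Note that _prepare_row itself needs import collections.

import collections  # needed by _prepare_row above


class FakeTask:
    """Stand-in exposing the get_field(key, default) interface."""

    def __init__(self, fields):
        self._fields = fields

    def get_field(self, key, default=None):
        return self._fields.get(key, default)


task = FakeTask({'job-name': 'align-reads', 'task-id': '7',
                 'last-update': '2019-01-01 12:00:00',
                 'status-message': 'RUNNING'})
print(_prepare_row(task, full=False, summary=False))
# {'job-name': 'align-reads', 'task-id': '7', 'last-update': '2019-01-01 12:00:00', 'status-message': 'RUNNING'}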
def ensure_state(default_getter, exc_class, default_msg=None): """Create a decorator factory function.""" def decorator(getter=default_getter, msg=default_msg): def ensure_decorator(f): @wraps(f) def inner(self, *args, **kwargs): if not getter(self): raise exc_class(msg) if msg else exc_class() return f(self, *args, **kwargs) return inner return ensure_decorator return decorator
def function[ensure_state, parameter[default_getter, exc_class, default_msg]]: constant[Create a decorator factory function.] def function[decorator, parameter[getter, msg]]: def function[ensure_decorator, parameter[f]]: def function[inner, parameter[self]]: if <ast.UnaryOp object at 0x7da1b19b5030> begin[:] <ast.Raise object at 0x7da1b19b4e80> return[call[name[f], parameter[name[self], <ast.Starred object at 0x7da1b19b5240>]]] return[name[inner]] return[name[ensure_decorator]] return[name[decorator]]
keyword[def] identifier[ensure_state] ( identifier[default_getter] , identifier[exc_class] , identifier[default_msg] = keyword[None] ): literal[string] keyword[def] identifier[decorator] ( identifier[getter] = identifier[default_getter] , identifier[msg] = identifier[default_msg] ): keyword[def] identifier[ensure_decorator] ( identifier[f] ): @ identifier[wraps] ( identifier[f] ) keyword[def] identifier[inner] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): keyword[if] keyword[not] identifier[getter] ( identifier[self] ): keyword[raise] identifier[exc_class] ( identifier[msg] ) keyword[if] identifier[msg] keyword[else] identifier[exc_class] () keyword[return] identifier[f] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[inner] keyword[return] identifier[ensure_decorator] keyword[return] identifier[decorator]
def ensure_state(default_getter, exc_class, default_msg=None): """Create a decorator factory function.""" def decorator(getter=default_getter, msg=default_msg): def ensure_decorator(f): @wraps(f) def inner(self, *args, **kwargs): if not getter(self): raise exc_class(msg) if msg else exc_class() # depends on [control=['if'], data=[]] return f(self, *args, **kwargs) return inner return ensure_decorator return decorator
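A usage sketch for the factory: one call fixes the state getter and exception class, and each decoration may still override the getter or message. The Client class and the error message are assumptions.

from functools import wraps  # needed by ensure_state above


ensure_open = ensure_state(lambda self: self._open, RuntimeError,
                           'connection is closed')


class Client:
    def __init__(self):
        self._open = False

    def open(self):
        self._open = True

    @ensure_open()                 # note the call: the factory returns the decorator
    def send(self, payload):
        return len(payload)


c = Client()
try:
    c.send(b'hi')
except RuntimeError as exc:
    print(exc)                     # connection is closed
c.open()
print(c.send(b'hi'))               # 2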
def is_installed_extension(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Test if a specific extension is installed CLI Example: .. code-block:: bash salt '*' postgres.is_installed_extension ''' installed_ext = get_installed_extension( name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return bool(installed_ext)
def function[is_installed_extension, parameter[name, user, host, port, maintenance_db, password, runas]]: constant[ Test if a specific extension is installed CLI Example: .. code-block:: bash salt '*' postgres.is_installed_extension ] variable[installed_ext] assign[=] call[name[get_installed_extension], parameter[name[name]]] return[call[name[bool], parameter[name[installed_ext]]]]
keyword[def] identifier[is_installed_extension] ( identifier[name] , identifier[user] = keyword[None] , identifier[host] = keyword[None] , identifier[port] = keyword[None] , identifier[maintenance_db] = keyword[None] , identifier[password] = keyword[None] , identifier[runas] = keyword[None] ): literal[string] identifier[installed_ext] = identifier[get_installed_extension] ( identifier[name] , identifier[user] = identifier[user] , identifier[host] = identifier[host] , identifier[port] = identifier[port] , identifier[maintenance_db] = identifier[maintenance_db] , identifier[password] = identifier[password] , identifier[runas] = identifier[runas] ) keyword[return] identifier[bool] ( identifier[installed_ext] )
def is_installed_extension(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): """ Test if a specific extension is installed CLI Example: .. code-block:: bash salt '*' postgres.is_installed_extension """ installed_ext = get_installed_extension(name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return bool(installed_ext)
def remove_not_allowed_in_console(): '''This function should be called from the console, when it starts. Some transformers are not allowed in the console and they could have been loaded prior to the console being activated. We effectively remove them and print an information message specific to that transformer as written in the transformer module. ''' not_allowed_in_console = [] if CONSOLE_ACTIVE: for name in transformers: tr_module = import_transformer(name) if hasattr(tr_module, "NO_CONSOLE"): not_allowed_in_console.append((name, tr_module)) for name, tr_module in not_allowed_in_console: print(tr_module.NO_CONSOLE) # Note: we do not remove them, so as to avoid seeing the # information message displayed again if an attempt is # made to re-import them from a console instruction. transformers[name] = NullTransformer()
def function[remove_not_allowed_in_console, parameter[]]: constant[This function should be called from the console, when it starts. Some transformers are not allowed in the console and they could have been loaded prior to the console being activated. We effectively remove them and print an information message specific to that transformer as written in the transformer module. ] variable[not_allowed_in_console] assign[=] list[[]] if name[CONSOLE_ACTIVE] begin[:] for taget[name[name]] in starred[name[transformers]] begin[:] variable[tr_module] assign[=] call[name[import_transformer], parameter[name[name]]] if call[name[hasattr], parameter[name[tr_module], constant[NO_CONSOLE]]] begin[:] call[name[not_allowed_in_console].append, parameter[tuple[[<ast.Name object at 0x7da1b1253b80>, <ast.Name object at 0x7da1b1253ac0>]]]] for taget[tuple[[<ast.Name object at 0x7da1b1253ee0>, <ast.Name object at 0x7da1b1253af0>]]] in starred[name[not_allowed_in_console]] begin[:] call[name[print], parameter[name[tr_module].NO_CONSOLE]] call[name[transformers]][name[name]] assign[=] call[name[NullTransformer], parameter[]]
keyword[def] identifier[remove_not_allowed_in_console] (): literal[string] identifier[not_allowed_in_console] =[] keyword[if] identifier[CONSOLE_ACTIVE] : keyword[for] identifier[name] keyword[in] identifier[transformers] : identifier[tr_module] = identifier[import_transformer] ( identifier[name] ) keyword[if] identifier[hasattr] ( identifier[tr_module] , literal[string] ): identifier[not_allowed_in_console] . identifier[append] (( identifier[name] , identifier[tr_module] )) keyword[for] identifier[name] , identifier[tr_module] keyword[in] identifier[not_allowed_in_console] : identifier[print] ( identifier[tr_module] . identifier[NO_CONSOLE] ) identifier[transformers] [ identifier[name] ]= identifier[NullTransformer] ()
def remove_not_allowed_in_console(): """This function should be called from the console, when it starts. Some transformers are not allowed in the console and they could have been loaded prior to the console being activated. We effectively remove them and print an information message specific to that transformer as written in the transformer module. """ not_allowed_in_console = [] if CONSOLE_ACTIVE: for name in transformers: tr_module = import_transformer(name) if hasattr(tr_module, 'NO_CONSOLE'): not_allowed_in_console.append((name, tr_module)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] for (name, tr_module) in not_allowed_in_console: print(tr_module.NO_CONSOLE) # Note: we do not remove them, so as to avoid seeing the # information message displayed again if an attempt is # made to re-import them from a console instruction. transformers[name] = NullTransformer() # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
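A self-contained sketch of the NO_CONSOLE convention, with stand-ins for the module globals the function reads (CONSOLE_ACTIVE, transformers, import_transformer, NullTransformer). It assumes the function above is defined in the same module as these stand-ins.

CONSOLE_ACTIVE = True


class NullTransformer:
    """Transformer that leaves source code untouched."""
    def transform_source(self, source):
        return source


class _FakeTransformerModule:
    NO_CONSOLE = 'repeat_until cannot be used in the console.'


transformers = {'repeat_until': _FakeTransformerModule()}


def import_transformer(name):      # stand-in for the real loader
    return transformers[name]


remove_not_allowed_in_console()    # prints the NO_CONSOLE message
print(type(transformers['repeat_until']).__name__)   # NullTransformer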
def get_options(server): """Retrieve the available HTTP verbs""" try: response = requests.options( server, allow_redirects=False, verify=False, timeout=5) except (requests.exceptions.ConnectionError, requests.exceptions.MissingSchema): return "Server {} is not available!".format(server) try: return {'allowed': response.headers['Allow']} except KeyError: return "Unable to get HTTP methods"
def function[get_options, parameter[server]]: constant[Retrieve the available HTTP verbs] <ast.Try object at 0x7da1b2261c90> <ast.Try object at 0x7da1b2263d30>
keyword[def] identifier[get_options] ( identifier[server] ): literal[string] keyword[try] : identifier[response] = identifier[requests] . identifier[options] ( identifier[server] , identifier[allow_redirects] = keyword[False] , identifier[verify] = keyword[False] , identifier[timeout] = literal[int] ) keyword[except] ( identifier[requests] . identifier[exceptions] . identifier[ConnectionError] , identifier[requests] . identifier[exceptions] . identifier[MissingSchema] ): keyword[return] literal[string] . identifier[format] ( identifier[server] ) keyword[try] : keyword[return] { literal[string] : identifier[response] . identifier[headers] [ literal[string] ]} keyword[except] identifier[KeyError] : keyword[return] literal[string]
def get_options(server): """Retrieve the available HTTP verbs""" try: response = requests.options(server, allow_redirects=False, verify=False, timeout=5) # depends on [control=['try'], data=[]] except (requests.exceptions.ConnectionError, requests.exceptions.MissingSchema): return 'Server {} is not available!'.format(server) # depends on [control=['except'], data=[]] try: return {'allowed': response.headers['Allow']} # depends on [control=['try'], data=[]] except KeyError: return 'Unable to get HTTP methods' # depends on [control=['except'], data=[]]
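A quick hedged check of get_options; it needs the requests package plus outbound network access, and example.org is just a reachable placeholder host.

import requests  # needed by get_options above

result = get_options('https://example.org')
if isinstance(result, dict):
    print('Allowed methods:', result['allowed'])
else:
    print(result)  # unavailable-server or missing-Allow-header message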
def _Rforce(self,R,phi=0.,t=0.): """ NAME: _Rforce PURPOSE: evaluate the radial force for this potential INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: the radial force HISTORY: 2010-11-24 - Written - Bovy (NYU) """ return self._A*math.exp(-(t-self._to)**2./2./self._sigma2)\ /R*math.sin(self._alpha*math.log(R) -self._m*(phi-self._omegas*t-self._gamma))
def function[_Rforce, parameter[self, R, phi, t]]: constant[ NAME: _Rforce PURPOSE: evaluate the radial force for this potential INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: the radial force HISTORY: 2010-11-24 - Written - Bovy (NYU) ] return[binary_operation[binary_operation[binary_operation[name[self]._A * call[name[math].exp, parameter[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0e88490> / constant[2.0]] / name[self]._sigma2]]]] / name[R]] * call[name[math].sin, parameter[binary_operation[binary_operation[name[self]._alpha * call[name[math].log, parameter[name[R]]]] - binary_operation[name[self]._m * binary_operation[binary_operation[name[phi] - binary_operation[name[self]._omegas * name[t]]] - name[self]._gamma]]]]]]]
keyword[def] identifier[_Rforce] ( identifier[self] , identifier[R] , identifier[phi] = literal[int] , identifier[t] = literal[int] ): literal[string] keyword[return] identifier[self] . identifier[_A] * identifier[math] . identifier[exp] (-( identifier[t] - identifier[self] . identifier[_to] )** literal[int] / literal[int] / identifier[self] . identifier[_sigma2] )/ identifier[R] * identifier[math] . identifier[sin] ( identifier[self] . identifier[_alpha] * identifier[math] . identifier[log] ( identifier[R] ) - identifier[self] . identifier[_m] *( identifier[phi] - identifier[self] . identifier[_omegas] * identifier[t] - identifier[self] . identifier[_gamma] ))
def _Rforce(self, R, phi=0.0, t=0.0): """ NAME: _Rforce PURPOSE: evaluate the radial force for this potential INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: the radial force HISTORY: 2010-11-24 - Written - Bovy (NYU) """ return self._A * math.exp(-(t - self._to) ** 2.0 / 2.0 / self._sigma2) / R * math.sin(self._alpha * math.log(R) - self._m * (phi - self._omegas * t - self._gamma))
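A standalone numeric sketch of the same expression, with the instance attributes pulled out as plain values; the parameter numbers are illustrative assumptions, not the class defaults.

import math

# Illustrative log-spiral parameters (assumptions)
A, to, sigma2 = -0.035, 0.0, 1.0
alpha, m, omegas, gamma = -7.0, 2, 0.65, 0.5


def spiral_Rforce(R, phi=0.0, t=0.0):
    """Radial force of a transient log-spiral perturbation."""
    return (A * math.exp(-(t - to) ** 2.0 / 2.0 / sigma2) / R
            * math.sin(alpha * math.log(R) - m * (phi - omegas * t - gamma)))


print(spiral_Rforce(1.0))          # at the peak of the Gaussian time envelope
print(spiral_Rforce(1.0, t=3.0))   # suppressed as the perturbation decays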
def _process_response(self, response): """Parse response""" forward_raw = False content_type = response.headers['Content-Type'] if content_type != 'application/json': logger.debug("headers: %s", response.headers) # API BUG: text/xml content-type with json payload # http://forum.mediafiredev.com/showthread.php?136 if content_type == 'text/xml': # we never request xml, so check it quacks like JSON if not response.text.lstrip().startswith('{'): forward_raw = True else: # _process_response can't deal with non-json, # return response as is forward_raw = True if forward_raw: response.raise_for_status() return response logger.debug("response: %s", response.text) # if we are here, then most likely have json try: response_node = response.json()['response'] except ValueError: # promised JSON but failed raise MediaFireApiError("JSON decode failure") if response_node.get('new_key', 'no') == 'yes': self._regenerate_secret_key() # check for errors if response_node['result'] != 'Success': raise MediaFireApiError(response_node['message'], response_node['error']) return response_node
def function[_process_response, parameter[self, response]]: constant[Parse response] variable[forward_raw] assign[=] constant[False] variable[content_type] assign[=] call[name[response].headers][constant[Content-Type]] if compare[name[content_type] not_equal[!=] constant[application/json]] begin[:] call[name[logger].debug, parameter[constant[headers: %s], name[response].headers]] if compare[name[content_type] equal[==] constant[text/xml]] begin[:] if <ast.UnaryOp object at 0x7da1b0e91600> begin[:] variable[forward_raw] assign[=] constant[True] if name[forward_raw] begin[:] call[name[response].raise_for_status, parameter[]] return[name[response]] call[name[logger].debug, parameter[constant[response: %s], name[response].text]] <ast.Try object at 0x7da1b0e4ec80> if compare[call[name[response_node].get, parameter[constant[new_key], constant[no]]] equal[==] constant[yes]] begin[:] call[name[self]._regenerate_secret_key, parameter[]] if compare[call[name[response_node]][constant[result]] not_equal[!=] constant[Success]] begin[:] <ast.Raise object at 0x7da1b0e4c640> return[name[response_node]]
keyword[def] identifier[_process_response] ( identifier[self] , identifier[response] ): literal[string] identifier[forward_raw] = keyword[False] identifier[content_type] = identifier[response] . identifier[headers] [ literal[string] ] keyword[if] identifier[content_type] != literal[string] : identifier[logger] . identifier[debug] ( literal[string] , identifier[response] . identifier[headers] ) keyword[if] identifier[content_type] == literal[string] : keyword[if] keyword[not] identifier[response] . identifier[text] . identifier[lstrip] (). identifier[startswith] ( literal[string] ): identifier[forward_raw] = keyword[True] keyword[else] : identifier[forward_raw] = keyword[True] keyword[if] identifier[forward_raw] : identifier[response] . identifier[raise_for_status] () keyword[return] identifier[response] identifier[logger] . identifier[debug] ( literal[string] , identifier[response] . identifier[text] ) keyword[try] : identifier[response_node] = identifier[response] . identifier[json] ()[ literal[string] ] keyword[except] identifier[ValueError] : keyword[raise] identifier[MediaFireApiError] ( literal[string] ) keyword[if] identifier[response_node] . identifier[get] ( literal[string] , literal[string] )== literal[string] : identifier[self] . identifier[_regenerate_secret_key] () keyword[if] identifier[response_node] [ literal[string] ]!= literal[string] : keyword[raise] identifier[MediaFireApiError] ( identifier[response_node] [ literal[string] ], identifier[response_node] [ literal[string] ]) keyword[return] identifier[response_node]
def _process_response(self, response): """Parse response""" forward_raw = False content_type = response.headers['Content-Type'] if content_type != 'application/json': logger.debug('headers: %s', response.headers) # API BUG: text/xml content-type with json payload # http://forum.mediafiredev.com/showthread.php?136 if content_type == 'text/xml': # we never request xml, so check it quacks like JSON if not response.text.lstrip().startswith('{'): forward_raw = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # _process_response can't deal with non-json, # return response as is forward_raw = True # depends on [control=['if'], data=['content_type']] if forward_raw: response.raise_for_status() return response # depends on [control=['if'], data=[]] logger.debug('response: %s', response.text) # if we are here, then most likely have json try: response_node = response.json()['response'] # depends on [control=['try'], data=[]] except ValueError: # promised JSON but failed raise MediaFireApiError('JSON decode failure') # depends on [control=['except'], data=[]] if response_node.get('new_key', 'no') == 'yes': self._regenerate_secret_key() # depends on [control=['if'], data=[]] # check for errors if response_node['result'] != 'Success': raise MediaFireApiError(response_node['message'], response_node['error']) # depends on [control=['if'], data=[]] return response_node
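A hedged exercise of the success path through a fake response and a tiny shim class. FakeResponse, the payload, and the no-op _regenerate_secret_key are inventions; the sketch assumes the method above, logger, and MediaFireApiError sit in the same module.

import json
import logging

logger = logging.getLogger(__name__)   # stand-in for the module logger


class MediaFireApiError(Exception):    # minimal stand-in for the real error type
    def __init__(self, message, error=None):
        super().__init__(message)
        self.error = error


class FakeResponse:
    headers = {'Content-Type': 'application/json'}
    text = json.dumps({'response': {'result': 'Success', 'new_key': 'no',
                                    'folder_key': 'abc123'}})

    def json(self):
        return json.loads(self.text)


class Shim:
    _process_response = _process_response   # reuse the function above as a method

    def _regenerate_secret_key(self):       # no-op stand-in
        pass


print(Shim()._process_response(FakeResponse()))
# {'result': 'Success', 'new_key': 'no', 'folder_key': 'abc123'}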
def reset_tree(self): """ Resets the current tree to empty. """ self.tree = {} self.tree['leaves'] = [] self.tree['levels'] = [] self.tree['is_ready'] = False
def function[reset_tree, parameter[self]]: constant[ Resets the current tree to empty. ] name[self].tree assign[=] dictionary[[], []] call[name[self].tree][constant[leaves]] assign[=] list[[]] call[name[self].tree][constant[levels]] assign[=] list[[]] call[name[self].tree][constant[is_ready]] assign[=] constant[False]
keyword[def] identifier[reset_tree] ( identifier[self] ): literal[string] identifier[self] . identifier[tree] ={} identifier[self] . identifier[tree] [ literal[string] ]=[] identifier[self] . identifier[tree] [ literal[string] ]=[] identifier[self] . identifier[tree] [ literal[string] ]= keyword[False]
def reset_tree(self): """ Resets the current tree to empty. """ self.tree = {} self.tree['leaves'] = [] self.tree['levels'] = [] self.tree['is_ready'] = False
def new_annot(self): """Action: create a new file for annotations.""" if self.parent.info.filename is None: msg = 'No dataset loaded' self.parent.statusBar().showMessage(msg) lg.debug(msg) return filename = splitext(self.parent.info.filename)[0] + '_scores.xml' filename, _ = QFileDialog.getSaveFileName(self, 'Create annotation file', filename, 'Annotation File (*.xml)') if filename == '': return self.update_notes(filename, True)
def function[new_annot, parameter[self]]: constant[Action: create a new file for annotations.] if compare[name[self].parent.info.filename is constant[None]] begin[:] variable[msg] assign[=] constant[No dataset loaded] call[call[name[self].parent.statusBar, parameter[]].showMessage, parameter[name[msg]]] call[name[lg].debug, parameter[name[msg]]] return[None] variable[filename] assign[=] binary_operation[call[call[name[splitext], parameter[name[self].parent.info.filename]]][constant[0]] + constant[_scores.xml]] <ast.Tuple object at 0x7da1b0ec39a0> assign[=] call[name[QFileDialog].getSaveFileName, parameter[name[self], constant[Create annotation file], name[filename], constant[Annotation File (*.xml)]]] if compare[name[filename] equal[==] constant[]] begin[:] return[None] call[name[self].update_notes, parameter[name[filename], constant[True]]]
keyword[def] identifier[new_annot] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[parent] . identifier[info] . identifier[filename] keyword[is] keyword[None] : identifier[msg] = literal[string] identifier[self] . identifier[parent] . identifier[statusBar] (). identifier[showMessage] ( identifier[msg] ) identifier[lg] . identifier[debug] ( identifier[msg] ) keyword[return] identifier[filename] = identifier[splitext] ( identifier[self] . identifier[parent] . identifier[info] . identifier[filename] )[ literal[int] ]+ literal[string] identifier[filename] , identifier[_] = identifier[QFileDialog] . identifier[getSaveFileName] ( identifier[self] , literal[string] , identifier[filename] , literal[string] ) keyword[if] identifier[filename] == literal[string] : keyword[return] identifier[self] . identifier[update_notes] ( identifier[filename] , keyword[True] )
def new_annot(self): """Action: create a new file for annotations.""" if self.parent.info.filename is None: msg = 'No dataset loaded' self.parent.statusBar().showMessage(msg) lg.debug(msg) return # depends on [control=['if'], data=[]] filename = splitext(self.parent.info.filename)[0] + '_scores.xml' (filename, _) = QFileDialog.getSaveFileName(self, 'Create annotation file', filename, 'Annotation File (*.xml)') if filename == '': return # depends on [control=['if'], data=[]] self.update_notes(filename, True)
def list_default_storage_policy_of_datastore(datastore, service_instance=None): ''' Returns the default storage policy assigned to the specified datastore. datastore Name of the datastore. The datastore needs to be visible to the VMware entity the proxy points to. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1 ''' log.trace('Listing the default storage policy of datastore \'%s\'', datastore) # Find datastore target_ref = _get_proxy_target(service_instance) ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref, datastore_names=[datastore]) if not ds_refs: raise VMwareObjectRetrievalError('Datastore \'{0}\' was not ' 'found'.format(datastore)) profile_manager = salt.utils.pbm.get_profile_manager(service_instance) policy = salt.utils.pbm.get_default_storage_policy_of_datastore( profile_manager, ds_refs[0]) return _get_policy_dict(policy)
def function[list_default_storage_policy_of_datastore, parameter[datastore, service_instance]]: constant[ Returns the default storage policy assigned to the specified datastore. datastore Name of the datastore. The datastore needs to be visible to the VMware entity the proxy points to. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1 ] call[name[log].trace, parameter[constant[Listing the default storage policy of datastore '%s'], name[datastore]]] variable[target_ref] assign[=] call[name[_get_proxy_target], parameter[name[service_instance]]] variable[ds_refs] assign[=] call[name[salt].utils.vmware.get_datastores, parameter[name[service_instance], name[target_ref]]] if <ast.UnaryOp object at 0x7da1b21852a0> begin[:] <ast.Raise object at 0x7da1b2186230> variable[profile_manager] assign[=] call[name[salt].utils.pbm.get_profile_manager, parameter[name[service_instance]]] variable[policy] assign[=] call[name[salt].utils.pbm.get_default_storage_policy_of_datastore, parameter[name[profile_manager], call[name[ds_refs]][constant[0]]]] return[call[name[_get_policy_dict], parameter[name[policy]]]]
keyword[def] identifier[list_default_storage_policy_of_datastore] ( identifier[datastore] , identifier[service_instance] = keyword[None] ): literal[string] identifier[log] . identifier[trace] ( literal[string] , identifier[datastore] ) identifier[target_ref] = identifier[_get_proxy_target] ( identifier[service_instance] ) identifier[ds_refs] = identifier[salt] . identifier[utils] . identifier[vmware] . identifier[get_datastores] ( identifier[service_instance] , identifier[target_ref] , identifier[datastore_names] =[ identifier[datastore] ]) keyword[if] keyword[not] identifier[ds_refs] : keyword[raise] identifier[VMwareObjectRetrievalError] ( literal[string] literal[string] . identifier[format] ( identifier[datastore] )) identifier[profile_manager] = identifier[salt] . identifier[utils] . identifier[pbm] . identifier[get_profile_manager] ( identifier[service_instance] ) identifier[policy] = identifier[salt] . identifier[utils] . identifier[pbm] . identifier[get_default_storage_policy_of_datastore] ( identifier[profile_manager] , identifier[ds_refs] [ literal[int] ]) keyword[return] identifier[_get_policy_dict] ( identifier[policy] )
def list_default_storage_policy_of_datastore(datastore, service_instance=None): """ Returns the default storage policy assigned to the specified datastore. datastore Name of the datastore. The datastore needs to be visible to the VMware entity the proxy points to. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1 """ log.trace("Listing the default storage policy of datastore '%s'", datastore) # Find datastore target_ref = _get_proxy_target(service_instance) ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref, datastore_names=[datastore]) if not ds_refs: raise VMwareObjectRetrievalError("Datastore '{0}' was not found".format(datastore)) # depends on [control=['if'], data=[]] profile_manager = salt.utils.pbm.get_profile_manager(service_instance) policy = salt.utils.pbm.get_default_storage_policy_of_datastore(profile_manager, ds_refs[0]) return _get_policy_dict(policy)
def _cleanSessions(self): """ Clean expired sessions. """ tooOld = extime.Time() - timedelta(seconds=PERSISTENT_SESSION_LIFETIME) self.store.query( PersistentSession, PersistentSession.lastUsed < tooOld).deleteFromStore() self._lastClean = self._clock.seconds()
def function[_cleanSessions, parameter[self]]: constant[ Clean expired sessions. ] variable[tooOld] assign[=] binary_operation[call[name[extime].Time, parameter[]] - call[name[timedelta], parameter[]]] call[call[name[self].store.query, parameter[name[PersistentSession], compare[name[PersistentSession].lastUsed less[<] name[tooOld]]]].deleteFromStore, parameter[]] name[self]._lastClean assign[=] call[name[self]._clock.seconds, parameter[]]
keyword[def] identifier[_cleanSessions] ( identifier[self] ): literal[string] identifier[tooOld] = identifier[extime] . identifier[Time] ()- identifier[timedelta] ( identifier[seconds] = identifier[PERSISTENT_SESSION_LIFETIME] ) identifier[self] . identifier[store] . identifier[query] ( identifier[PersistentSession] , identifier[PersistentSession] . identifier[lastUsed] < identifier[tooOld] ). identifier[deleteFromStore] () identifier[self] . identifier[_lastClean] = identifier[self] . identifier[_clock] . identifier[seconds] ()
def _cleanSessions(self): """ Clean expired sessions. """ tooOld = extime.Time() - timedelta(seconds=PERSISTENT_SESSION_LIFETIME) self.store.query(PersistentSession, PersistentSession.lastUsed < tooOld).deleteFromStore() self._lastClean = self._clock.seconds()
def update_data_disk(self, service_name, deployment_name, role_name, lun, host_caching=None, media_link=None, updated_lun=None, disk_label=None, disk_name=None, logical_disk_size_in_gb=None): ''' Updates the specified data disk attached to the specified virtual machine. service_name: The name of the service. deployment_name: The name of the deployment. role_name: The name of the role. lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN specifies the slot in which the data drive appears when mounted for usage by the virtual machine. Valid LUN values are 0 through 15. host_caching: Specifies the platform caching behavior of data disk blob for read/write efficiency. The default value is ReadOnly. Possible values are: None, ReadOnly, ReadWrite media_link: Specifies the location of the blob in Windows Azure blob store where the media for the disk is located. The blob location must belong to the storage account in the subscription specified by the <subscription-id> value in the operation call. Example: http://example.blob.core.windows.net/disks/mydisk.vhd updated_lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN specifies the slot in which the data drive appears when mounted for usage by the virtual machine. Valid LUN values are 0 through 15. disk_label: Specifies the description of the data disk. When you attach a disk, either by directly referencing a media using the MediaLink element or specifying the target disk size, you can use the DiskLabel element to customize the name property of the target data disk. disk_name: Specifies the name of the disk. Windows Azure uses the specified disk to create the data disk for the machine and populates this field with the disk name. logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk to be attached to the role. The disk can be created as part of disk attach or create VM role call by specifying the value for this property. Windows Azure creates the empty disk based on size preference and attaches the newly created disk to the Role. ''' _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('role_name', role_name) _validate_not_none('lun', lun) return self._perform_put( self._get_data_disk_path( service_name, deployment_name, role_name, lun), _XmlSerializer.data_virtual_hard_disk_to_xml( host_caching, disk_label, disk_name, updated_lun, logical_disk_size_in_gb, media_link, None), as_async=True)
def function[update_data_disk, parameter[self, service_name, deployment_name, role_name, lun, host_caching, media_link, updated_lun, disk_label, disk_name, logical_disk_size_in_gb]]: constant[ Updates the specified data disk attached to the specified virtual machine. service_name: The name of the service. deployment_name: The name of the deployment. role_name: The name of the role. lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN specifies the slot in which the data drive appears when mounted for usage by the virtual machine. Valid LUN values are 0 through 15. host_caching: Specifies the platform caching behavior of data disk blob for read/write efficiency. The default value is ReadOnly. Possible values are: None, ReadOnly, ReadWrite media_link: Specifies the location of the blob in Windows Azure blob store where the media for the disk is located. The blob location must belong to the storage account in the subscription specified by the <subscription-id> value in the operation call. Example: http://example.blob.core.windows.net/disks/mydisk.vhd updated_lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN specifies the slot in which the data drive appears when mounted for usage by the virtual machine. Valid LUN values are 0 through 15. disk_label: Specifies the description of the data disk. When you attach a disk, either by directly referencing a media using the MediaLink element or specifying the target disk size, you can use the DiskLabel element to customize the name property of the target data disk. disk_name: Specifies the name of the disk. Windows Azure uses the specified disk to create the data disk for the machine and populates this field with the disk name. logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk to be attached to the role. The disk can be created as part of disk attach or create VM role call by specifying the value for this property. Windows Azure creates the empty disk based on size preference and attaches the newly created disk to the Role. ] call[name[_validate_not_none], parameter[constant[service_name], name[service_name]]] call[name[_validate_not_none], parameter[constant[deployment_name], name[deployment_name]]] call[name[_validate_not_none], parameter[constant[role_name], name[role_name]]] call[name[_validate_not_none], parameter[constant[lun], name[lun]]] return[call[name[self]._perform_put, parameter[call[name[self]._get_data_disk_path, parameter[name[service_name], name[deployment_name], name[role_name], name[lun]]], call[name[_XmlSerializer].data_virtual_hard_disk_to_xml, parameter[name[host_caching], name[disk_label], name[disk_name], name[updated_lun], name[logical_disk_size_in_gb], name[media_link], constant[None]]]]]]
keyword[def] identifier[update_data_disk] ( identifier[self] , identifier[service_name] , identifier[deployment_name] , identifier[role_name] , identifier[lun] , identifier[host_caching] = keyword[None] , identifier[media_link] = keyword[None] , identifier[updated_lun] = keyword[None] , identifier[disk_label] = keyword[None] , identifier[disk_name] = keyword[None] , identifier[logical_disk_size_in_gb] = keyword[None] ): literal[string] identifier[_validate_not_none] ( literal[string] , identifier[service_name] ) identifier[_validate_not_none] ( literal[string] , identifier[deployment_name] ) identifier[_validate_not_none] ( literal[string] , identifier[role_name] ) identifier[_validate_not_none] ( literal[string] , identifier[lun] ) keyword[return] identifier[self] . identifier[_perform_put] ( identifier[self] . identifier[_get_data_disk_path] ( identifier[service_name] , identifier[deployment_name] , identifier[role_name] , identifier[lun] ), identifier[_XmlSerializer] . identifier[data_virtual_hard_disk_to_xml] ( identifier[host_caching] , identifier[disk_label] , identifier[disk_name] , identifier[updated_lun] , identifier[logical_disk_size_in_gb] , identifier[media_link] , keyword[None] ), identifier[as_async] = keyword[True] )
def update_data_disk(self, service_name, deployment_name, role_name, lun, host_caching=None, media_link=None, updated_lun=None, disk_label=None, disk_name=None, logical_disk_size_in_gb=None): """ Updates the specified data disk attached to the specified virtual machine. service_name: The name of the service. deployment_name: The name of the deployment. role_name: The name of the role. lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN specifies the slot in which the data drive appears when mounted for usage by the virtual machine. Valid LUN values are 0 through 15. host_caching: Specifies the platform caching behavior of data disk blob for read/write efficiency. The default value is ReadOnly. Possible values are: None, ReadOnly, ReadWrite media_link: Specifies the location of the blob in Windows Azure blob store where the media for the disk is located. The blob location must belong to the storage account in the subscription specified by the <subscription-id> value in the operation call. Example: http://example.blob.core.windows.net/disks/mydisk.vhd updated_lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN specifies the slot in which the data drive appears when mounted for usage by the virtual machine. Valid LUN values are 0 through 15. disk_label: Specifies the description of the data disk. When you attach a disk, either by directly referencing a media using the MediaLink element or specifying the target disk size, you can use the DiskLabel element to customize the name property of the target data disk. disk_name: Specifies the name of the disk. Windows Azure uses the specified disk to create the data disk for the machine and populates this field with the disk name. logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk to be attached to the role. The disk can be created as part of disk attach or create VM role call by specifying the value for this property. Windows Azure creates the empty disk based on size preference and attaches the newly created disk to the Role. """ _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('role_name', role_name) _validate_not_none('lun', lun) return self._perform_put(self._get_data_disk_path(service_name, deployment_name, role_name, lun), _XmlSerializer.data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, updated_lun, logical_disk_size_in_gb, media_link, None), as_async=True)
def sign(self, data, entropy=None, hashfunc=None, sigencode=sigencode_string, k=None): """ hashfunc= should behave like hashlib.sha1 . The output length of the hash (in bytes) must not be longer than the length of the curve order (rounded up to the nearest byte), so using SHA256 with nist256p is ok, but SHA256 with nist192p is not. (In the 2**-96ish unlikely event of a hash output larger than the curve order, the hash will effectively be wrapped mod n). Use hashfunc=hashlib.sha1 to match openssl's -ecdsa-with-SHA1 mode, or hashfunc=hashlib.sha256 for openssl-1.0.0's -ecdsa-with-SHA256. """ hashfunc = hashfunc or self.default_hashfunc h = hashfunc(data).digest() return self.sign_digest(h, entropy, sigencode, k)
def function[sign, parameter[self, data, entropy, hashfunc, sigencode, k]]: constant[ hashfunc= should behave like hashlib.sha1 . The output length of the hash (in bytes) must not be longer than the length of the curve order (rounded up to the nearest byte), so using SHA256 with nist256p is ok, but SHA256 with nist192p is not. (In the 2**-96ish unlikely event of a hash output larger than the curve order, the hash will effectively be wrapped mod n). Use hashfunc=hashlib.sha1 to match openssl's -ecdsa-with-SHA1 mode, or hashfunc=hashlib.sha256 for openssl-1.0.0's -ecdsa-with-SHA256. ] variable[hashfunc] assign[=] <ast.BoolOp object at 0x7da1b2344850> variable[h] assign[=] call[call[name[hashfunc], parameter[name[data]]].digest, parameter[]] return[call[name[self].sign_digest, parameter[name[h], name[entropy], name[sigencode], name[k]]]]
keyword[def] identifier[sign] ( identifier[self] , identifier[data] , identifier[entropy] = keyword[None] , identifier[hashfunc] = keyword[None] , identifier[sigencode] = identifier[sigencode_string] , identifier[k] = keyword[None] ): literal[string] identifier[hashfunc] = identifier[hashfunc] keyword[or] identifier[self] . identifier[default_hashfunc] identifier[h] = identifier[hashfunc] ( identifier[data] ). identifier[digest] () keyword[return] identifier[self] . identifier[sign_digest] ( identifier[h] , identifier[entropy] , identifier[sigencode] , identifier[k] )
def sign(self, data, entropy=None, hashfunc=None, sigencode=sigencode_string, k=None): """ hashfunc= should behave like hashlib.sha1 . The output length of the hash (in bytes) must not be longer than the length of the curve order (rounded up to the nearest byte), so using SHA256 with nist256p is ok, but SHA256 with nist192p is not. (In the 2**-96ish unlikely event of a hash output larger than the curve order, the hash will effectively be wrapped mod n). Use hashfunc=hashlib.sha1 to match openssl's -ecdsa-with-SHA1 mode, or hashfunc=hashlib.sha256 for openssl-1.0.0's -ecdsa-with-SHA256. """ hashfunc = hashfunc or self.default_hashfunc h = hashfunc(data).digest() return self.sign_digest(h, entropy, sigencode, k)
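The signature above matches the python-ecdsa SigningKey API; a hedged round-trip assuming that package is installed:

import hashlib

from ecdsa import NIST256p, SigningKey

sk = SigningKey.generate(curve=NIST256p)
signature = sk.sign(b'message to sign', hashfunc=hashlib.sha256)

vk = sk.get_verifying_key()
print(vk.verify(signature, b'message to sign', hashfunc=hashlib.sha256))  # True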
def resample_dataset(dataset, destination_area, **kwargs): """Resample *dataset* and return the resampled version. Args: dataset (xarray.DataArray): Data to be resampled. destination_area: The destination onto which to project the data, either a full blown area definition or a string corresponding to the name of the area as defined in the area file. **kwargs: The extra parameters to pass to the resampler objects. Returns: A resampled DataArray with updated ``.attrs["area"]`` field. The dtype of the array is preserved. """ # call the projection stuff here try: source_area = dataset.attrs["area"] except KeyError: LOG.info("Cannot reproject dataset %s, missing area info", dataset.attrs['name']) return dataset fill_value = kwargs.pop('fill_value', get_fill_value(dataset)) new_data = resample(source_area, dataset, destination_area, fill_value=fill_value, **kwargs) new_attrs = new_data.attrs new_data.attrs = dataset.attrs.copy() new_data.attrs.update(new_attrs) new_data.attrs.update(area=destination_area) return new_data
def function[resample_dataset, parameter[dataset, destination_area]]: constant[Resample *dataset* and return the resampled version. Args: dataset (xarray.DataArray): Data to be resampled. destination_area: The destination onto which to project the data, either a full blown area definition or a string corresponding to the name of the area as defined in the area file. **kwargs: The extra parameters to pass to the resampler objects. Returns: A resampled DataArray with updated ``.attrs["area"]`` field. The dtype of the array is preserved. ] <ast.Try object at 0x7da1b22af280> variable[fill_value] assign[=] call[name[kwargs].pop, parameter[constant[fill_value], call[name[get_fill_value], parameter[name[dataset]]]]] variable[new_data] assign[=] call[name[resample], parameter[name[source_area], name[dataset], name[destination_area]]] variable[new_attrs] assign[=] name[new_data].attrs name[new_data].attrs assign[=] call[name[dataset].attrs.copy, parameter[]] call[name[new_data].attrs.update, parameter[name[new_attrs]]] call[name[new_data].attrs.update, parameter[]] return[name[new_data]]
keyword[def] identifier[resample_dataset] ( identifier[dataset] , identifier[destination_area] ,** identifier[kwargs] ): literal[string] keyword[try] : identifier[source_area] = identifier[dataset] . identifier[attrs] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[LOG] . identifier[info] ( literal[string] , identifier[dataset] . identifier[attrs] [ literal[string] ]) keyword[return] identifier[dataset] identifier[fill_value] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[get_fill_value] ( identifier[dataset] )) identifier[new_data] = identifier[resample] ( identifier[source_area] , identifier[dataset] , identifier[destination_area] , identifier[fill_value] = identifier[fill_value] ,** identifier[kwargs] ) identifier[new_attrs] = identifier[new_data] . identifier[attrs] identifier[new_data] . identifier[attrs] = identifier[dataset] . identifier[attrs] . identifier[copy] () identifier[new_data] . identifier[attrs] . identifier[update] ( identifier[new_attrs] ) identifier[new_data] . identifier[attrs] . identifier[update] ( identifier[area] = identifier[destination_area] ) keyword[return] identifier[new_data]
def resample_dataset(dataset, destination_area, **kwargs): """Resample *dataset* and return the resampled version. Args: dataset (xarray.DataArray): Data to be resampled. destination_area: The destination onto which to project the data, either a full blown area definition or a string corresponding to the name of the area as defined in the area file. **kwargs: The extra parameters to pass to the resampler objects. Returns: A resampled DataArray with updated ``.attrs["area"]`` field. The dtype of the array is preserved. """ # call the projection stuff here try: source_area = dataset.attrs['area'] # depends on [control=['try'], data=[]] except KeyError: LOG.info('Cannot reproject dataset %s, missing area info', dataset.attrs['name']) return dataset # depends on [control=['except'], data=[]] fill_value = kwargs.pop('fill_value', get_fill_value(dataset)) new_data = resample(source_area, dataset, destination_area, fill_value=fill_value, **kwargs) new_attrs = new_data.attrs new_data.attrs = dataset.attrs.copy() new_data.attrs.update(new_attrs) new_data.attrs.update(area=destination_area) return new_data
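A usage sketch under assumptions: `scene` is a hypothetical container, `data` an xarray.DataArray that already carries an 'area' attribute, 'euro4' an area name the resampler understands; extra keywords are forwarded through **kwargs to resample():

data = scene['overview']          # hypothetical DataArray with .attrs['area'] set
result = resample_dataset(data, 'euro4', fill_value=0)
print(result.attrs['area'])       # the destination area recorded on the result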
def Y_dist(self, new_y_distance): """Use preset values for the distance between lines.""" self.parent.value('y_distance', new_y_distance) self.parent.traces.display()
def function[Y_dist, parameter[self, new_y_distance]]: constant[Use preset values for the distance between lines.] call[name[self].parent.value, parameter[constant[y_distance], name[new_y_distance]]] call[name[self].parent.traces.display, parameter[]]
keyword[def] identifier[Y_dist] ( identifier[self] , identifier[new_y_distance] ): literal[string] identifier[self] . identifier[parent] . identifier[value] ( literal[string] , identifier[new_y_distance] ) identifier[self] . identifier[parent] . identifier[traces] . identifier[display] ()
def Y_dist(self, new_y_distance): """Use preset values for the distance between lines.""" self.parent.value('y_distance', new_y_distance) self.parent.traces.display()
def get_stub(self, number, arch, abi_list=()): """ Pretty much the intersection of SimLibrary.get_stub() and SimSyscallLibrary.get(). :param number: The syscall number :param arch: The architecture being worked with, as either a string name or an archinfo.Arch :param abi_list: A list of ABI names that could be used :return: A SimProcedure representing a plausable stub that could model the syscall """ name, arch, abi = self._canonicalize(number, arch, abi_list) proc = super(SimSyscallLibrary, self).get_stub(name, arch) self._apply_numerical_metadata(proc, number, arch, abi) l.debug("unsupported syscall: %s", number) return proc
def function[get_stub, parameter[self, number, arch, abi_list]]: constant[ Pretty much the intersection of SimLibrary.get_stub() and SimSyscallLibrary.get(). :param number: The syscall number :param arch: The architecture being worked with, as either a string name or an archinfo.Arch :param abi_list: A list of ABI names that could be used :return: A SimProcedure representing a plausable stub that could model the syscall ] <ast.Tuple object at 0x7da18bc73430> assign[=] call[name[self]._canonicalize, parameter[name[number], name[arch], name[abi_list]]] variable[proc] assign[=] call[call[name[super], parameter[name[SimSyscallLibrary], name[self]]].get_stub, parameter[name[name], name[arch]]] call[name[self]._apply_numerical_metadata, parameter[name[proc], name[number], name[arch], name[abi]]] call[name[l].debug, parameter[constant[unsupported syscall: %s], name[number]]] return[name[proc]]
keyword[def] identifier[get_stub] ( identifier[self] , identifier[number] , identifier[arch] , identifier[abi_list] =()): literal[string] identifier[name] , identifier[arch] , identifier[abi] = identifier[self] . identifier[_canonicalize] ( identifier[number] , identifier[arch] , identifier[abi_list] ) identifier[proc] = identifier[super] ( identifier[SimSyscallLibrary] , identifier[self] ). identifier[get_stub] ( identifier[name] , identifier[arch] ) identifier[self] . identifier[_apply_numerical_metadata] ( identifier[proc] , identifier[number] , identifier[arch] , identifier[abi] ) identifier[l] . identifier[debug] ( literal[string] , identifier[number] ) keyword[return] identifier[proc]
def get_stub(self, number, arch, abi_list=()): """ Pretty much the intersection of SimLibrary.get_stub() and SimSyscallLibrary.get(). :param number: The syscall number :param arch: The architecture being worked with, as either a string name or an archinfo.Arch :param abi_list: A list of ABI names that could be used :return: A SimProcedure representing a plausable stub that could model the syscall """ (name, arch, abi) = self._canonicalize(number, arch, abi_list) proc = super(SimSyscallLibrary, self).get_stub(name, arch) self._apply_numerical_metadata(proc, number, arch, abi) l.debug('unsupported syscall: %s', number) return proc
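A hypothetical lookup, assuming `syscall_lib` is a populated SimSyscallLibrary instance; 60 is the exit() number on Linux/AMD64. Note the method unconditionally logs "unsupported syscall" before returning:

stub = syscall_lib.get_stub(60, 'AMD64')   # stub SimProcedure with number/ABI metadata applied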
def unregister_plotter(identifier, sorter=True, plot_func=True): """ Unregister a :class:`psyplot.plotter.Plotter` for the projects Parameters ---------- identifier: str Name of the attribute that is used to filter for the instances belonging to this plotter or to create plots with this plotter sorter: bool If True, the identifier will be unregistered from the :class:`Project` class plot_func: bool If True, the identifier will be unregistered from the :class:`ProjectPlotter` class """ d = registered_plotters.get(identifier, {}) if sorter and hasattr(Project, identifier): delattr(Project, identifier) d['sorter'] = False if plot_func and hasattr(ProjectPlotter, identifier): for cls in [ProjectPlotter, DatasetPlotter, DataArrayPlotter]: delattr(cls, identifier) try: delattr(plot, '_' + identifier) except AttributeError: pass d['plot_func'] = False if sorter and plot_func: registered_plotters.pop(identifier, None)
def function[unregister_plotter, parameter[identifier, sorter, plot_func]]: constant[ Unregister a :class:`psyplot.plotter.Plotter` for the projects Parameters ---------- identifier: str Name of the attribute that is used to filter for the instances belonging to this plotter or to create plots with this plotter sorter: bool If True, the identifier will be unregistered from the :class:`Project` class plot_func: bool If True, the identifier will be unregistered from the :class:`ProjectPlotter` class ] variable[d] assign[=] call[name[registered_plotters].get, parameter[name[identifier], dictionary[[], []]]] if <ast.BoolOp object at 0x7da18fe93430> begin[:] call[name[delattr], parameter[name[Project], name[identifier]]] call[name[d]][constant[sorter]] assign[=] constant[False] if <ast.BoolOp object at 0x7da18fe91690> begin[:] for taget[name[cls]] in starred[list[[<ast.Name object at 0x7da18fe90cd0>, <ast.Name object at 0x7da18fe91060>, <ast.Name object at 0x7da18fe910f0>]]] begin[:] call[name[delattr], parameter[name[cls], name[identifier]]] <ast.Try object at 0x7da18fe91780> call[name[d]][constant[plot_func]] assign[=] constant[False] if <ast.BoolOp object at 0x7da20e74bf40> begin[:] call[name[registered_plotters].pop, parameter[name[identifier], constant[None]]]
keyword[def] identifier[unregister_plotter] ( identifier[identifier] , identifier[sorter] = keyword[True] , identifier[plot_func] = keyword[True] ): literal[string] identifier[d] = identifier[registered_plotters] . identifier[get] ( identifier[identifier] ,{}) keyword[if] identifier[sorter] keyword[and] identifier[hasattr] ( identifier[Project] , identifier[identifier] ): identifier[delattr] ( identifier[Project] , identifier[identifier] ) identifier[d] [ literal[string] ]= keyword[False] keyword[if] identifier[plot_func] keyword[and] identifier[hasattr] ( identifier[ProjectPlotter] , identifier[identifier] ): keyword[for] identifier[cls] keyword[in] [ identifier[ProjectPlotter] , identifier[DatasetPlotter] , identifier[DataArrayPlotter] ]: identifier[delattr] ( identifier[cls] , identifier[identifier] ) keyword[try] : identifier[delattr] ( identifier[plot] , literal[string] + identifier[identifier] ) keyword[except] identifier[AttributeError] : keyword[pass] identifier[d] [ literal[string] ]= keyword[False] keyword[if] identifier[sorter] keyword[and] identifier[plot_func] : identifier[registered_plotters] . identifier[pop] ( identifier[identifier] , keyword[None] )
def unregister_plotter(identifier, sorter=True, plot_func=True): """ Unregister a :class:`psyplot.plotter.Plotter` for the projects Parameters ---------- identifier: str Name of the attribute that is used to filter for the instances belonging to this plotter or to create plots with this plotter sorter: bool If True, the identifier will be unregistered from the :class:`Project` class plot_func: bool If True, the identifier will be unregistered from the :class:`ProjectPlotter` class """ d = registered_plotters.get(identifier, {}) if sorter and hasattr(Project, identifier): delattr(Project, identifier) d['sorter'] = False # depends on [control=['if'], data=[]] if plot_func and hasattr(ProjectPlotter, identifier): for cls in [ProjectPlotter, DatasetPlotter, DataArrayPlotter]: delattr(cls, identifier) # depends on [control=['for'], data=['cls']] try: delattr(plot, '_' + identifier) # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] d['plot_func'] = False # depends on [control=['if'], data=[]] if sorter and plot_func: registered_plotters.pop(identifier, None) # depends on [control=['if'], data=[]]
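Two illustrative calls with made-up identifiers, assuming they were registered earlier:

unregister_plotter('lineplot')                 # drop both the Project sorter and the plot shortcut
unregister_plotter('maps', plot_func=False)    # drop only the sorter; the registry entry is kept,
                                               # since it is popped only when both flags are True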
def download(self, name, ids, datas=None, context=None): """Download a report from the server and return it as a remote file. For instance, to download the "Quotation / Order" report of sale orders identified by the IDs ``[2, 3]``: .. doctest:: :options: +SKIP >>> report = odoo.report.download('sale.report_saleorder', [2, 3]) .. doctest:: :hide: >>> report = odoo.report.download('sale.report_saleorder', [2]) Write it on the file system: .. doctest:: :options: +SKIP >>> with open('sale_orders.pdf', 'wb') as report_file: ... report_file.write(report.read()) ... .. doctest:: :hide: >>> with open('sale_orders.pdf', 'wb') as report_file: ... fileno = report_file.write(report.read()) # Python 3 ... *Python 2:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib2.URLError` (connection error) *Python 3:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib.error.URLError` (connection error) """ if context is None: context = self._odoo.env.context def check_report(name): report_model = 'ir.actions.report' if v(self._odoo.version)[0] < 11: report_model = 'ir.actions.report.xml' IrReport = self._odoo.env[report_model] report_ids = IrReport.search([('report_name', '=', name)]) report_id = report_ids and report_ids[0] or False if not report_id: raise ValueError("The report '%s' does not exist." % name) return report_id report_id = check_report(name) # Odoo >= 11.0 if v(self._odoo.version)[0] >= 11: IrReport = self._odoo.env['ir.actions.report'] report = IrReport.browse(report_id) response = report.with_context(context).render(ids, data=datas) content = response[0] # On the server the result is a bytes string, # but the RPC layer of Odoo returns it as a unicode string, # so we encode it again as bytes result = content.encode('latin1') return io.BytesIO(result) # Odoo < 11.0 else: args_to_send = [self._odoo.env.db, self._odoo.env.uid, self._odoo._password, name, ids, datas, context] data = self._odoo.json( '/jsonrpc', {'service': 'report', 'method': 'render_report', 'args': args_to_send}) if 'result' not in data and not data['result'].get('result'): raise ValueError("Received invalid data.") # Encode to bytes forced to be compatible with Python 3.2 # (its 'base64.standard_b64decode()' function only accepts bytes) result = encode2bytes(data['result']['result']) content = base64.standard_b64decode(result) return io.BytesIO(content)
def function[download, parameter[self, name, ids, datas, context]]: constant[Download a report from the server and return it as a remote file. For instance, to download the "Quotation / Order" report of sale orders identified by the IDs ``[2, 3]``: .. doctest:: :options: +SKIP >>> report = odoo.report.download('sale.report_saleorder', [2, 3]) .. doctest:: :hide: >>> report = odoo.report.download('sale.report_saleorder', [2]) Write it on the file system: .. doctest:: :options: +SKIP >>> with open('sale_orders.pdf', 'wb') as report_file: ... report_file.write(report.read()) ... .. doctest:: :hide: >>> with open('sale_orders.pdf', 'wb') as report_file: ... fileno = report_file.write(report.read()) # Python 3 ... *Python 2:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib2.URLError` (connection error) *Python 3:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib.error.URLError` (connection error) ] if compare[name[context] is constant[None]] begin[:] variable[context] assign[=] name[self]._odoo.env.context def function[check_report, parameter[name]]: variable[report_model] assign[=] constant[ir.actions.report] if compare[call[call[name[v], parameter[name[self]._odoo.version]]][constant[0]] less[<] constant[11]] begin[:] variable[report_model] assign[=] constant[ir.actions.report.xml] variable[IrReport] assign[=] call[name[self]._odoo.env][name[report_model]] variable[report_ids] assign[=] call[name[IrReport].search, parameter[list[[<ast.Tuple object at 0x7da18ede5060>]]]] variable[report_id] assign[=] <ast.BoolOp object at 0x7da18ede5450> if <ast.UnaryOp object at 0x7da18ede44c0> begin[:] <ast.Raise object at 0x7da18ede6b60> return[name[report_id]] variable[report_id] assign[=] call[name[check_report], parameter[name[name]]] if compare[call[call[name[v], parameter[name[self]._odoo.version]]][constant[0]] greater_or_equal[>=] constant[11]] begin[:] variable[IrReport] assign[=] call[name[self]._odoo.env][constant[ir.actions.report]] variable[report] assign[=] call[name[IrReport].browse, parameter[name[report_id]]] variable[response] assign[=] call[call[name[report].with_context, parameter[name[context]]].render, parameter[name[ids]]] variable[content] assign[=] call[name[response]][constant[0]] variable[result] assign[=] call[name[content].encode, parameter[constant[latin1]]] return[call[name[io].BytesIO, parameter[name[result]]]]
keyword[def] identifier[download] ( identifier[self] , identifier[name] , identifier[ids] , identifier[datas] = keyword[None] , identifier[context] = keyword[None] ): literal[string] keyword[if] identifier[context] keyword[is] keyword[None] : identifier[context] = identifier[self] . identifier[_odoo] . identifier[env] . identifier[context] keyword[def] identifier[check_report] ( identifier[name] ): identifier[report_model] = literal[string] keyword[if] identifier[v] ( identifier[self] . identifier[_odoo] . identifier[version] )[ literal[int] ]< literal[int] : identifier[report_model] = literal[string] identifier[IrReport] = identifier[self] . identifier[_odoo] . identifier[env] [ identifier[report_model] ] identifier[report_ids] = identifier[IrReport] . identifier[search] ([( literal[string] , literal[string] , identifier[name] )]) identifier[report_id] = identifier[report_ids] keyword[and] identifier[report_ids] [ literal[int] ] keyword[or] keyword[False] keyword[if] keyword[not] identifier[report_id] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] ) keyword[return] identifier[report_id] identifier[report_id] = identifier[check_report] ( identifier[name] ) keyword[if] identifier[v] ( identifier[self] . identifier[_odoo] . identifier[version] )[ literal[int] ]>= literal[int] : identifier[IrReport] = identifier[self] . identifier[_odoo] . identifier[env] [ literal[string] ] identifier[report] = identifier[IrReport] . identifier[browse] ( identifier[report_id] ) identifier[response] = identifier[report] . identifier[with_context] ( identifier[context] ). identifier[render] ( identifier[ids] , identifier[data] = identifier[datas] ) identifier[content] = identifier[response] [ literal[int] ] identifier[result] = identifier[content] . identifier[encode] ( literal[string] ) keyword[return] identifier[io] . identifier[BytesIO] ( identifier[result] ) keyword[else] : identifier[args_to_send] =[ identifier[self] . identifier[_odoo] . identifier[env] . identifier[db] , identifier[self] . identifier[_odoo] . identifier[env] . identifier[uid] , identifier[self] . identifier[_odoo] . identifier[_password] , identifier[name] , identifier[ids] , identifier[datas] , identifier[context] ] identifier[data] = identifier[self] . identifier[_odoo] . identifier[json] ( literal[string] , { literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[args_to_send] }) keyword[if] literal[string] keyword[not] keyword[in] identifier[data] keyword[and] keyword[not] identifier[data] [ literal[string] ]. identifier[get] ( literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[result] = identifier[encode2bytes] ( identifier[data] [ literal[string] ][ literal[string] ]) identifier[content] = identifier[base64] . identifier[standard_b64decode] ( identifier[result] ) keyword[return] identifier[io] . identifier[BytesIO] ( identifier[content] )
def download(self, name, ids, datas=None, context=None): """Download a report from the server and return it as a remote file. For instance, to download the "Quotation / Order" report of sale orders identified by the IDs ``[2, 3]``: .. doctest:: :options: +SKIP >>> report = odoo.report.download('sale.report_saleorder', [2, 3]) .. doctest:: :hide: >>> report = odoo.report.download('sale.report_saleorder', [2]) Write it on the file system: .. doctest:: :options: +SKIP >>> with open('sale_orders.pdf', 'wb') as report_file: ... report_file.write(report.read()) ... .. doctest:: :hide: >>> with open('sale_orders.pdf', 'wb') as report_file: ... fileno = report_file.write(report.read()) # Python 3 ... *Python 2:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib2.URLError` (connection error) *Python 3:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib.error.URLError` (connection error) """ if context is None: context = self._odoo.env.context # depends on [control=['if'], data=['context']] def check_report(name): report_model = 'ir.actions.report' if v(self._odoo.version)[0] < 11: report_model = 'ir.actions.report.xml' # depends on [control=['if'], data=[]] IrReport = self._odoo.env[report_model] report_ids = IrReport.search([('report_name', '=', name)]) report_id = report_ids and report_ids[0] or False if not report_id: raise ValueError("The report '%s' does not exist." % name) # depends on [control=['if'], data=[]] return report_id report_id = check_report(name) # Odoo >= 11.0 if v(self._odoo.version)[0] >= 11: IrReport = self._odoo.env['ir.actions.report'] report = IrReport.browse(report_id) response = report.with_context(context).render(ids, data=datas) content = response[0] # On the server the result is a bytes string, # but the RPC layer of Odoo returns it as a unicode string, # so we encode it again as bytes result = content.encode('latin1') return io.BytesIO(result) # depends on [control=['if'], data=[]] else: # Odoo < 11.0 args_to_send = [self._odoo.env.db, self._odoo.env.uid, self._odoo._password, name, ids, datas, context] data = self._odoo.json('/jsonrpc', {'service': 'report', 'method': 'render_report', 'args': args_to_send}) if 'result' not in data and (not data['result'].get('result')): raise ValueError('Received invalid data.') # depends on [control=['if'], data=[]] # Encode to bytes forced to be compatible with Python 3.2 # (its 'base64.standard_b64decode()' function only accepts bytes) result = encode2bytes(data['result']['result']) content = base64.standard_b64decode(result) return io.BytesIO(content)
def reply_audio(
        self,
        audio: str,
        quote: bool = None,
        caption: str = "",
        parse_mode: str = "",
        duration: int = 0,
        performer: str = None,
        title: str = None,
        thumb: str = None,
        disable_notification: bool = None,
        reply_to_message_id: int = None,
        reply_markup: Union[
            "pyrogram.InlineKeyboardMarkup",
            "pyrogram.ReplyKeyboardMarkup",
            "pyrogram.ReplyKeyboardRemove",
            "pyrogram.ForceReply"
        ] = None,
        progress: callable = None,
        progress_args: tuple = ()
    ) -> "Message":
        """Bound method *reply_audio* of :obj:`Message <pyrogram.Message>`.

        Use as a shortcut for:

        .. code-block:: python

            client.send_audio(
                chat_id=message.chat.id,
                audio=audio
            )

        Example:
            .. code-block:: python

                message.reply_audio(audio)

        Args:
            audio (``str``):
                Audio file to send.
                Pass a file_id as string to send an audio file that exists on the Telegram servers,
                pass an HTTP URL as a string for Telegram to get an audio file from the Internet, or
                pass a file path as string to upload a new audio file that exists on your local machine.

            quote (``bool``, *optional*):
                If ``True``, the message will be sent as a reply to this message.
                If *reply_to_message_id* is passed, this parameter will be ignored.
                Defaults to ``True`` in group chats and ``False`` in private chats.

            caption (``str``, *optional*):
                Audio caption, 0-1024 characters.

            parse_mode (``str``, *optional*):
                Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
                if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
                Defaults to Markdown.

            duration (``int``, *optional*):
                Duration of the audio in seconds.

            performer (``str``, *optional*):
                Performer.

            title (``str``, *optional*):
                Track name.

            thumb (``str``, *optional*):
                Thumbnail of the music file album cover.
                The thumbnail should be in JPEG format and less than 200 KB in size.
                A thumbnail's width and height should not exceed 90 pixels.
                Thumbnails can't be reused and can be only uploaded as a new file.

            disable_notification (``bool``, *optional*):
                Sends the message silently.
                Users will receive a notification with no sound.

            reply_to_message_id (``int``, *optional*):
                If the message is a reply, ID of the original message.

            reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
                Additional interface options. An object for an inline keyboard, custom reply keyboard,
                instructions to remove reply keyboard or to force a reply from the user.

            progress (``callable``, *optional*):
                Pass a callback function to view the upload progress.
                The function must take *(client, current, total, \*args)* as positional arguments (look at the section
                below for a detailed description).

            progress_args (``tuple``, *optional*):
                Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
                a chat_id and a message_id in order to edit a message with the updated progress.

        Other Parameters:
            client (:obj:`Client <pyrogram.Client>`):
                The Client itself, useful when you want to call other API methods inside the callback function.

            current (``int``):
                The amount of bytes uploaded so far.

            total (``int``):
                The size of the file.

            *args (``tuple``, *optional*):
                Extra custom arguments as defined in the *progress_args* parameter.
                You can either keep *\*args* or add every single extra argument in your function signature.

        Returns:
            On success, the sent :obj:`Message <pyrogram.Message>` is returned.
            In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.

        Raises:
            :class:`RPCError <pyrogram.RPCError>`
        """
        if quote is None:
            quote = self.chat.type != "private"

        if reply_to_message_id is None and quote:
            reply_to_message_id = self.message_id

        return self._client.send_audio(
            chat_id=self.chat.id,
            audio=audio,
            caption=caption,
            parse_mode=parse_mode,
            duration=duration,
            performer=performer,
            title=title,
            thumb=thumb,
            disable_notification=disable_notification,
            reply_to_message_id=reply_to_message_id,
            reply_markup=reply_markup,
            progress=progress,
            progress_args=progress_args
        )
def function[reply_audio, parameter[self, audio, quote, caption, parse_mode, duration, performer, title, thumb, disable_notification, reply_to_message_id, reply_markup, progress, progress_args]]: constant[Bound method *reply_audio* of :obj:`Message <pyrogram.Message>`.

        Use as a shortcut for:

        .. code-block:: python

            client.send_audio(
                chat_id=message.chat.id,
                audio=audio
            )

        Example:
            .. code-block:: python

                message.reply_audio(audio)

        Args:
            audio (``str``):
                Audio file to send.
                Pass a file_id as string to send an audio file that exists on the Telegram servers,
                pass an HTTP URL as a string for Telegram to get an audio file from the Internet, or
                pass a file path as string to upload a new audio file that exists on your local machine.

            quote (``bool``, *optional*):
                If ``True``, the message will be sent as a reply to this message.
                If *reply_to_message_id* is passed, this parameter will be ignored.
                Defaults to ``True`` in group chats and ``False`` in private chats.

            caption (``str``, *optional*):
                Audio caption, 0-1024 characters.

            parse_mode (``str``, *optional*):
                Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
                if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
                Defaults to Markdown.

            duration (``int``, *optional*):
                Duration of the audio in seconds.

            performer (``str``, *optional*):
                Performer.

            title (``str``, *optional*):
                Track name.

            thumb (``str``, *optional*):
                Thumbnail of the music file album cover.
                The thumbnail should be in JPEG format and less than 200 KB in size.
                A thumbnail's width and height should not exceed 90 pixels.
                Thumbnails can't be reused and can be only uploaded as a new file.

            disable_notification (``bool``, *optional*):
                Sends the message silently.
                Users will receive a notification with no sound.

            reply_to_message_id (``int``, *optional*):
                If the message is a reply, ID of the original message.

            reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
                Additional interface options. An object for an inline keyboard, custom reply keyboard,
                instructions to remove reply keyboard or to force a reply from the user.

            progress (``callable``, *optional*):
                Pass a callback function to view the upload progress.
                The function must take *(client, current, total, \*args)* as positional arguments (look at the section
                below for a detailed description).

            progress_args (``tuple``, *optional*):
                Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
                a chat_id and a message_id in order to edit a message with the updated progress.

        Other Parameters:
            client (:obj:`Client <pyrogram.Client>`):
                The Client itself, useful when you want to call other API methods inside the callback function.

            current (``int``):
                The amount of bytes uploaded so far.

            total (``int``):
                The size of the file.

            *args (``tuple``, *optional*):
                Extra custom arguments as defined in the *progress_args* parameter.
                You can either keep *\*args* or add every single extra argument in your function signature.

        Returns:
            On success, the sent :obj:`Message <pyrogram.Message>` is returned.
            In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.

        Raises:
            :class:`RPCError <pyrogram.RPCError>`
        ] if compare[name[quote] is constant[None]] begin[:] variable[quote] assign[=] compare[name[self].chat.type not_equal[!=] constant[private]] if <ast.BoolOp object at 0x7da1b1c6e500> begin[:] variable[reply_to_message_id] assign[=] name[self].message_id return[call[name[self]._client.send_audio, parameter[]]]
keyword[def] identifier[reply_audio] ( identifier[self] , identifier[audio] : identifier[str] , identifier[quote] : identifier[bool] = keyword[None] , identifier[caption] : identifier[str] = literal[string] , identifier[parse_mode] : identifier[str] = literal[string] , identifier[duration] : identifier[int] = literal[int] , identifier[performer] : identifier[str] = keyword[None] , identifier[title] : identifier[str] = keyword[None] , identifier[thumb] : identifier[str] = keyword[None] , identifier[disable_notification] : identifier[bool] = keyword[None] , identifier[reply_to_message_id] : identifier[int] = keyword[None] , identifier[reply_markup] : identifier[Union] [ literal[string] , literal[string] , literal[string] , literal[string] ]= keyword[None] , identifier[progress] : identifier[callable] = keyword[None] , identifier[progress_args] : identifier[tuple] =() )-> literal[string] : literal[string] keyword[if] identifier[quote] keyword[is] keyword[None] : identifier[quote] = identifier[self] . identifier[chat] . identifier[type] != literal[string] keyword[if] identifier[reply_to_message_id] keyword[is] keyword[None] keyword[and] identifier[quote] : identifier[reply_to_message_id] = identifier[self] . identifier[message_id] keyword[return] identifier[self] . identifier[_client] . identifier[send_audio] ( identifier[chat_id] = identifier[self] . identifier[chat] . identifier[id] , identifier[audio] = identifier[audio] , identifier[caption] = identifier[caption] , identifier[parse_mode] = identifier[parse_mode] , identifier[duration] = identifier[duration] , identifier[performer] = identifier[performer] , identifier[title] = identifier[title] , identifier[thumb] = identifier[thumb] , identifier[disable_notification] = identifier[disable_notification] , identifier[reply_to_message_id] = identifier[reply_to_message_id] , identifier[reply_markup] = identifier[reply_markup] , identifier[progress] = identifier[progress] , identifier[progress_args] = identifier[progress_args] )
def reply_audio(self, audio: str, quote: bool=None, caption: str='', parse_mode: str='', duration: int=0, performer: str=None, title: str=None, thumb: str=None, disable_notification: bool=None, reply_to_message_id: int=None, reply_markup: Union['pyrogram.InlineKeyboardMarkup', 'pyrogram.ReplyKeyboardMarkup', 'pyrogram.ReplyKeyboardRemove', 'pyrogram.ForceReply']=None, progress: callable=None, progress_args: tuple=()) -> 'Message':
    """Bound method *reply_audio* of :obj:`Message <pyrogram.Message>`.

        Use as a shortcut for:

        .. code-block:: python

            client.send_audio(
                chat_id=message.chat.id,
                audio=audio
            )

        Example:
            .. code-block:: python

                message.reply_audio(audio)

        Args:
            audio (``str``):
                Audio file to send.
                Pass a file_id as string to send an audio file that exists on the Telegram servers,
                pass an HTTP URL as a string for Telegram to get an audio file from the Internet, or
                pass a file path as string to upload a new audio file that exists on your local machine.

            quote (``bool``, *optional*):
                If ``True``, the message will be sent as a reply to this message.
                If *reply_to_message_id* is passed, this parameter will be ignored.
                Defaults to ``True`` in group chats and ``False`` in private chats.

            caption (``str``, *optional*):
                Audio caption, 0-1024 characters.

            parse_mode (``str``, *optional*):
                Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
                if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
                Defaults to Markdown.

            duration (``int``, *optional*):
                Duration of the audio in seconds.

            performer (``str``, *optional*):
                Performer.

            title (``str``, *optional*):
                Track name.

            thumb (``str``, *optional*):
                Thumbnail of the music file album cover.
                The thumbnail should be in JPEG format and less than 200 KB in size.
                A thumbnail's width and height should not exceed 90 pixels.
                Thumbnails can't be reused and can be only uploaded as a new file.

            disable_notification (``bool``, *optional*):
                Sends the message silently.
                Users will receive a notification with no sound.

            reply_to_message_id (``int``, *optional*):
                If the message is a reply, ID of the original message.

            reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
                Additional interface options. An object for an inline keyboard, custom reply keyboard,
                instructions to remove reply keyboard or to force a reply from the user.

            progress (``callable``, *optional*):
                Pass a callback function to view the upload progress.
                The function must take *(client, current, total, \\*args)* as positional arguments (look at the section
                below for a detailed description).

            progress_args (``tuple``, *optional*):
                Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
                a chat_id and a message_id in order to edit a message with the updated progress.

        Other Parameters:
            client (:obj:`Client <pyrogram.Client>`):
                The Client itself, useful when you want to call other API methods inside the callback function.

            current (``int``):
                The amount of bytes uploaded so far.

            total (``int``):
                The size of the file.

            *args (``tuple``, *optional*):
                Extra custom arguments as defined in the *progress_args* parameter.
                You can either keep *\\*args* or add every single extra argument in your function signature.

        Returns:
            On success, the sent :obj:`Message <pyrogram.Message>` is returned.
            In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.

        Raises:
            :class:`RPCError <pyrogram.RPCError>`
        """
    if quote is None:
        quote = self.chat.type != 'private' # depends on [control=['if'], data=['quote']]
    if reply_to_message_id is None and quote:
        reply_to_message_id = self.message_id # depends on [control=['if'], data=[]]
    return self._client.send_audio(chat_id=self.chat.id, audio=audio, caption=caption, parse_mode=parse_mode, duration=duration, performer=performer, title=title, thumb=thumb, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup, progress=progress, progress_args=progress_args)
def json_unicode_to_utf8(data): """Change all strings in a JSON structure to UTF-8.""" if isinstance(data, unicode): return data.encode('utf-8') elif isinstance(data, dict): newdict = {} for key in data: newdict[json_unicode_to_utf8( key)] = json_unicode_to_utf8(data[key]) return newdict elif isinstance(data, list): return [json_unicode_to_utf8(elem) for elem in data] else: return data
def function[json_unicode_to_utf8, parameter[data]]: constant[Change all strings in a JSON structure to UTF-8.] if call[name[isinstance], parameter[name[data], name[unicode]]] begin[:] return[call[name[data].encode, parameter[constant[utf-8]]]]
keyword[def] identifier[json_unicode_to_utf8] ( identifier[data] ): literal[string] keyword[if] identifier[isinstance] ( identifier[data] , identifier[unicode] ): keyword[return] identifier[data] . identifier[encode] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[data] , identifier[dict] ): identifier[newdict] ={} keyword[for] identifier[key] keyword[in] identifier[data] : identifier[newdict] [ identifier[json_unicode_to_utf8] ( identifier[key] )]= identifier[json_unicode_to_utf8] ( identifier[data] [ identifier[key] ]) keyword[return] identifier[newdict] keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ): keyword[return] [ identifier[json_unicode_to_utf8] ( identifier[elem] ) keyword[for] identifier[elem] keyword[in] identifier[data] ] keyword[else] : keyword[return] identifier[data]
def json_unicode_to_utf8(data): """Change all strings in a JSON structure to UTF-8.""" if isinstance(data, unicode): return data.encode('utf-8') # depends on [control=['if'], data=[]] elif isinstance(data, dict): newdict = {} for key in data: newdict[json_unicode_to_utf8(key)] = json_unicode_to_utf8(data[key]) # depends on [control=['for'], data=['key']] return newdict # depends on [control=['if'], data=[]] elif isinstance(data, list): return [json_unicode_to_utf8(elem) for elem in data] # depends on [control=['if'], data=[]] else: return data
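A behaviour sketch (Python 2 only, since `unicode` is referenced): unicode strings are encoded recursively through dicts and lists, non-strings pass through untouched:

data = {u'caf\xe9': [u'r\xe9sum\xe9', 42]}
out = json_unicode_to_utf8(data)
assert out == {'caf\xc3\xa9': ['r\xc3\xa9sum\xc3\xa9', 42]}   # UTF-8 byte strings; 42 unchanged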
def kline_echarts(self, code=None): def kline_formater(param): return param.name + ':' + vars(param) """plot the market_data""" if code is None: path_name = '.' + os.sep + 'QA_' + self.type + \ '_codepackage_' + self.if_fq + '.html' kline = Kline( 'CodePackage_' + self.if_fq + '_' + self.type, width=1360, height=700, page_title='QUANTAXIS' ) bar = Bar() data_splits = self.splits() for ds in data_splits: data = [] axis = [] if ds.type[-3:] == 'day': datetime = np.array(ds.date.map(str)) else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array( ds.data.loc[:, ['open', 'close', 'low', 'high']] ) kline.add( ds.code[0], datetime, ohlc, mark_point=["max", "min"], is_datazoom_show=True, datazoom_orient='horizontal' ) return kline else: data = [] axis = [] ds = self.select_code(code) data = [] #axis = [] if self.type[-3:] == 'day': datetime = np.array(ds.date.map(str)) else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array(ds.data.loc[:, ['open', 'close', 'low', 'high']]) vol = np.array(ds.volume) kline = Kline( '{}__{}__{}'.format(code, self.if_fq, self.type), width=1360, height=700, page_title='QUANTAXIS' ) bar = Bar() kline.add(self.code, datetime, ohlc, mark_point=["max", "min"], # is_label_show=True, is_datazoom_show=True, is_xaxis_show=False, # is_toolbox_show=True, tooltip_formatter='{b}:{c}', # kline_formater, # is_more_utils=True, datazoom_orient='horizontal') bar.add( self.code, datetime, vol, is_datazoom_show=True, datazoom_xaxis_index=[0, 1] ) grid = Grid(width=1360, height=700, page_title='QUANTAXIS') grid.add(bar, grid_top="80%") grid.add(kline, grid_bottom="30%") return grid
def function[kline_echarts, parameter[self, code]]: def function[kline_formater, parameter[param]]: return[binary_operation[binary_operation[name[param].name + constant[:]] + call[name[vars], parameter[name[param]]]]] constant[plot the market_data] if compare[name[code] is constant[None]] begin[:] variable[path_name] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[.] + name[os].sep] + constant[QA_]] + name[self].type] + constant[_codepackage_]] + name[self].if_fq] + constant[.html]] variable[kline] assign[=] call[name[Kline], parameter[binary_operation[binary_operation[binary_operation[constant[CodePackage_] + name[self].if_fq] + constant[_]] + name[self].type]]] variable[bar] assign[=] call[name[Bar], parameter[]] variable[data_splits] assign[=] call[name[self].splits, parameter[]] for taget[name[ds]] in starred[name[data_splits]] begin[:] variable[data] assign[=] list[[]] variable[axis] assign[=] list[[]] if compare[call[name[ds].type][<ast.Slice object at 0x7da1b1f74c70>] equal[==] constant[day]] begin[:] variable[datetime] assign[=] call[name[np].array, parameter[call[name[ds].date.map, parameter[name[str]]]]] variable[ohlc] assign[=] call[name[np].array, parameter[call[name[ds].data.loc][tuple[[<ast.Slice object at 0x7da1b1f76b00>, <ast.List object at 0x7da1b1f76770>]]]]] call[name[kline].add, parameter[call[name[ds].code][constant[0]], name[datetime], name[ohlc]]] return[name[kline]]
keyword[def] identifier[kline_echarts] ( identifier[self] , identifier[code] = keyword[None] ): keyword[def] identifier[kline_formater] ( identifier[param] ): keyword[return] identifier[param] . identifier[name] + literal[string] + identifier[vars] ( identifier[param] ) literal[string] keyword[if] identifier[code] keyword[is] keyword[None] : identifier[path_name] = literal[string] + identifier[os] . identifier[sep] + literal[string] + identifier[self] . identifier[type] + literal[string] + identifier[self] . identifier[if_fq] + literal[string] identifier[kline] = identifier[Kline] ( literal[string] + identifier[self] . identifier[if_fq] + literal[string] + identifier[self] . identifier[type] , identifier[width] = literal[int] , identifier[height] = literal[int] , identifier[page_title] = literal[string] ) identifier[bar] = identifier[Bar] () identifier[data_splits] = identifier[self] . identifier[splits] () keyword[for] identifier[ds] keyword[in] identifier[data_splits] : identifier[data] =[] identifier[axis] =[] keyword[if] identifier[ds] . identifier[type] [- literal[int] :]== literal[string] : identifier[datetime] = identifier[np] . identifier[array] ( identifier[ds] . identifier[date] . identifier[map] ( identifier[str] )) keyword[else] : identifier[datetime] = identifier[np] . identifier[array] ( identifier[ds] . identifier[datetime] . identifier[map] ( identifier[str] )) identifier[ohlc] = identifier[np] . identifier[array] ( identifier[ds] . identifier[data] . identifier[loc] [:, [ literal[string] , literal[string] , literal[string] , literal[string] ]] ) identifier[kline] . identifier[add] ( identifier[ds] . identifier[code] [ literal[int] ], identifier[datetime] , identifier[ohlc] , identifier[mark_point] =[ literal[string] , literal[string] ], identifier[is_datazoom_show] = keyword[True] , identifier[datazoom_orient] = literal[string] ) keyword[return] identifier[kline] keyword[else] : identifier[data] =[] identifier[axis] =[] identifier[ds] = identifier[self] . identifier[select_code] ( identifier[code] ) identifier[data] =[] keyword[if] identifier[self] . identifier[type] [- literal[int] :]== literal[string] : identifier[datetime] = identifier[np] . identifier[array] ( identifier[ds] . identifier[date] . identifier[map] ( identifier[str] )) keyword[else] : identifier[datetime] = identifier[np] . identifier[array] ( identifier[ds] . identifier[datetime] . identifier[map] ( identifier[str] )) identifier[ohlc] = identifier[np] . identifier[array] ( identifier[ds] . identifier[data] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] , literal[string] ]]) identifier[vol] = identifier[np] . identifier[array] ( identifier[ds] . identifier[volume] ) identifier[kline] = identifier[Kline] ( literal[string] . identifier[format] ( identifier[code] , identifier[self] . identifier[if_fq] , identifier[self] . identifier[type] ), identifier[width] = literal[int] , identifier[height] = literal[int] , identifier[page_title] = literal[string] ) identifier[bar] = identifier[Bar] () identifier[kline] . identifier[add] ( identifier[self] . identifier[code] , identifier[datetime] , identifier[ohlc] , identifier[mark_point] =[ literal[string] , literal[string] ], identifier[is_datazoom_show] = keyword[True] , identifier[is_xaxis_show] = keyword[False] , identifier[tooltip_formatter] = literal[string] , identifier[datazoom_orient] = literal[string] ) identifier[bar] . identifier[add] ( identifier[self] . identifier[code] , identifier[datetime] , identifier[vol] , identifier[is_datazoom_show] = keyword[True] , identifier[datazoom_xaxis_index] =[ literal[int] , literal[int] ] ) identifier[grid] = identifier[Grid] ( identifier[width] = literal[int] , identifier[height] = literal[int] , identifier[page_title] = literal[string] ) identifier[grid] . identifier[add] ( identifier[bar] , identifier[grid_top] = literal[string] ) identifier[grid] . identifier[add] ( identifier[kline] , identifier[grid_bottom] = literal[string] ) keyword[return] identifier[grid]
def kline_echarts(self, code=None): def kline_formater(param): return param.name + ':' + vars(param) 'plot the market_data' if code is None: path_name = '.' + os.sep + 'QA_' + self.type + '_codepackage_' + self.if_fq + '.html' kline = Kline('CodePackage_' + self.if_fq + '_' + self.type, width=1360, height=700, page_title='QUANTAXIS') bar = Bar() data_splits = self.splits() for ds in data_splits: data = [] axis = [] if ds.type[-3:] == 'day': datetime = np.array(ds.date.map(str)) # depends on [control=['if'], data=[]] else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array(ds.data.loc[:, ['open', 'close', 'low', 'high']]) kline.add(ds.code[0], datetime, ohlc, mark_point=['max', 'min'], is_datazoom_show=True, datazoom_orient='horizontal') # depends on [control=['for'], data=['ds']] return kline # depends on [control=['if'], data=[]] else: data = [] axis = [] ds = self.select_code(code) data = [] #axis = [] if self.type[-3:] == 'day': datetime = np.array(ds.date.map(str)) # depends on [control=['if'], data=[]] else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array(ds.data.loc[:, ['open', 'close', 'low', 'high']]) vol = np.array(ds.volume) kline = Kline('{}__{}__{}'.format(code, self.if_fq, self.type), width=1360, height=700, page_title='QUANTAXIS') bar = Bar() # is_label_show=True, # is_toolbox_show=True, # kline_formater, # is_more_utils=True, kline.add(self.code, datetime, ohlc, mark_point=['max', 'min'], is_datazoom_show=True, is_xaxis_show=False, tooltip_formatter='{b}:{c}', datazoom_orient='horizontal') bar.add(self.code, datetime, vol, is_datazoom_show=True, datazoom_xaxis_index=[0, 1]) grid = Grid(width=1360, height=700, page_title='QUANTAXIS') grid.add(bar, grid_top='80%') grid.add(kline, grid_bottom='30%') return grid
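A hypothetical rendering call, assuming `data` is a QUANTAXIS data object exposing this method and pyecharts 0.x (whose chart objects support render()):

grid = data.kline_echarts('000001')   # single-code branch returns a Grid
grid.render('kline.html')             # write the combined kline/volume chart to HTML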
def _histogram(self, which, mu, sigma, data): """plot a histogram. For internal use only""" weights = np.ones_like(data)/len(data) # make bar heights sum to 100% n, bins, patches = plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5) plt.title(r'%s %s: $\mu=%.2f$, $\sigma=%.2f$' % (self.name, which.capitalize(), mu, sigma)) plt.xlabel('Items' if which == 'count' else 'Seconds') plt.ylabel('Frequency') plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, position: "{:.1f}%".format(y*100)))
def function[_histogram, parameter[self, which, mu, sigma, data]]: constant[plot a histogram. For internal use only] variable[weights] assign[=] binary_operation[call[name[np].ones_like, parameter[name[data]]] / call[name[len], parameter[name[data]]]] <ast.Tuple object at 0x7da20c990070> assign[=] call[name[plt].hist, parameter[name[data]]] call[name[plt].title, parameter[binary_operation[constant[%s %s: $\mu=%.2f$, $\sigma=%.2f$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c993b20>, <ast.Call object at 0x7da20c990430>, <ast.Name object at 0x7da20c992650>, <ast.Name object at 0x7da20c993eb0>]]]]] call[name[plt].xlabel, parameter[<ast.IfExp object at 0x7da20c993070>]] call[name[plt].ylabel, parameter[constant[Frequency]]] call[call[name[plt].gca, parameter[]].yaxis.set_major_formatter, parameter[call[name[FuncFormatter], parameter[<ast.Lambda object at 0x7da20c993fd0>]]]]
keyword[def] identifier[_histogram] ( identifier[self] , identifier[which] , identifier[mu] , identifier[sigma] , identifier[data] ): literal[string] identifier[weights] = identifier[np] . identifier[ones_like] ( identifier[data] )/ identifier[len] ( identifier[data] ) identifier[n] , identifier[bins] , identifier[patches] = identifier[plt] . identifier[hist] ( identifier[data] , identifier[bins] = literal[int] , identifier[weights] = identifier[weights] , identifier[facecolor] = literal[string] , identifier[alpha] = literal[int] ) identifier[plt] . identifier[title] ( literal[string] %( identifier[self] . identifier[name] , identifier[which] . identifier[capitalize] (), identifier[mu] , identifier[sigma] )) identifier[plt] . identifier[xlabel] ( literal[string] keyword[if] identifier[which] == literal[string] keyword[else] literal[string] ) identifier[plt] . identifier[ylabel] ( literal[string] ) identifier[plt] . identifier[gca] (). identifier[yaxis] . identifier[set_major_formatter] ( identifier[FuncFormatter] ( keyword[lambda] identifier[y] , identifier[position] : literal[string] . identifier[format] ( identifier[y] * literal[int] )))
def _histogram(self, which, mu, sigma, data): """plot a histogram. For internal use only""" weights = np.ones_like(data) / len(data) # make bar heights sum to 100% (n, bins, patches) = plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5) plt.title('%s %s: $\\mu=%.2f$, $\\sigma=%.2f$' % (self.name, which.capitalize(), mu, sigma)) plt.xlabel('Items' if which == 'count' else 'Seconds') plt.ylabel('Frequency') plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, position: '{:.1f}%'.format(y * 100)))
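The same normalized-histogram idiom as a self-contained sketch; the sample data is synthetic:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

data = np.random.normal(loc=5.0, scale=1.5, size=1000)
weights = np.ones_like(data) / len(data)            # bar heights sum to 100%
plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5)
plt.gca().yaxis.set_major_formatter(
    FuncFormatter(lambda y, pos: "{:.1f}%".format(y * 100)))
plt.xlabel('Seconds')
plt.ylabel('Frequency')
plt.show()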
def build_on_condition(self, runnable, regime, on_condition): """ Build OnCondition event handler code. @param on_condition: OnCondition event handler object @type on_condition: lems.model.dynamics.OnCondition @return: Generated OnCondition code @rtype: list(string) """ on_condition_code = [] on_condition_code += ['if {0}:'.format(\ self.build_expression_from_tree(runnable, regime, on_condition.expression_tree))] for action in on_condition.actions: code = self.build_action(runnable, regime, action) for line in code: on_condition_code += [' ' + line] return on_condition_code
def function[build_on_condition, parameter[self, runnable, regime, on_condition]]: constant[ Build OnCondition event handler code. @param on_condition: OnCondition event handler object @type on_condition: lems.model.dynamics.OnCondition @return: Generated OnCondition code @rtype: list(string) ] variable[on_condition_code] assign[=] list[[]] <ast.AugAssign object at 0x7da1b24af400> for taget[name[action]] in starred[name[on_condition].actions] begin[:] variable[code] assign[=] call[name[self].build_action, parameter[name[runnable], name[regime], name[action]]] for taget[name[line]] in starred[name[code]] begin[:] <ast.AugAssign object at 0x7da1b24ae110> return[name[on_condition_code]]
keyword[def] identifier[build_on_condition] ( identifier[self] , identifier[runnable] , identifier[regime] , identifier[on_condition] ): literal[string] identifier[on_condition_code] =[] identifier[on_condition_code] +=[ literal[string] . identifier[format] ( identifier[self] . identifier[build_expression_from_tree] ( identifier[runnable] , identifier[regime] , identifier[on_condition] . identifier[expression_tree] ))] keyword[for] identifier[action] keyword[in] identifier[on_condition] . identifier[actions] : identifier[code] = identifier[self] . identifier[build_action] ( identifier[runnable] , identifier[regime] , identifier[action] ) keyword[for] identifier[line] keyword[in] identifier[code] : identifier[on_condition_code] +=[ literal[string] + identifier[line] ] keyword[return] identifier[on_condition_code]
def build_on_condition(self, runnable, regime, on_condition): """ Build OnCondition event handler code. @param on_condition: OnCondition event handler object @type on_condition: lems.model.dynamics.OnCondition @return: Generated OnCondition code @rtype: list(string) """ on_condition_code = [] on_condition_code += ['if {0}:'.format(self.build_expression_from_tree(runnable, regime, on_condition.expression_tree))] for action in on_condition.actions: code = self.build_action(runnable, regime, action) for line in code: on_condition_code += [' ' + line] # depends on [control=['for'], data=['line']] # depends on [control=['for'], data=['action']] return on_condition_code
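For orientation, the return value is a list of source lines; with a condition tree rendering as `v > vthresh` and one assignment action, the result would look roughly like this (purely illustrative, since the exact text depends on build_expression_from_tree and build_action):

on_condition_code = [
    'if v > vthresh:',
    '    t_spike = t',
]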
def match(self, **kwargs): """ Traverse relationships with properties matching the given parameters. e.g: `.match(price__lt=10)` :param kwargs: see `NodeSet.filter()` for syntax :return: self """ if kwargs: if self.definition.get('model') is None: raise ValueError("match() with filter only available on relationships with a model") output = process_filter_args(self.definition['model'], kwargs) if output: self.filters.append(output) return self
def function[match, parameter[self]]: constant[ Traverse relationships with properties matching the given parameters. e.g: `.match(price__lt=10)` :param kwargs: see `NodeSet.filter()` for syntax :return: self ] if name[kwargs] begin[:] if compare[call[name[self].definition.get, parameter[constant[model]]] is constant[None]] begin[:] <ast.Raise object at 0x7da18f720520> variable[output] assign[=] call[name[process_filter_args], parameter[call[name[self].definition][constant[model]], name[kwargs]]] if name[output] begin[:] call[name[self].filters.append, parameter[name[output]]] return[name[self]]
keyword[def] identifier[match] ( identifier[self] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[kwargs] : keyword[if] identifier[self] . identifier[definition] . identifier[get] ( literal[string] ) keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[output] = identifier[process_filter_args] ( identifier[self] . identifier[definition] [ literal[string] ], identifier[kwargs] ) keyword[if] identifier[output] : identifier[self] . identifier[filters] . identifier[append] ( identifier[output] ) keyword[return] identifier[self]
def match(self, **kwargs): """ Traverse relationships with properties matching the given parameters. e.g: `.match(price__lt=10)` :param kwargs: see `NodeSet.filter()` for syntax :return: self """ if kwargs: if self.definition.get('model') is None: raise ValueError('match() with filter only available on relationships with a model') # depends on [control=['if'], data=[]] output = process_filter_args(self.definition['model'], kwargs) if output: self.filters.append(output) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return self
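A hypothetical traversal, assuming a neomodel-style relationship whose model defines a `price` property:

cheap = supplier.products.match(price__lt=10).all()   # only relationships with price < 10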
def PCO_protocol_dispatcher(s): """Choose the correct PCO element.""" proto_num = orb(s[0]) * 256 + orb(s[1]) cls = PCO_PROTOCOL_CLASSES.get(proto_num, Raw) return cls(s)
def function[PCO_protocol_dispatcher, parameter[s]]: constant[Choose the correct PCO element.] variable[proto_num] assign[=] binary_operation[binary_operation[call[name[orb], parameter[call[name[s]][constant[0]]]] * constant[256]] + call[name[orb], parameter[call[name[s]][constant[1]]]]] variable[cls] assign[=] call[name[PCO_PROTOCOL_CLASSES].get, parameter[name[proto_num], name[Raw]]] return[call[name[cls], parameter[name[s]]]]
keyword[def] identifier[PCO_protocol_dispatcher] ( identifier[s] ): literal[string] identifier[proto_num] = identifier[orb] ( identifier[s] [ literal[int] ])* literal[int] + identifier[orb] ( identifier[s] [ literal[int] ]) identifier[cls] = identifier[PCO_PROTOCOL_CLASSES] . identifier[get] ( identifier[proto_num] , identifier[Raw] ) keyword[return] identifier[cls] ( identifier[s] )
def PCO_protocol_dispatcher(s): """Choose the correct PCO element.""" proto_num = orb(s[0]) * 256 + orb(s[1]) cls = PCO_PROTOCOL_CLASSES.get(proto_num, Raw) return cls(s)
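The two-byte prefix is read big-endian (orb() only normalizes byte indexing across Python 2/3), so the manual arithmetic is equivalent to struct unpacking; the sample bytes are made up:

import struct

s = b'\x80\x21' + b'payload'
proto_num = struct.unpack('!H', s[:2])[0]   # == s[0] * 256 + s[1] == 0x8021 (IPCP in PPP numbering)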
def get_bcbio_timings(path): """Fetch timing information from a bcbio log file.""" with open(path, 'r') as file_handle: steps = {} for line in file_handle: matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line) if not matches: continue tstamp = matches.group(1) msg = matches.group(2) # XXX: new special logs do not have this #if not msg.find('Timing: ') >= 0: # continue when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace( tzinfo=pytz.timezone('UTC')) step = msg.split(":")[-1].strip() steps[when] = step return steps
def function[get_bcbio_timings, parameter[path]]: constant[Fetch timing information from a bcbio log file.] with call[name[open], parameter[name[path], constant[r]]] begin[:] variable[steps] assign[=] dictionary[[], []] for taget[name[line]] in starred[name[file_handle]] begin[:] variable[matches] assign[=] call[name[re].search, parameter[constant[^\[([^\]]+)\] ([^:]+: .*)], name[line]]] if <ast.UnaryOp object at 0x7da1b19875b0> begin[:] continue variable[tstamp] assign[=] call[name[matches].group, parameter[constant[1]]] variable[msg] assign[=] call[name[matches].group, parameter[constant[2]]] variable[when] assign[=] call[call[name[datetime].strptime, parameter[name[tstamp], constant[%Y-%m-%dT%H:%MZ]]].replace, parameter[]] variable[step] assign[=] call[call[call[name[msg].split, parameter[constant[:]]]][<ast.UnaryOp object at 0x7da1b1984580>].strip, parameter[]] call[name[steps]][name[when]] assign[=] name[step] return[name[steps]]
keyword[def] identifier[get_bcbio_timings] ( identifier[path] ): literal[string] keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[file_handle] : identifier[steps] ={} keyword[for] identifier[line] keyword[in] identifier[file_handle] : identifier[matches] = identifier[re] . identifier[search] ( literal[string] , identifier[line] ) keyword[if] keyword[not] identifier[matches] : keyword[continue] identifier[tstamp] = identifier[matches] . identifier[group] ( literal[int] ) identifier[msg] = identifier[matches] . identifier[group] ( literal[int] ) identifier[when] = identifier[datetime] . identifier[strptime] ( identifier[tstamp] , literal[string] ). identifier[replace] ( identifier[tzinfo] = identifier[pytz] . identifier[timezone] ( literal[string] )) identifier[step] = identifier[msg] . identifier[split] ( literal[string] )[- literal[int] ]. identifier[strip] () identifier[steps] [ identifier[when] ]= identifier[step] keyword[return] identifier[steps]
def get_bcbio_timings(path): """Fetch timing information from a bcbio log file.""" with open(path, 'r') as file_handle: steps = {} for line in file_handle: matches = re.search('^\\[([^\\]]+)\\] ([^:]+: .*)', line) if not matches: continue # depends on [control=['if'], data=[]] tstamp = matches.group(1) msg = matches.group(2) # XXX: new special logs do not have this #if not msg.find('Timing: ') >= 0: # continue when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace(tzinfo=pytz.timezone('UTC')) step = msg.split(':')[-1].strip() steps[when] = step # depends on [control=['for'], data=['line']] return steps # depends on [control=['with'], data=['file_handle']]
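A quick demonstration of the regex and timestamp format the parser above expects, run on a single made-up log line (the sample line is illustrative, not a real bcbio log):

import re
from datetime import datetime
import pytz

line = "[2023-01-05T10:30Z] multiprocessing: Timing: alignment"  # made-up sample
m = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line)
when = datetime.strptime(m.group(1), '%Y-%m-%dT%H:%MZ').replace(
    tzinfo=pytz.timezone('UTC'))
step = m.group(2).split(':')[-1].strip()
print(when, '->', step)  # 2023-01-05 10:30:00+00:00 -> alignment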
def port_profile_global_port_profile_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") port_profile_global = ET.SubElement(config, "port-profile-global", xmlns="urn:brocade.com:mgmt:brocade-port-profile") port_profile = ET.SubElement(port_profile_global, "port-profile") name = ET.SubElement(port_profile, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[port_profile_global_port_profile_name, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[port_profile_global] assign[=] call[name[ET].SubElement, parameter[name[config], constant[port-profile-global]]] variable[port_profile] assign[=] call[name[ET].SubElement, parameter[name[port_profile_global], constant[port-profile]]] variable[name] assign[=] call[name[ET].SubElement, parameter[name[port_profile], constant[name]]] name[name].text assign[=] call[name[kwargs].pop, parameter[constant[name]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[port_profile_global_port_profile_name] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[port_profile_global] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[port_profile] = identifier[ET] . identifier[SubElement] ( identifier[port_profile_global] , literal[string] ) identifier[name] = identifier[ET] . identifier[SubElement] ( identifier[port_profile] , literal[string] ) identifier[name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def port_profile_global_port_profile_name(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') port_profile_global = ET.SubElement(config, 'port-profile-global', xmlns='urn:brocade.com:mgmt:brocade-port-profile') port_profile = ET.SubElement(port_profile_global, 'port-profile') name = ET.SubElement(port_profile, 'name') name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
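The row above builds a NETCONF-style payload with ElementTree; a standalone version with a placeholder profile name shows the document it emits (keyword arguments to SubElement become XML attributes, which is how xmlns lands on the element):

import xml.etree.ElementTree as ET

config = ET.Element("config")
ppg = ET.SubElement(config, "port-profile-global",
                    xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name = ET.SubElement(ET.SubElement(ppg, "port-profile"), "name")
name.text = "pp-demo"  # placeholder profile name
print(ET.tostring(config).decode())
# <config><port-profile-global xmlns="urn:brocade.com:mgmt:brocade-port-profile">
#   <port-profile><name>pp-demo</name></port-profile></port-profile-global></config>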
def sg_rnn(tensor, opt): r"""Applies a simple rnn. Args: tensor: A 3-D `Tensor` (automatically passed by decorator). opt: in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. bias: Boolean. If True, biases are added. ln: Boolean. If True, layer normalization is applied. init_state: A 2-D `Tensor`. If None, the initial state is set to zeros. last_only: Boolean. If True, the outputs in the last time step are returned. mask: Boolean 2-D `Tensor` or None(default). For false elements values are excluded from the calculation. As a result, the outputs for the locations become 0. summary: If True, summaries are added. The default is True. Returns: A `Tensor`. If last_only is True, the output tensor has shape [batch size, dim]. Otherwise, [batch size, time steps, dim]. """ # layer normalization # noinspection PyPep8 ln = lambda v: _ln_rnn(v, gamma, beta) if opt.ln else v # step function def step(hh, x): # simple rnn y = ln(tf.matmul(x, w) + tf.matmul(hh, u) + (b if opt.bias else 0)) return y # parameter initialize w = tf.sg_initializer.orthogonal('W', (opt.in_dim, opt.dim), summary=opt.summary) u = tf.sg_initializer.identity('U', opt.dim, summary=opt.summary) if opt.bias: b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) # layer normalization parameters if opt.ln: # offset, scale parameter beta = tf.sg_initializer.constant('beta', opt.dim, summary=opt.summary) gamma = tf.sg_initializer.constant('gamma', opt.dim, value=1, summary=opt.summary) # initial state init_h = opt.init_state if opt.init_state is not None \ else tf.zeros((tensor.get_shape().as_list()[0], opt.dim), dtype=tf.sg_floatx) # do rnn loop h, out = init_h, [] for i in range(tensor.get_shape().as_list()[1]): # apply step func h = step(h, tensor[:, i, :]) # save result out.append(h.sg_expand_dims(axis=1)) # merge tensor out = tf.concat(out, 1) # apply mask if opt.mask is None: if opt.last_only: return out[:, -1, :] else: return out else: # apply mask out *= opt.mask.sg_expand_dims(axis=2).sg_float() if opt.last_only: # calc sequence length using given mask seq_len = opt.mask.sg_int().sg_sum(axis=1) # get last output rev = tf.reverse_sequence(out, seq_len, seq_axis=1) return rev[:, 0, :] else: return out
def function[sg_rnn, parameter[tensor, opt]]: constant[Applies a simple rnn. Args: tensor: A 3-D `Tensor` (automatically passed by decorator). opt: in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. bias: Boolean. If True, biases are added. ln: Boolean. If True, layer normalization is applied. init_state: A 2-D `Tensor`. If None, the initial state is set to zeros. last_only: Boolean. If True, the outputs in the last time step are returned. mask: Boolean 2-D `Tensor` or None(default). For false elements values are excluded from the calculation. As a result, the outputs for the locations become 0. summary: If True, summaries are added. The default is True. Returns: A `Tensor`. If last_only is True, the output tensor has shape [batch size, dim]. Otherwise, [batch size, time steps, dim]. ] variable[ln] assign[=] <ast.Lambda object at 0x7da1b1293880> def function[step, parameter[hh, x]]: variable[y] assign[=] call[name[ln], parameter[binary_operation[binary_operation[call[name[tf].matmul, parameter[name[x], name[w]]] + call[name[tf].matmul, parameter[name[hh], name[u]]]] + <ast.IfExp object at 0x7da1b12917e0>]]] return[name[y]] variable[w] assign[=] call[name[tf].sg_initializer.orthogonal, parameter[constant[W], tuple[[<ast.Attribute object at 0x7da1b1292170>, <ast.Attribute object at 0x7da1b1291d20>]]]] variable[u] assign[=] call[name[tf].sg_initializer.identity, parameter[constant[U], name[opt].dim]] if name[opt].bias begin[:] variable[b] assign[=] call[name[tf].sg_initializer.constant, parameter[constant[b], name[opt].dim]] if name[opt].ln begin[:] variable[beta] assign[=] call[name[tf].sg_initializer.constant, parameter[constant[beta], name[opt].dim]] variable[gamma] assign[=] call[name[tf].sg_initializer.constant, parameter[constant[gamma], name[opt].dim]] variable[init_h] assign[=] <ast.IfExp object at 0x7da1b1291c60> <ast.Tuple object at 0x7da1b1291bd0> assign[=] tuple[[<ast.Name object at 0x7da1b12918d0>, <ast.List object at 0x7da1b12904f0>]] for taget[name[i]] in starred[call[name[range], parameter[call[call[call[name[tensor].get_shape, parameter[]].as_list, parameter[]]][constant[1]]]]] begin[:] variable[h] assign[=] call[name[step], parameter[name[h], call[name[tensor]][tuple[[<ast.Slice object at 0x7da1b12927a0>, <ast.Name object at 0x7da1b1291db0>, <ast.Slice object at 0x7da1b1290d00>]]]]] call[name[out].append, parameter[call[name[h].sg_expand_dims, parameter[]]]] variable[out] assign[=] call[name[tf].concat, parameter[name[out], constant[1]]] if compare[name[opt].mask is constant[None]] begin[:] if name[opt].last_only begin[:] return[call[name[out]][tuple[[<ast.Slice object at 0x7da1b1255630>, <ast.UnaryOp object at 0x7da1b1254b20>, <ast.Slice object at 0x7da1b1255870>]]]]
keyword[def] identifier[sg_rnn] ( identifier[tensor] , identifier[opt] ): literal[string] identifier[ln] = keyword[lambda] identifier[v] : identifier[_ln_rnn] ( identifier[v] , identifier[gamma] , identifier[beta] ) keyword[if] identifier[opt] . identifier[ln] keyword[else] identifier[v] keyword[def] identifier[step] ( identifier[hh] , identifier[x] ): identifier[y] = identifier[ln] ( identifier[tf] . identifier[matmul] ( identifier[x] , identifier[w] )+ identifier[tf] . identifier[matmul] ( identifier[hh] , identifier[u] )+( identifier[b] keyword[if] identifier[opt] . identifier[bias] keyword[else] literal[int] )) keyword[return] identifier[y] identifier[w] = identifier[tf] . identifier[sg_initializer] . identifier[orthogonal] ( literal[string] ,( identifier[opt] . identifier[in_dim] , identifier[opt] . identifier[dim] ), identifier[summary] = identifier[opt] . identifier[summary] ) identifier[u] = identifier[tf] . identifier[sg_initializer] . identifier[identity] ( literal[string] , identifier[opt] . identifier[dim] , identifier[summary] = identifier[opt] . identifier[summary] ) keyword[if] identifier[opt] . identifier[bias] : identifier[b] = identifier[tf] . identifier[sg_initializer] . identifier[constant] ( literal[string] , identifier[opt] . identifier[dim] , identifier[summary] = identifier[opt] . identifier[summary] ) keyword[if] identifier[opt] . identifier[ln] : identifier[beta] = identifier[tf] . identifier[sg_initializer] . identifier[constant] ( literal[string] , identifier[opt] . identifier[dim] , identifier[summary] = identifier[opt] . identifier[summary] ) identifier[gamma] = identifier[tf] . identifier[sg_initializer] . identifier[constant] ( literal[string] , identifier[opt] . identifier[dim] , identifier[value] = literal[int] , identifier[summary] = identifier[opt] . identifier[summary] ) identifier[init_h] = identifier[opt] . identifier[init_state] keyword[if] identifier[opt] . identifier[init_state] keyword[is] keyword[not] keyword[None] keyword[else] identifier[tf] . identifier[zeros] (( identifier[tensor] . identifier[get_shape] (). identifier[as_list] ()[ literal[int] ], identifier[opt] . identifier[dim] ), identifier[dtype] = identifier[tf] . identifier[sg_floatx] ) identifier[h] , identifier[out] = identifier[init_h] ,[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[tensor] . identifier[get_shape] (). identifier[as_list] ()[ literal[int] ]): identifier[h] = identifier[step] ( identifier[h] , identifier[tensor] [:, identifier[i] ,:]) identifier[out] . identifier[append] ( identifier[h] . identifier[sg_expand_dims] ( identifier[axis] = literal[int] )) identifier[out] = identifier[tf] . identifier[concat] ( identifier[out] , literal[int] ) keyword[if] identifier[opt] . identifier[mask] keyword[is] keyword[None] : keyword[if] identifier[opt] . identifier[last_only] : keyword[return] identifier[out] [:,- literal[int] ,:] keyword[else] : keyword[return] identifier[out] keyword[else] : identifier[out] *= identifier[opt] . identifier[mask] . identifier[sg_expand_dims] ( identifier[axis] = literal[int] ). identifier[sg_float] () keyword[if] identifier[opt] . identifier[last_only] : identifier[seq_len] = identifier[opt] . identifier[mask] . identifier[sg_int] (). identifier[sg_sum] ( identifier[axis] = literal[int] ) identifier[rev] = identifier[tf] . identifier[reverse_sequence] ( identifier[out] , identifier[seq_len] , identifier[seq_axis] = literal[int] ) keyword[return] identifier[rev] [:, literal[int] ,:] keyword[else] : keyword[return] identifier[out]
def sg_rnn(tensor, opt): """Applies a simple rnn. Args: tensor: A 3-D `Tensor` (automatically passed by decorator). opt: in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. bias: Boolean. If True, biases are added. ln: Boolean. If True, layer normalization is applied. init_state: A 2-D `Tensor`. If None, the initial state is set to zeros. last_only: Boolean. If True, the outputs in the last time step are returned. mask: Boolean 2-D `Tensor` or None(default). For false elements values are excluded from the calculation. As a result, the outputs for the locations become 0. summary: If True, summaries are added. The default is True. Returns: A `Tensor`. If last_only is True, the output tensor has shape [batch size, dim]. Otherwise, [batch size, time steps, dim]. """ # layer normalization # noinspection PyPep8 ln = lambda v: _ln_rnn(v, gamma, beta) if opt.ln else v # step function def step(hh, x): # simple rnn y = ln(tf.matmul(x, w) + tf.matmul(hh, u) + (b if opt.bias else 0)) return y # parameter initialize w = tf.sg_initializer.orthogonal('W', (opt.in_dim, opt.dim), summary=opt.summary) u = tf.sg_initializer.identity('U', opt.dim, summary=opt.summary) if opt.bias: b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) # depends on [control=['if'], data=[]] # layer normalization parameters if opt.ln: # offset, scale parameter beta = tf.sg_initializer.constant('beta', opt.dim, summary=opt.summary) gamma = tf.sg_initializer.constant('gamma', opt.dim, value=1, summary=opt.summary) # depends on [control=['if'], data=[]] # initial state init_h = opt.init_state if opt.init_state is not None else tf.zeros((tensor.get_shape().as_list()[0], opt.dim), dtype=tf.sg_floatx) # do rnn loop (h, out) = (init_h, []) for i in range(tensor.get_shape().as_list()[1]): # apply step func h = step(h, tensor[:, i, :]) # save result out.append(h.sg_expand_dims(axis=1)) # depends on [control=['for'], data=['i']] # merge tensor out = tf.concat(out, 1) # apply mask if opt.mask is None: if opt.last_only: return out[:, -1, :] # depends on [control=['if'], data=[]] else: return out # depends on [control=['if'], data=[]] else: # apply mask out *= opt.mask.sg_expand_dims(axis=2).sg_float() if opt.last_only: # calc sequence length using given mask seq_len = opt.mask.sg_int().sg_sum(axis=1) # get last output rev = tf.reverse_sequence(out, seq_len, seq_axis=1) return rev[:, 0, :] # depends on [control=['if'], data=[]] else: return out
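The loop above implements h_t = ln(x_t W + h_{t-1} U + b), where the optional layer norm stands in place of a nonlinearity. A numpy sketch of the same unrolled recurrence, skipping layer normalization and masking:

import numpy as np

def simple_rnn(x, w, u, b):
    # x: (batch, time, in_dim); w: (in_dim, dim); u: (dim, dim); b: (dim,)
    batch, time, _ = x.shape
    h = np.zeros((batch, u.shape[0]))          # zero initial state
    outs = []
    for t in range(time):
        h = x[:, t, :] @ w + h @ u + b         # affine step, as in sg_rnn
        outs.append(h[:, None, :])
    return np.concatenate(outs, axis=1)        # (batch, time, dim)

out = simple_rnn(np.random.randn(2, 5, 3), np.random.randn(3, 4),
                 np.eye(4), np.zeros(4))
assert out.shape == (2, 5, 4)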
def get_publisher(self, publisher_id): """GetPublisher. Get a specific service hooks publisher. :param str publisher_id: ID for a publisher. :rtype: :class:`<Publisher> <azure.devops.v5_0.service_hooks.models.Publisher>` """ route_values = {} if publisher_id is not None: route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str') response = self._send(http_method='GET', location_id='1e83a210-5b53-43bc-90f0-d476a4e5d731', version='5.0', route_values=route_values) return self._deserialize('Publisher', response)
def function[get_publisher, parameter[self, publisher_id]]: constant[GetPublisher. Get a specific service hooks publisher. :param str publisher_id: ID for a publisher. :rtype: :class:`<Publisher> <azure.devops.v5_0.service_hooks.models.Publisher>` ] variable[route_values] assign[=] dictionary[[], []] if compare[name[publisher_id] is_not constant[None]] begin[:] call[name[route_values]][constant[publisherId]] assign[=] call[name[self]._serialize.url, parameter[constant[publisher_id], name[publisher_id], constant[str]]] variable[response] assign[=] call[name[self]._send, parameter[]] return[call[name[self]._deserialize, parameter[constant[Publisher], name[response]]]]
keyword[def] identifier[get_publisher] ( identifier[self] , identifier[publisher_id] ): literal[string] identifier[route_values] ={} keyword[if] identifier[publisher_id] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[publisher_id] , literal[string] ) identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] , identifier[location_id] = literal[string] , identifier[version] = literal[string] , identifier[route_values] = identifier[route_values] ) keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] )
def get_publisher(self, publisher_id): """GetPublisher. Get a specific service hooks publisher. :param str publisher_id: ID for a publisher. :rtype: :class:`<Publisher> <azure.devops.v5_0.service_hooks.models.Publisher>` """ route_values = {} if publisher_id is not None: route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str') # depends on [control=['if'], data=['publisher_id']] response = self._send(http_method='GET', location_id='1e83a210-5b53-43bc-90f0-d476a4e5d731', version='5.0', route_values=route_values) return self._deserialize('Publisher', response)
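A hedged usage sketch for the client method above. The Connection/clients plumbing follows the azure-devops Python package's documented pattern, but the accessor name and the 'tfs' publisher id are assumptions, and the org URL and token are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/your-org',  # placeholder
                        creds=BasicAuthentication('', 'your-pat'))  # placeholder
client = connection.clients.get_service_hooks_client()  # assumed accessor name
publisher = client.get_publisher('tfs')  # 'tfs' is a commonly cited publisher id
print(publisher.id)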
def format(self, data: Iterable[_FormatArg]) -> bytes: """String interpolation into the format string. Args: data: The data interpolated into the format string. Examples: :: BytesFormat(b'Hello, %b!') % b'World' BytesFormat(b'%b, %b!') % (b'Hello', b'World') """ fix_arg = self._fix_format_arg return self.how % tuple(fix_arg(item) for item in data)
def function[format, parameter[self, data]]: constant[String interpolation into the format string. Args: data: The data interpolated into the format string. Examples: :: BytesFormat(b'Hello, %b!') % b'World' BytesFormat(b'%b, %b!') % (b'Hello', b'World') ] variable[fix_arg] assign[=] name[self]._fix_format_arg return[binary_operation[name[self].how <ast.Mod object at 0x7da2590d6920> call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20c7943a0>]]]]
keyword[def] identifier[format] ( identifier[self] , identifier[data] : identifier[Iterable] [ identifier[_FormatArg] ])-> identifier[bytes] : literal[string] identifier[fix_arg] = identifier[self] . identifier[_fix_format_arg] keyword[return] identifier[self] . identifier[how] % identifier[tuple] ( identifier[fix_arg] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[data] )
def format(self, data: Iterable[_FormatArg]) -> bytes: """String interpolation into the format string. Args: data: The data interpolated into the format string. Examples: :: BytesFormat(b'Hello, %b!') % b'World' BytesFormat(b'%b, %b!') % (b'Hello', b'World') """ fix_arg = self._fix_format_arg return self.how % tuple((fix_arg(item) for item in data))
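Bytes %-interpolation with %b operates directly on bytes objects; a minimal stand-in for the class above, where the str-to-bytes coercion in _fix_format_arg is an assumption about what the real helper does:

class MiniBytesFormat:
    def __init__(self, how: bytes):
        self.how = how

    def _fix_format_arg(self, item):
        # Assumption: coerce str to bytes, pass bytes through untouched.
        return item.encode('ascii') if isinstance(item, str) else item

    def format(self, data):
        return self.how % tuple(self._fix_format_arg(i) for i in data)

assert MiniBytesFormat(b'%b, %b!').format([b'Hello', 'World']) == b'Hello, World!'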
def make_shell_context(self) -> dict: """Create a context for interactive shell usage. The :attr:`shell_context_processors` can be used to add additional context. """ context = {'app': self, 'g': g} for processor in self.shell_context_processors: context.update(processor()) return context
def function[make_shell_context, parameter[self]]: constant[Create a context for interactive shell usage. The :attr:`shell_context_processors` can be used to add additional context. ] variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da18bc734f0>, <ast.Constant object at 0x7da18bc72cb0>], [<ast.Name object at 0x7da18bc72950>, <ast.Name object at 0x7da18bc73b80>]] for taget[name[processor]] in starred[name[self].shell_context_processors] begin[:] call[name[context].update, parameter[call[name[processor], parameter[]]]] return[name[context]]
keyword[def] identifier[make_shell_context] ( identifier[self] )-> identifier[dict] : literal[string] identifier[context] ={ literal[string] : identifier[self] , literal[string] : identifier[g] } keyword[for] identifier[processor] keyword[in] identifier[self] . identifier[shell_context_processors] : identifier[context] . identifier[update] ( identifier[processor] ()) keyword[return] identifier[context]
def make_shell_context(self) -> dict: """Create a context for interactive shell usage. The :attr:`shell_context_processors` can be used to add additional context. """ context = {'app': self, 'g': g} for processor in self.shell_context_processors: context.update(processor()) # depends on [control=['for'], data=['processor']] return context
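How a registered processor feeds into that dict, in a stripped-down stand-in (the real method also injects the request-global g object):

class MiniApp:
    def __init__(self):
        self.shell_context_processors = []

    def make_shell_context(self):
        context = {'app': self}            # the real version also adds 'g'
        for processor in self.shell_context_processors:
            context.update(processor())    # later processors win on key clashes
        return context

app = MiniApp()
app.shell_context_processors.append(lambda: {'db': 'fake-db-handle'})
assert app.make_shell_context()['db'] == 'fake-db-handle'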
def del_permission_role(self, role, perm_view): """ Remove permission-ViewMenu object from Role :param role: The role object :param perm_view: The PermissionViewMenu object """ if perm_view in role.permissions: try: role.permissions.remove(perm_view) self.get_session.merge(role) self.get_session.commit() log.info( c.LOGMSG_INF_SEC_DEL_PERMROLE.format(str(perm_view), role.name) ) except Exception as e: log.error(c.LOGMSG_ERR_SEC_DEL_PERMROLE.format(str(e))) self.get_session.rollback()
def function[del_permission_role, parameter[self, role, perm_view]]: constant[ Remove permission-ViewMenu object from Role :param role: The role object :param perm_view: The PermissionViewMenu object ] if compare[name[perm_view] in name[role].permissions] begin[:] <ast.Try object at 0x7da20c7cbb80>
keyword[def] identifier[del_permission_role] ( identifier[self] , identifier[role] , identifier[perm_view] ): literal[string] keyword[if] identifier[perm_view] keyword[in] identifier[role] . identifier[permissions] : keyword[try] : identifier[role] . identifier[permissions] . identifier[remove] ( identifier[perm_view] ) identifier[self] . identifier[get_session] . identifier[merge] ( identifier[role] ) identifier[self] . identifier[get_session] . identifier[commit] () identifier[log] . identifier[info] ( identifier[c] . identifier[LOGMSG_INF_SEC_DEL_PERMROLE] . identifier[format] ( identifier[str] ( identifier[perm_view] ), identifier[role] . identifier[name] ) ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[log] . identifier[error] ( identifier[c] . identifier[LOGMSG_ERR_SEC_DEL_PERMROLE] . identifier[format] ( identifier[str] ( identifier[e] ))) identifier[self] . identifier[get_session] . identifier[rollback] ()
def del_permission_role(self, role, perm_view): """ Remove permission-ViewMenu object from Role :param role: The role object :param perm_view: The PermissionViewMenu object """ if perm_view in role.permissions: try: role.permissions.remove(perm_view) self.get_session.merge(role) self.get_session.commit() log.info(c.LOGMSG_INF_SEC_DEL_PERMROLE.format(str(perm_view), role.name)) # depends on [control=['try'], data=[]] except Exception as e: log.error(c.LOGMSG_ERR_SEC_DEL_PERMROLE.format(str(e))) self.get_session.rollback() # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['perm_view']]
def prebuild_arch(self, arch): '''Run any pre-build tasks for the Recipe. By default, this checks if any prebuild_archname methods exist for the archname of the current architecture, and runs them if so.''' prebuild = "prebuild_{}".format(arch.arch.replace('-', '_')) if hasattr(self, prebuild): getattr(self, prebuild)() else: info('{} has no {}, skipping'.format(self.name, prebuild))
def function[prebuild_arch, parameter[self, arch]]: constant[Run any pre-build tasks for the Recipe. By default, this checks if any prebuild_archname methods exist for the archname of the current architecture, and runs them if so.] variable[prebuild] assign[=] call[constant[prebuild_{}].format, parameter[call[name[arch].arch.replace, parameter[constant[-], constant[_]]]]] if call[name[hasattr], parameter[name[self], name[prebuild]]] begin[:] call[call[name[getattr], parameter[name[self], name[prebuild]]], parameter[]]
keyword[def] identifier[prebuild_arch] ( identifier[self] , identifier[arch] ): literal[string] identifier[prebuild] = literal[string] . identifier[format] ( identifier[arch] . identifier[arch] . identifier[replace] ( literal[string] , literal[string] )) keyword[if] identifier[hasattr] ( identifier[self] , identifier[prebuild] ): identifier[getattr] ( identifier[self] , identifier[prebuild] )() keyword[else] : identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[prebuild] ))
def prebuild_arch(self, arch): """Run any pre-build tasks for the Recipe. By default, this checks if any prebuild_archname methods exist for the archname of the current architecture, and runs them if so.""" prebuild = 'prebuild_{}'.format(arch.arch.replace('-', '_')) if hasattr(self, prebuild): getattr(self, prebuild)() # depends on [control=['if'], data=[]] else: info('{} has no {}, skipping'.format(self.name, prebuild))
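The name-mangled hook lookup generalizes to any per-architecture method; a runnable miniature with a fake recipe and one defined hook:

class DemoRecipe:
    name = 'demo'

    def prebuild_arch(self, arch_name):
        hook = 'prebuild_{}'.format(arch_name.replace('-', '_'))
        if hasattr(self, hook):
            getattr(self, hook)()
        else:
            print('{} has no {}, skipping'.format(self.name, hook))

    def prebuild_armeabi_v7a(self):
        print('running armeabi-v7a pre-build')

r = DemoRecipe()
r.prebuild_arch('armeabi-v7a')  # dispatches to prebuild_armeabi_v7a
r.prebuild_arch('x86_64')       # prints the skip message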
def discrete(self, *args): """ Set fields to be discrete. :rtype: DataFrame :Example: >>> # Table schema is create table test(f1 double, f2 string) >>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS >>> # Now we want to set ``f1`` and ``f2`` into discrete >>> new_ds = df.discrete('f1 f2') """ new_df = copy_df(self) fields = _render_field_set(args) self._assert_ml_fields_valid(*fields) new_df._perform_operation(op.FieldContinuityOperation(dict((_get_field_name(f), False) for f in fields))) return new_df
def function[discrete, parameter[self]]: constant[ Set fields to be discrete. :rtype: DataFrame :Example: >>> # Table schema is create table test(f1 double, f2 string) >>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS >>> # Now we want to set ``f1`` and ``f2`` into discrete >>> new_ds = df.discrete('f1 f2') ] variable[new_df] assign[=] call[name[copy_df], parameter[name[self]]] variable[fields] assign[=] call[name[_render_field_set], parameter[name[args]]] call[name[self]._assert_ml_fields_valid, parameter[<ast.Starred object at 0x7da18eb56bf0>]] call[name[new_df]._perform_operation, parameter[call[name[op].FieldContinuityOperation, parameter[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18eb57880>]]]]]] return[name[new_df]]
keyword[def] identifier[discrete] ( identifier[self] ,* identifier[args] ): literal[string] identifier[new_df] = identifier[copy_df] ( identifier[self] ) identifier[fields] = identifier[_render_field_set] ( identifier[args] ) identifier[self] . identifier[_assert_ml_fields_valid] (* identifier[fields] ) identifier[new_df] . identifier[_perform_operation] ( identifier[op] . identifier[FieldContinuityOperation] ( identifier[dict] (( identifier[_get_field_name] ( identifier[f] ), keyword[False] ) keyword[for] identifier[f] keyword[in] identifier[fields] ))) keyword[return] identifier[new_df]
def discrete(self, *args): """ Set fields to be discrete. :rtype: DataFrame :Example: >>> # Table schema is create table test(f1 double, f2 string) >>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS >>> # Now we want to set ``f1`` and ``f2`` into discrete >>> new_ds = df.discrete('f1 f2') """ new_df = copy_df(self) fields = _render_field_set(args) self._assert_ml_fields_valid(*fields) new_df._perform_operation(op.FieldContinuityOperation(dict(((_get_field_name(f), False) for f in fields)))) return new_df
def build_model(self, token_encoder_model, sentence_encoder_model, trainable_embeddings=True, output_activation='softmax'): """Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by `sentence_encoder_model`. Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model will be applied across all sentences to create a sentence encoding. sentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by `token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor. """ if not isinstance(token_encoder_model, SequenceEncoderBase): raise ValueError("`token_encoder_model` should be an instance of `{}`".format( SequenceEncoderBase)) if not isinstance(sentence_encoder_model, SequenceEncoderBase): raise ValueError("`sentence_encoder_model` should be an instance of `{}`".format( SequenceEncoderBase)) if not sentence_encoder_model.allows_dynamic_length() and self.max_sents is None: raise ValueError("Sentence encoder model '{}' requires padding. You need to provide `max_sents`".format(sentence_encoder_model)) if self.embeddings_index is None: # The +1 is for unknown token index 0. embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) else: embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights( self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) word_input = Input(shape=(self.max_tokens,), dtype='int32') x = embedding_layer(word_input) word_encoding = token_encoder_model(x) token_encoder_model = Model( word_input, word_encoding, name='word_encoder') doc_input = Input( shape=(self.max_sents, self.max_tokens), dtype='int32') sent_encoding = TimeDistributed(token_encoder_model)(doc_input) x = sentence_encoder_model(sent_encoding) x = Dense(self.num_classes, activation=output_activation)(x) return Model(doc_input, x)
def function[build_model, parameter[self, token_encoder_model, sentence_encoder_model, trainable_embeddings, output_activation]]: constant[Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by `sentence_encoder_model`. Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model will be applied across all sentences to create a sentence encoding. sentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by `token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor. ] if <ast.UnaryOp object at 0x7da1b11e2500> begin[:] <ast.Raise object at 0x7da1b11e0310> if <ast.UnaryOp object at 0x7da1b11e22f0> begin[:] <ast.Raise object at 0x7da1b11e1bd0> if <ast.BoolOp object at 0x7da1b11e18a0> begin[:] <ast.Raise object at 0x7da1b11e33a0> if compare[name[self].embeddings_index is constant[None]] begin[:] variable[embedding_layer] assign[=] call[name[Embedding], parameter[call[name[len], parameter[name[self].token_index]], name[self].embedding_dims]] variable[word_input] assign[=] call[name[Input], parameter[]] variable[x] assign[=] call[name[embedding_layer], parameter[name[word_input]]] variable[word_encoding] assign[=] call[name[token_encoder_model], parameter[name[x]]] variable[token_encoder_model] assign[=] call[name[Model], parameter[name[word_input], name[word_encoding]]] variable[doc_input] assign[=] call[name[Input], parameter[]] variable[sent_encoding] assign[=] call[call[name[TimeDistributed], parameter[name[token_encoder_model]]], parameter[name[doc_input]]] variable[x] assign[=] call[name[sentence_encoder_model], parameter[name[sent_encoding]]] variable[x] assign[=] call[call[name[Dense], parameter[name[self].num_classes]], parameter[name[x]]] return[call[name[Model], parameter[name[doc_input], name[x]]]]
keyword[def] identifier[build_model] ( identifier[self] , identifier[token_encoder_model] , identifier[sentence_encoder_model] , identifier[trainable_embeddings] = keyword[True] , identifier[output_activation] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[token_encoder_model] , identifier[SequenceEncoderBase] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[SequenceEncoderBase] )) keyword[if] keyword[not] identifier[isinstance] ( identifier[sentence_encoder_model] , identifier[SequenceEncoderBase] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[SequenceEncoderBase] )) keyword[if] keyword[not] identifier[sentence_encoder_model] . identifier[allows_dynamic_length] () keyword[and] identifier[self] . identifier[max_sents] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[sentence_encoder_model] )) keyword[if] identifier[self] . identifier[embeddings_index] keyword[is] keyword[None] : identifier[embedding_layer] = identifier[Embedding] ( identifier[len] ( identifier[self] . identifier[token_index] ), identifier[self] . identifier[embedding_dims] , identifier[input_length] = identifier[self] . identifier[max_tokens] , identifier[mask_zero] = identifier[token_encoder_model] . identifier[allows_dynamic_length] (), identifier[trainable] = identifier[trainable_embeddings] ) keyword[else] : identifier[embedding_layer] = identifier[Embedding] ( identifier[len] ( identifier[self] . identifier[token_index] ), identifier[self] . identifier[embedding_dims] , identifier[weights] =[ identifier[build_embedding_weights] ( identifier[self] . identifier[token_index] , identifier[self] . identifier[embeddings_index] )], identifier[input_length] = identifier[self] . identifier[max_tokens] , identifier[mask_zero] = identifier[token_encoder_model] . identifier[allows_dynamic_length] (), identifier[trainable] = identifier[trainable_embeddings] ) identifier[word_input] = identifier[Input] ( identifier[shape] =( identifier[self] . identifier[max_tokens] ,), identifier[dtype] = literal[string] ) identifier[x] = identifier[embedding_layer] ( identifier[word_input] ) identifier[word_encoding] = identifier[token_encoder_model] ( identifier[x] ) identifier[token_encoder_model] = identifier[Model] ( identifier[word_input] , identifier[word_encoding] , identifier[name] = literal[string] ) identifier[doc_input] = identifier[Input] ( identifier[shape] =( identifier[self] . identifier[max_sents] , identifier[self] . identifier[max_tokens] ), identifier[dtype] = literal[string] ) identifier[sent_encoding] = identifier[TimeDistributed] ( identifier[token_encoder_model] )( identifier[doc_input] ) identifier[x] = identifier[sentence_encoder_model] ( identifier[sent_encoding] ) identifier[x] = identifier[Dense] ( identifier[self] . identifier[num_classes] , identifier[activation] = identifier[output_activation] )( identifier[x] ) keyword[return] identifier[Model] ( identifier[doc_input] , identifier[x] )
def build_model(self, token_encoder_model, sentence_encoder_model, trainable_embeddings=True, output_activation='softmax'): """Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by `sentence_encoder_model`. Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model will be applied across all sentences to create a sentence encoding. sentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by `token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor. """ if not isinstance(token_encoder_model, SequenceEncoderBase): raise ValueError('`token_encoder_model` should be an instance of `{}`'.format(SequenceEncoderBase)) # depends on [control=['if'], data=[]] if not isinstance(sentence_encoder_model, SequenceEncoderBase): raise ValueError('`sentence_encoder_model` should be an instance of `{}`'.format(SequenceEncoderBase)) # depends on [control=['if'], data=[]] if not sentence_encoder_model.allows_dynamic_length() and self.max_sents is None: raise ValueError("Sentence encoder model '{}' requires padding. You need to provide `max_sents`".format(sentence_encoder_model)) # depends on [control=['if'], data=[]] if self.embeddings_index is None: # The +1 is for unknown token index 0. embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) # depends on [control=['if'], data=[]] else: embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights(self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) word_input = Input(shape=(self.max_tokens,), dtype='int32') x = embedding_layer(word_input) word_encoding = token_encoder_model(x) token_encoder_model = Model(word_input, word_encoding, name='word_encoder') doc_input = Input(shape=(self.max_sents, self.max_tokens), dtype='int32') sent_encoding = TimeDistributed(token_encoder_model)(doc_input) x = sentence_encoder_model(sent_encoding) x = Dense(self.num_classes, activation=output_activation)(x) return Model(doc_input, x)
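A concrete miniature of the document hierarchy above, with plain LSTMs standing in for the two SequenceEncoderBase instances and toy sizes throughout (all values illustrative):

from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, TimeDistributed
from tensorflow.keras.models import Model

max_sents, max_tokens, vocab, dims, classes = 10, 40, 5000, 64, 3  # toy sizes

word_input = Input(shape=(max_tokens,), dtype='int32')
x = Embedding(vocab, dims)(word_input)
word_encoder = Model(word_input, LSTM(dims)(x), name='word_encoder')

doc_input = Input(shape=(max_sents, max_tokens), dtype='int32')
sent_encoding = TimeDistributed(word_encoder)(doc_input)   # per-sentence encodings
out = Dense(classes, activation='softmax')(LSTM(dims)(sent_encoding))
model = Model(doc_input, out)  # (batch, 10, 40) ints -> (batch, 3) probabilities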
def pyquil_to_image(program: pyquil.Program) -> PIL.Image: # pragma: no cover """Returns an image of a pyquil circuit. See circuit_to_latex() for more details. """ circ = pyquil_to_circuit(program) latex = circuit_to_latex(circ) img = render_latex(latex) return img
def function[pyquil_to_image, parameter[program]]: constant[Returns an image of a pyquil circuit. See circuit_to_latex() for more details. ] variable[circ] assign[=] call[name[pyquil_to_circuit], parameter[name[program]]] variable[latex] assign[=] call[name[circuit_to_latex], parameter[name[circ]]] variable[img] assign[=] call[name[render_latex], parameter[name[latex]]] return[name[img]]
keyword[def] identifier[pyquil_to_image] ( identifier[program] : identifier[pyquil] . identifier[Program] )-> identifier[PIL] . identifier[Image] : literal[string] identifier[circ] = identifier[pyquil_to_circuit] ( identifier[program] ) identifier[latex] = identifier[circuit_to_latex] ( identifier[circ] ) identifier[img] = identifier[render_latex] ( identifier[latex] ) keyword[return] identifier[img]
def pyquil_to_image(program: pyquil.Program) -> PIL.Image: # pragma: no cover 'Returns an image of a pyquil circuit.\n\n See circuit_to_latex() for more details.\n ' circ = pyquil_to_circuit(program) latex = circuit_to_latex(circ) img = render_latex(latex) return img
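Assuming the sibling helpers referenced above are importable from the same module, a short end-to-end use; the Bell-pair program is arbitrary, and saving requires Pillow plus a working LaTeX toolchain behind render_latex:

from pyquil import Program
from pyquil.gates import H, CNOT

prog = Program(H(0), CNOT(0, 1))   # arbitrary two-qubit demo program
img = pyquil_to_image(prog)        # PIL.Image rendered via LaTeX
img.save('bell.png')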
def append_child ( self, object, child ): """ Appends a child to the object's children. """ if isinstance( child, Subgraph ): object.subgraphs.append( child ) elif isinstance( child, Cluster ): object.clusters.append( child ) elif isinstance( child, Node ): object.nodes.append( child ) elif isinstance( child, Edge ): object.edges.append( child ) else: pass
def function[append_child, parameter[self, object, child]]: constant[ Appends a child to the object's children. ] if call[name[isinstance], parameter[name[child], name[Subgraph]]] begin[:] call[name[object].subgraphs.append, parameter[name[child]]]
keyword[def] identifier[append_child] ( identifier[self] , identifier[object] , identifier[child] ): literal[string] keyword[if] identifier[isinstance] ( identifier[child] , identifier[Subgraph] ): identifier[object] . identifier[subgraphs] . identifier[append] ( identifier[child] ) keyword[elif] identifier[isinstance] ( identifier[child] , identifier[Cluster] ): identifier[object] . identifier[clusters] . identifier[append] ( identifier[child] ) keyword[elif] identifier[isinstance] ( identifier[child] , identifier[Node] ): identifier[object] . identifier[nodes] . identifier[append] ( identifier[child] ) keyword[elif] identifier[isinstance] ( identifier[child] , identifier[Edge] ): identifier[object] . identifier[edges] . identifier[append] ( identifier[child] ) keyword[else] : keyword[pass]
def append_child(self, object, child): """ Appends a child to the object's children. """ if isinstance(child, Subgraph): object.subgraphs.append(child) # depends on [control=['if'], data=[]] elif isinstance(child, Cluster): object.clusters.append(child) # depends on [control=['if'], data=[]] elif isinstance(child, Node): object.nodes.append(child) # depends on [control=['if'], data=[]] elif isinstance(child, Edge): object.edges.append(child) # depends on [control=['if'], data=[]] else: pass
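The isinstance ladder above can also be written as an ordered type-to-bucket list; a sketch assuming the same four classes and container attributes, where iterating in list order preserves the elif chain's precedence:

def append_child(self, obj, child):
    buckets = [(Subgraph, obj.subgraphs), (Cluster, obj.clusters),
               (Node, obj.nodes), (Edge, obj.edges)]
    for cls, bucket in buckets:
        if isinstance(child, cls):
            bucket.append(child)
            break  # first match wins, mirroring the elif chain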
def ArcTan(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Takes the inverse tan of a vertex, Arctan(vertex) :param input_vertex: the vertex """ return Double(context.jvm_view().ArcTanVertex, label, cast_to_double_vertex(input_vertex))
def function[ArcTan, parameter[input_vertex, label]]: constant[ Takes the inverse tan of a vertex, Arctan(vertex) :param input_vertex: the vertex ] return[call[name[Double], parameter[call[name[context].jvm_view, parameter[]].ArcTanVertex, name[label], call[name[cast_to_double_vertex], parameter[name[input_vertex]]]]]]
keyword[def] identifier[ArcTan] ( identifier[input_vertex] : identifier[vertex_constructor_param_types] , identifier[label] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[Vertex] : literal[string] keyword[return] identifier[Double] ( identifier[context] . identifier[jvm_view] (). identifier[ArcTanVertex] , identifier[label] , identifier[cast_to_double_vertex] ( identifier[input_vertex] ))
def ArcTan(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Takes the inverse tan of a vertex, Arctan(vertex) :param input_vertex: the vertex """ return Double(context.jvm_view().ArcTanVertex, label, cast_to_double_vertex(input_vertex))
def from_text(cls, filename, **kwargs): ''' Create a constellation by reading a catalog in from a text file, as long as it's formated as in to_text() with identifiers, coordinates, magnitudes. Parameters ---------- filename : str The filename to read in. **kwargs are passed to astropy.io.ascii.read() ''' # FIXME -- add something here to parse id, mag, errors from the table? # load the table t = ascii.read(filename, **kwargs) ''' # which columns is the coordinates? i_coordinates = t.colnames.index('ra') # everything before the coordinates is an identifier identifiers = Table(t.columns[:i_coordinates]) # the complete coordinates are stored in one c = t.columns[i_coordinates:i_coordinates+6] coordinates = coord.SkyCoord(**c) coordinates.obstime=Time(cls.epoch, format='decimalyear') # everything after coordinates is magnitudes magnitudes = Table(t.columns[i_coordinates+1:]) newtable = hstack([Table(identifiers), Table({'coordinates':coordinates}), Table(magnitudes)]) ''' this = cls(t) this.speak('loaded constellation from {}'.format(filename)) return this
def function[from_text, parameter[cls, filename]]: constant[ Create a constellation by reading a catalog in from a text file, as long as it's formated as in to_text() with identifiers, coordinates, magnitudes. Parameters ---------- filename : str The filename to read in. **kwargs are passed to astropy.io.ascii.read() ] variable[t] assign[=] call[name[ascii].read, parameter[name[filename]]] constant[ # which columns is the coordinates? i_coordinates = t.colnames.index('ra') # everything before the coordinates is an identifier identifiers = Table(t.columns[:i_coordinates]) # the complete coordinates are stored in one c = t.columns[i_coordinates:i_coordinates+6] coordinates = coord.SkyCoord(**c) coordinates.obstime=Time(cls.epoch, format='decimalyear') # everything after coordinates is magnitudes magnitudes = Table(t.columns[i_coordinates+1:]) newtable = hstack([Table(identifiers), Table({'coordinates':coordinates}), Table(magnitudes)]) ] variable[this] assign[=] call[name[cls], parameter[name[t]]] call[name[this].speak, parameter[call[constant[loaded constellation from {}].format, parameter[name[filename]]]]] return[name[this]]
keyword[def] identifier[from_text] ( identifier[cls] , identifier[filename] ,** identifier[kwargs] ): literal[string] identifier[t] = identifier[ascii] . identifier[read] ( identifier[filename] ,** identifier[kwargs] ) literal[string] identifier[this] = identifier[cls] ( identifier[t] ) identifier[this] . identifier[speak] ( literal[string] . identifier[format] ( identifier[filename] )) keyword[return] identifier[this]
def from_text(cls, filename, **kwargs): """ Create a constellation by reading a catalog in from a text file, as long as it's formated as in to_text() with identifiers, coordinates, magnitudes. Parameters ---------- filename : str The filename to read in. **kwargs are passed to astropy.io.ascii.read() """ # FIXME -- add something here to parse id, mag, errors from the table? # load the table t = ascii.read(filename, **kwargs) "\n # which columns is the coordinates?\n i_coordinates = t.colnames.index('ra')\n\n # everything before the coordinates is an identifier\n identifiers = Table(t.columns[:i_coordinates])\n\n # the complete coordinates are stored in one\n c = t.columns[i_coordinates:i_coordinates+6]\n coordinates = coord.SkyCoord(**c)\n coordinates.obstime=Time(cls.epoch, format='decimalyear')\n\n # everything after coordinates is magnitudes\n magnitudes = Table(t.columns[i_coordinates+1:])\n\n newtable = hstack([Table(identifiers),\n Table({'coordinates':coordinates}),\n Table(magnitudes)])\n " this = cls(t) this.speak('loaded constellation from {}'.format(filename)) return this
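ascii.read also accepts an inline table string, which makes the reader above easy to try without a file; the column names here are illustrative, not the catalog's real schema:

from astropy.io import ascii

text = """id ra dec Gmag
42 10.1 -5.2 12.3
43 10.4 -5.0 13.1"""          # made-up two-row catalog
t = ascii.read(text)
print(t.colnames)             # ['id', 'ra', 'dec', 'Gmag']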