code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def run(self):
    """Run the exchange loop.

    Announces readiness on ``started_queue``, then dispatches every event
    read from ``publisher_queue`` until the poison pill arrives.
    """
    self.started_queue.put('STARTED')
    message = self.publisher_queue.get()
    while message != POISON_PILL:
        self.dispatch(message)
        message = self.publisher_queue.get()
def function[run, parameter[self]]: constant[Start the exchange] call[name[self].started_queue.put, parameter[constant[STARTED]]] while constant[True] begin[:] variable[event] assign[=] call[name[self].publisher_queue.get, parameter[]] if compare[name[event] equal[==] name[POISON_PILL]] begin[:] return[None]
keyword[def] identifier[run] ( identifier[self] ): literal[string] identifier[self] . identifier[started_queue] . identifier[put] ( literal[string] ) keyword[while] keyword[True] : identifier[event] = identifier[self] . identifier[publisher_queue] . identifier[get] () keyword[if] identifier[event] == identifier[POISON_PILL] : keyword[return] keyword[else] : identifier[self] . identifier[dispatch] ( identifier[event] )
def run(self): """Start the exchange""" self.started_queue.put('STARTED') while True: event = self.publisher_queue.get() if event == POISON_PILL: return # depends on [control=['if'], data=[]] else: self.dispatch(event) # depends on [control=['while'], data=[]]
def get_msg_login(self, username):
    """Return the login welcome message.

    As a side effect, when an account matches *username*, its last-login
    timestamp is refreshed and the account is persisted.
    """
    matching_account = self.get_account(username)
    if matching_account:
        matching_account.update_last_login()
        matching_account.save()
    return 'welcome.'
def function[get_msg_login, parameter[self, username]]: constant[message for welcome. ] variable[account] assign[=] call[name[self].get_account, parameter[name[username]]] if name[account] begin[:] call[name[account].update_last_login, parameter[]] call[name[account].save, parameter[]] return[constant[welcome.]]
keyword[def] identifier[get_msg_login] ( identifier[self] , identifier[username] ): literal[string] identifier[account] = identifier[self] . identifier[get_account] ( identifier[username] ) keyword[if] identifier[account] : identifier[account] . identifier[update_last_login] () identifier[account] . identifier[save] () keyword[return] literal[string]
def get_msg_login(self, username): """message for welcome. """ account = self.get_account(username) if account: account.update_last_login() account.save() # depends on [control=['if'], data=[]] return 'welcome.'
def remove_start_NaN(self, data, var=None):
    """
    Drop all rows before the first valid (non-NaN) index.

    CHECK: Note issue with multi-column df.

    Parameters
    ----------
    data : pd.DataFrame()
        Input dataframe.
    var : list(str)
        Optional subset of columns used to locate the first valid index.

    Returns
    -------
    pd.DataFrame()
        Dataframe starting from its first valid index.
    """
    # Search either the requested columns or the whole frame.
    searched = data[var] if var else data
    first_valid = searched.first_valid_index()
    return data.loc[first_valid:, :]
def function[remove_start_NaN, parameter[self, data, var]]: constant[ Remove start NaN. CHECK: Note issue with multi-column df. Parameters ---------- data : pd.DataFrame() Input dataframe. var : list(str) List that specifies specific columns of dataframe. Returns ------- pd.DataFrame() Dataframe starting from its first valid index. ] if name[var] begin[:] variable[start_ok_data] assign[=] call[call[name[data]][name[var]].first_valid_index, parameter[]] variable[data] assign[=] call[name[data].loc][tuple[[<ast.Slice object at 0x7da204344f70>, <ast.Slice object at 0x7da204346a70>]]] return[name[data]]
keyword[def] identifier[remove_start_NaN] ( identifier[self] , identifier[data] , identifier[var] = keyword[None] ): literal[string] keyword[if] identifier[var] : identifier[start_ok_data] = identifier[data] [ identifier[var] ]. identifier[first_valid_index] () keyword[else] : identifier[start_ok_data] = identifier[data] . identifier[first_valid_index] () identifier[data] = identifier[data] . identifier[loc] [ identifier[start_ok_data] :,:] keyword[return] identifier[data]
def remove_start_NaN(self, data, var=None): """ Remove start NaN. CHECK: Note issue with multi-column df. Parameters ---------- data : pd.DataFrame() Input dataframe. var : list(str) List that specifies specific columns of dataframe. Returns ------- pd.DataFrame() Dataframe starting from its first valid index. """ # Limit to one or some variables if var: start_ok_data = data[var].first_valid_index() # depends on [control=['if'], data=[]] else: start_ok_data = data.first_valid_index() data = data.loc[start_ok_data:, :] return data
def _readable_part_size(num_bytes): "Returns the file size in readable form." B = num_bytes KB = float(1024) MB = float(KB * 1024) GB = float(MB * 1024) TB = float(GB * 1024) if B < KB: return '{0} {1}'.format(B, 'bytes' if B != 1 else 'byte') elif KB <= B < MB: return '{0:.2f} KiB'.format(B/KB) elif MB <= B < GB: return '{0:.2f} MiB'.format(B/MB) elif GB <= B < TB: return '{0:.2f} GiB'.format(B/GB) elif TB <= B: return '{0:.2f} TiB'.format(B/TB)
def function[_readable_part_size, parameter[num_bytes]]: constant[Returns the file size in readable form.] variable[B] assign[=] name[num_bytes] variable[KB] assign[=] call[name[float], parameter[constant[1024]]] variable[MB] assign[=] call[name[float], parameter[binary_operation[name[KB] * constant[1024]]]] variable[GB] assign[=] call[name[float], parameter[binary_operation[name[MB] * constant[1024]]]] variable[TB] assign[=] call[name[float], parameter[binary_operation[name[GB] * constant[1024]]]] if compare[name[B] less[<] name[KB]] begin[:] return[call[constant[{0} {1}].format, parameter[name[B], <ast.IfExp object at 0x7da20c6c7f10>]]]
keyword[def] identifier[_readable_part_size] ( identifier[num_bytes] ): literal[string] identifier[B] = identifier[num_bytes] identifier[KB] = identifier[float] ( literal[int] ) identifier[MB] = identifier[float] ( identifier[KB] * literal[int] ) identifier[GB] = identifier[float] ( identifier[MB] * literal[int] ) identifier[TB] = identifier[float] ( identifier[GB] * literal[int] ) keyword[if] identifier[B] < identifier[KB] : keyword[return] literal[string] . identifier[format] ( identifier[B] , literal[string] keyword[if] identifier[B] != literal[int] keyword[else] literal[string] ) keyword[elif] identifier[KB] <= identifier[B] < identifier[MB] : keyword[return] literal[string] . identifier[format] ( identifier[B] / identifier[KB] ) keyword[elif] identifier[MB] <= identifier[B] < identifier[GB] : keyword[return] literal[string] . identifier[format] ( identifier[B] / identifier[MB] ) keyword[elif] identifier[GB] <= identifier[B] < identifier[TB] : keyword[return] literal[string] . identifier[format] ( identifier[B] / identifier[GB] ) keyword[elif] identifier[TB] <= identifier[B] : keyword[return] literal[string] . identifier[format] ( identifier[B] / identifier[TB] )
def _readable_part_size(num_bytes): """Returns the file size in readable form.""" B = num_bytes KB = float(1024) MB = float(KB * 1024) GB = float(MB * 1024) TB = float(GB * 1024) if B < KB: return '{0} {1}'.format(B, 'bytes' if B != 1 else 'byte') # depends on [control=['if'], data=['B']] elif KB <= B < MB: return '{0:.2f} KiB'.format(B / KB) # depends on [control=['if'], data=['KB', 'B']] elif MB <= B < GB: return '{0:.2f} MiB'.format(B / MB) # depends on [control=['if'], data=['MB', 'B']] elif GB <= B < TB: return '{0:.2f} GiB'.format(B / GB) # depends on [control=['if'], data=['GB', 'B']] elif TB <= B: return '{0:.2f} TiB'.format(B / TB) # depends on [control=['if'], data=['TB', 'B']]
def run_subprocess(command: str, verbose: bool = True, blocking: bool = True) \
        -> Optional[subprocess.Popen]:
    """Execute the given command in a new process.

    Only when both `verbose` and `blocking` are |True|, |run_subprocess|
    prints all responses to the current value of |sys.stdout|:

    >>> from hydpy import run_subprocess
    >>> import platform
    >>> esc = '' if 'windows' in platform.platform().lower() else '\\\\'
    >>> run_subprocess(f'python -c print{esc}(1+1{esc})')
    2

    With verbose being |False|, |run_subprocess| does never print out
    anything:

    >>> run_subprocess(f'python -c print{esc}(1+1{esc})', verbose=False)

    >>> process = run_subprocess('python', blocking=False, verbose=False)
    >>> process.kill()
    >>> _ = process.communicate()

    When `verbose` is |True| and `blocking` is |False|, |run_subprocess|
    prints all responses to the console ("invisible" for doctests):

    >>> process = run_subprocess('python', blocking=False)
    >>> process.kill()
    >>> _ = process.communicate()
    """
    if not blocking:
        # Detached mode: hand back the live process; silence it entirely
        # unless the caller wants the child to write to the console.
        streams = None if verbose else subprocess.DEVNULL
        return subprocess.Popen(
            command, stdout=streams, stderr=streams,
            encoding='utf-8', shell=True)
    completed = subprocess.run(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        encoding='utf-8', shell=True)
    if verbose:  # due to doctest replacing sys.stdout
        for text in (completed.stdout, completed.stderr):
            text = text.strip()
            if text:
                print(text)
    return None
def function[run_subprocess, parameter[command, verbose, blocking]]: constant[Execute the given command in a new process. Only when both `verbose` and `blocking` are |True|, |run_subprocess| prints all responses to the current value of |sys.stdout|: >>> from hydpy import run_subprocess >>> import platform >>> esc = '' if 'windows' in platform.platform().lower() else '\\' >>> run_subprocess(f'python -c print{esc}(1+1{esc})') 2 With verbose being |False|, |run_subprocess| does never print out anything: >>> run_subprocess(f'python -c print{esc}(1+1{esc})', verbose=False) >>> process = run_subprocess('python', blocking=False, verbose=False) >>> process.kill() >>> _ = process.communicate() When `verbose` is |True| and `blocking` is |False|, |run_subprocess| prints all responses to the console ("invisible" for doctests): >>> process = run_subprocess('python', blocking=False) >>> process.kill() >>> _ = process.communicate() ] if name[blocking] begin[:] variable[result1] assign[=] call[name[subprocess].run, parameter[name[command]]] if name[verbose] begin[:] for taget[name[output]] in starred[tuple[[<ast.Attribute object at 0x7da2041dabc0>, <ast.Attribute object at 0x7da2041db040>]]] begin[:] variable[output] assign[=] call[name[output].strip, parameter[]] if name[output] begin[:] call[name[print], parameter[name[output]]] return[constant[None]] variable[stdouterr] assign[=] <ast.IfExp object at 0x7da2041dbd00> variable[result2] assign[=] call[name[subprocess].Popen, parameter[name[command]]] return[name[result2]]
keyword[def] identifier[run_subprocess] ( identifier[command] : identifier[str] , identifier[verbose] : identifier[bool] = keyword[True] , identifier[blocking] : identifier[bool] = keyword[True] )-> identifier[Optional] [ identifier[subprocess] . identifier[Popen] ]: literal[string] keyword[if] identifier[blocking] : identifier[result1] = identifier[subprocess] . identifier[run] ( identifier[command] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] , identifier[encoding] = literal[string] , identifier[shell] = keyword[True] ) keyword[if] identifier[verbose] : keyword[for] identifier[output] keyword[in] ( identifier[result1] . identifier[stdout] , identifier[result1] . identifier[stderr] ): identifier[output] = identifier[output] . identifier[strip] () keyword[if] identifier[output] : identifier[print] ( identifier[output] ) keyword[return] keyword[None] identifier[stdouterr] = keyword[None] keyword[if] identifier[verbose] keyword[else] identifier[subprocess] . identifier[DEVNULL] identifier[result2] = identifier[subprocess] . identifier[Popen] ( identifier[command] , identifier[stdout] = identifier[stdouterr] , identifier[stderr] = identifier[stdouterr] , identifier[encoding] = literal[string] , identifier[shell] = keyword[True] ) keyword[return] identifier[result2]
def run_subprocess(command: str, verbose: bool=True, blocking: bool=True) -> Optional[subprocess.Popen]: """Execute the given command in a new process. Only when both `verbose` and `blocking` are |True|, |run_subprocess| prints all responses to the current value of |sys.stdout|: >>> from hydpy import run_subprocess >>> import platform >>> esc = '' if 'windows' in platform.platform().lower() else '\\\\' >>> run_subprocess(f'python -c print{esc}(1+1{esc})') 2 With verbose being |False|, |run_subprocess| does never print out anything: >>> run_subprocess(f'python -c print{esc}(1+1{esc})', verbose=False) >>> process = run_subprocess('python', blocking=False, verbose=False) >>> process.kill() >>> _ = process.communicate() When `verbose` is |True| and `blocking` is |False|, |run_subprocess| prints all responses to the console ("invisible" for doctests): >>> process = run_subprocess('python', blocking=False) >>> process.kill() >>> _ = process.communicate() """ if blocking: result1 = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8', shell=True) if verbose: # due to doctest replacing sys.stdout for output in (result1.stdout, result1.stderr): output = output.strip() if output: print(output) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['output']] # depends on [control=['if'], data=[]] return None # depends on [control=['if'], data=[]] stdouterr = None if verbose else subprocess.DEVNULL result2 = subprocess.Popen(command, stdout=stdouterr, stderr=stdouterr, encoding='utf-8', shell=True) return result2
def _load_items_from_file(keychain, path):
    """
    Given a single file, loads all the trust objects from it into arrays
    and the keychain.
    Returns a tuple of lists: the first list is a list of identities, the
    second a list of certs.
    """
    certificates = []
    identities = []
    result_array = None

    # Read the raw bytes of the file up front; SecItemImport works on an
    # in-memory CFData blob, not on a path.
    with open(path, 'rb') as f:
        raw_filedata = f.read()

    try:
        # NOTE(review): if CFDataCreate itself raises, `filedata` is unbound
        # and the CFRelease in the finally block would fail -- confirm.
        filedata = CoreFoundation.CFDataCreate(
            CoreFoundation.kCFAllocatorDefault,
            raw_filedata,
            len(raw_filedata)
        )
        result_array = CoreFoundation.CFArrayRef()
        result = Security.SecItemImport(
            filedata,  # cert data
            None,  # Filename, leaving it out for now
            None,  # What the type of the file is, we don't care
            None,  # what's in the file, we don't care
            0,  # import flags
            None,  # key params, can include passphrase in the future
            keychain,  # The keychain to insert into
            ctypes.byref(result_array)  # Results
        )
        _assert_no_error(result)

        # A CFArray is not very useful to us as an intermediary
        # representation, so we are going to extract the objects we want
        # and then free the array. We don't need to keep hold of keys: the
        # keychain already has them!
        result_count = CoreFoundation.CFArrayGetCount(result_array)
        for index in range(result_count):
            item = CoreFoundation.CFArrayGetValueAtIndex(
                result_array, index
            )
            item = ctypes.cast(item, CoreFoundation.CFTypeRef)

            # CFRetain before appending: the extra reference keeps each
            # object alive after the containing array is released below.
            if _is_cert(item):
                CoreFoundation.CFRetain(item)
                certificates.append(item)
            elif _is_identity(item):
                CoreFoundation.CFRetain(item)
                identities.append(item)
    finally:
        # Release the intermediate CF objects whether or not import
        # succeeded; the retained items above survive this cleanup.
        if result_array:
            CoreFoundation.CFRelease(result_array)
        CoreFoundation.CFRelease(filedata)

    return (identities, certificates)
def function[_load_items_from_file, parameter[keychain, path]]: constant[ Given a single file, loads all the trust objects from it into arrays and the keychain. Returns a tuple of lists: the first list is a list of identities, the second a list of certs. ] variable[certificates] assign[=] list[[]] variable[identities] assign[=] list[[]] variable[result_array] assign[=] constant[None] with call[name[open], parameter[name[path], constant[rb]]] begin[:] variable[raw_filedata] assign[=] call[name[f].read, parameter[]] <ast.Try object at 0x7da2041d9240> return[tuple[[<ast.Name object at 0x7da18eb57640>, <ast.Name object at 0x7da18eb556c0>]]]
keyword[def] identifier[_load_items_from_file] ( identifier[keychain] , identifier[path] ): literal[string] identifier[certificates] =[] identifier[identities] =[] identifier[result_array] = keyword[None] keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] : identifier[raw_filedata] = identifier[f] . identifier[read] () keyword[try] : identifier[filedata] = identifier[CoreFoundation] . identifier[CFDataCreate] ( identifier[CoreFoundation] . identifier[kCFAllocatorDefault] , identifier[raw_filedata] , identifier[len] ( identifier[raw_filedata] ) ) identifier[result_array] = identifier[CoreFoundation] . identifier[CFArrayRef] () identifier[result] = identifier[Security] . identifier[SecItemImport] ( identifier[filedata] , keyword[None] , keyword[None] , keyword[None] , literal[int] , keyword[None] , identifier[keychain] , identifier[ctypes] . identifier[byref] ( identifier[result_array] ) ) identifier[_assert_no_error] ( identifier[result] ) identifier[result_count] = identifier[CoreFoundation] . identifier[CFArrayGetCount] ( identifier[result_array] ) keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[result_count] ): identifier[item] = identifier[CoreFoundation] . identifier[CFArrayGetValueAtIndex] ( identifier[result_array] , identifier[index] ) identifier[item] = identifier[ctypes] . identifier[cast] ( identifier[item] , identifier[CoreFoundation] . identifier[CFTypeRef] ) keyword[if] identifier[_is_cert] ( identifier[item] ): identifier[CoreFoundation] . identifier[CFRetain] ( identifier[item] ) identifier[certificates] . identifier[append] ( identifier[item] ) keyword[elif] identifier[_is_identity] ( identifier[item] ): identifier[CoreFoundation] . identifier[CFRetain] ( identifier[item] ) identifier[identities] . identifier[append] ( identifier[item] ) keyword[finally] : keyword[if] identifier[result_array] : identifier[CoreFoundation] . 
identifier[CFRelease] ( identifier[result_array] ) identifier[CoreFoundation] . identifier[CFRelease] ( identifier[filedata] ) keyword[return] ( identifier[identities] , identifier[certificates] )
def _load_items_from_file(keychain, path): """ Given a single file, loads all the trust objects from it into arrays and the keychain. Returns a tuple of lists: the first list is a list of identities, the second a list of certs. """ certificates = [] identities = [] result_array = None with open(path, 'rb') as f: raw_filedata = f.read() # depends on [control=['with'], data=['f']] try: filedata = CoreFoundation.CFDataCreate(CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)) result_array = CoreFoundation.CFArrayRef() # cert data # Filename, leaving it out for now # What the type of the file is, we don't care # what's in the file, we don't care # import flags # key params, can include passphrase in the future # The keychain to insert into # Results result = Security.SecItemImport(filedata, None, None, None, 0, None, keychain, ctypes.byref(result_array)) _assert_no_error(result) # A CFArray is not very useful to us as an intermediary # representation, so we are going to extract the objects we want # and then free the array. We don't need to keep hold of keys: the # keychain already has them! result_count = CoreFoundation.CFArrayGetCount(result_array) for index in range(result_count): item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index) item = ctypes.cast(item, CoreFoundation.CFTypeRef) if _is_cert(item): CoreFoundation.CFRetain(item) certificates.append(item) # depends on [control=['if'], data=[]] elif _is_identity(item): CoreFoundation.CFRetain(item) identities.append(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']] # depends on [control=['try'], data=[]] finally: if result_array: CoreFoundation.CFRelease(result_array) # depends on [control=['if'], data=[]] CoreFoundation.CFRelease(filedata) return (identities, certificates)
def run(locations, random, bikes, crime, nearby, json, update_bikes, api_server,
        cross_origin, host, port, db_path, verbose):
    """
    Runs the program.

    Takes a list of postcodes or coordinates and returns various information
    about them. If using the cli, make sure to update the bikes database
    with the -u command.

    Locations can be either a specific postcode, or a pair of coordinates.
    Coordinates are passed in the form "55.948824,-3.196425".

    :param locations: The list of postcodes or coordinates to search.
    :param random: The number of random postcodes to include.
    :param bikes: Includes a list of stolen bikes in that area.
    :param crime: Includes a list of committed crimes in that area.
    :param nearby: Includes a list of wikipedia articles in that area.
    :param json: Returns the data in json format.
    :param update_bikes: Whether to force update bikes.
    :param api_server: If given, the program will instead run a rest api.
    :param cross_origin: Enables cross-origin handling on the rest api app.
    :param host: The host/interface the rest api binds to.
    :param port: Defines the port to run the rest api on.
    :param db_path: The path to the sqlite db to use.
    :param verbose: The verbosity.
    """
    # Map repeated -v flags onto logging levels, capping at DEBUG.
    log_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=log_levels[min(verbose, 2)])
    initialize_database(db_path)
    loop = get_event_loop()
    if update_bikes:
        logger.info("Force updating bikes.")
        loop.run_until_complete(util.update_bikes())
    if api_server:
        if cross_origin:
            enable_cross_origin(app)
        try:
            web.run_app(app, host=host, port=port)
        except CancelledError as e:
            if e.__context__ is not None:
                # NOTE(review): errno 48 is EADDRINUSE on macOS/BSD; on Linux
                # EADDRINUSE is 98 -- confirm which platforms this targets.
                click.echo(Fore.RED + (
                    f"Could not bind to address {host}:{port}"
                    if e.__context__.errno == 48
                    else e.__context__))
                exit(1)
            else:
                # Cancellation without an underlying error: normal shutdown.
                click.echo("Goodbye!")
    elif len(locations) > 0 or random > 0:
        # CLI mode: exit with the status code produced by the coroutine.
        exit(loop.run_until_complete(
            cli(locations, random, bikes=bikes, crime=crime, nearby=nearby,
                as_json=json)))
    else:
        click.echo(Fore.RED + "Either include a post code, or the --api-server flag.")
def function[run, parameter[locations, random, bikes, crime, nearby, json, update_bikes, api_server, cross_origin, host, port, db_path, verbose]]: constant[ Runs the program. Takes a list of postcodes or coordinates and returns various information about them. If using the cli, make sure to update the bikes database with the -u command. Locations can be either a specific postcode, or a pair of coordinates. Coordinates are passed in the form "55.948824,-3.196425". :param locations: The list of postcodes or coordinates to search. :param random: The number of random postcodes to include. :param bikes: Includes a list of stolen bikes in that area. :param crime: Includes a list of committed crimes in that area. :param nearby: Includes a list of wikipedia articles in that area. :param json: Returns the data in json format. :param update_bikes: Whether to force update bikes. :param api_server: If given, the program will instead run a rest api. :param cross_origin: :param host: :param port: Defines the port to run the rest api on. :param db_path: The path to the sqlite db to use. :param verbose: The verbosity. ] variable[log_levels] assign[=] list[[<ast.Attribute object at 0x7da2054a7490>, <ast.Attribute object at 0x7da2054a6590>, <ast.Attribute object at 0x7da2054a7be0>]] call[name[logging].basicConfig, parameter[]] call[name[initialize_database], parameter[name[db_path]]] variable[loop] assign[=] call[name[get_event_loop], parameter[]] if name[update_bikes] begin[:] call[name[logger].info, parameter[constant[Force updating bikes.]]] call[name[loop].run_until_complete, parameter[call[name[util].update_bikes, parameter[]]]] if name[api_server] begin[:] if name[cross_origin] begin[:] call[name[enable_cross_origin], parameter[name[app]]] <ast.Try object at 0x7da2054a7bb0>
keyword[def] identifier[run] ( identifier[locations] , identifier[random] , identifier[bikes] , identifier[crime] , identifier[nearby] , identifier[json] , identifier[update_bikes] , identifier[api_server] , identifier[cross_origin] , identifier[host] , identifier[port] , identifier[db_path] , identifier[verbose] ): literal[string] identifier[log_levels] =[ identifier[logging] . identifier[WARNING] , identifier[logging] . identifier[INFO] , identifier[logging] . identifier[DEBUG] ] identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[log_levels] [ identifier[min] ( identifier[verbose] , literal[int] )]) identifier[initialize_database] ( identifier[db_path] ) identifier[loop] = identifier[get_event_loop] () keyword[if] identifier[update_bikes] : identifier[logger] . identifier[info] ( literal[string] ) identifier[loop] . identifier[run_until_complete] ( identifier[util] . identifier[update_bikes] ()) keyword[if] identifier[api_server] : keyword[if] identifier[cross_origin] : identifier[enable_cross_origin] ( identifier[app] ) keyword[try] : identifier[web] . identifier[run_app] ( identifier[app] , identifier[host] = identifier[host] , identifier[port] = identifier[port] ) keyword[except] identifier[CancelledError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[__context__] keyword[is] keyword[not] keyword[None] : identifier[click] . identifier[echo] ( identifier[Fore] . identifier[RED] +( literal[string] keyword[if] identifier[e] . identifier[__context__] . identifier[errno] == literal[int] keyword[else] identifier[e] . identifier[__context__] )) identifier[exit] ( literal[int] ) keyword[else] : identifier[click] . identifier[echo] ( literal[string] ) keyword[elif] identifier[len] ( identifier[locations] )> literal[int] keyword[or] identifier[random] > literal[int] : identifier[exit] ( identifier[loop] . 
identifier[run_until_complete] ( identifier[cli] ( identifier[locations] , identifier[random] , identifier[bikes] = identifier[bikes] , identifier[crime] = identifier[crime] , identifier[nearby] = identifier[nearby] , identifier[as_json] = identifier[json] ))) keyword[else] : identifier[click] . identifier[echo] ( identifier[Fore] . identifier[RED] + literal[string] )
def run(locations, random, bikes, crime, nearby, json, update_bikes, api_server, cross_origin, host, port, db_path, verbose): """ Runs the program. Takes a list of postcodes or coordinates and returns various information about them. If using the cli, make sure to update the bikes database with the -u command. Locations can be either a specific postcode, or a pair of coordinates. Coordinates are passed in the form "55.948824,-3.196425". :param locations: The list of postcodes or coordinates to search. :param random: The number of random postcodes to include. :param bikes: Includes a list of stolen bikes in that area. :param crime: Includes a list of committed crimes in that area. :param nearby: Includes a list of wikipedia articles in that area. :param json: Returns the data in json format. :param update_bikes: Whether to force update bikes. :param api_server: If given, the program will instead run a rest api. :param cross_origin: :param host: :param port: Defines the port to run the rest api on. :param db_path: The path to the sqlite db to use. :param verbose: The verbosity. 
""" log_levels = [logging.WARNING, logging.INFO, logging.DEBUG] logging.basicConfig(level=log_levels[min(verbose, 2)]) initialize_database(db_path) loop = get_event_loop() if update_bikes: logger.info('Force updating bikes.') loop.run_until_complete(util.update_bikes()) # depends on [control=['if'], data=[]] if api_server: if cross_origin: enable_cross_origin(app) # depends on [control=['if'], data=[]] try: web.run_app(app, host=host, port=port) # depends on [control=['try'], data=[]] except CancelledError as e: if e.__context__ is not None: click.echo(Fore.RED + (f'Could not bind to address {host}:{port}' if e.__context__.errno == 48 else e.__context__)) exit(1) # depends on [control=['if'], data=[]] else: click.echo('Goodbye!') # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] elif len(locations) > 0 or random > 0: exit(loop.run_until_complete(cli(locations, random, bikes=bikes, crime=crime, nearby=nearby, as_json=json))) # depends on [control=['if'], data=[]] else: click.echo(Fore.RED + 'Either include a post code, or the --api-server flag.')
def parse_end_date(self, request, start_date):
    """
    Return the end of the period for which to show event occurrences.

    The value is the first available of, in priority order:

    - the `end_date` GET parameter, if given and valid; filtering is
      *inclusive* of the end date (until end-of-day of this date)
    - `days_to_show` GET parameter, if given and valid
    - the page's `default_days_to_show`, if set
    - the app setting `DEFAULT_DAYS_TO_SHOW`
    """
    raw_end_date = request.GET.get('end_date')
    if raw_end_date:
        try:
            return djtz.parse('%s 00:00' % raw_end_date)
        except ValueError:
            pass  # unparsable date: fall back to a day-count window
    days_to_show = self.default_days_to_show or \
        appsettings.DEFAULT_DAYS_TO_SHOW
    if 'days_to_show' in request.GET:
        try:
            days_to_show = int(request.GET.get('days_to_show'))
        except ValueError:
            pass  # non-numeric override: keep the default day count
    return start_date + timedelta(days=days_to_show)
def function[parse_end_date, parameter[self, request, start_date]]: constant[ Return period in days after the start date to show event occurrences, which is one of the following in order of priority: - `end_date` GET parameter value, if given and valid. The filtering will be *inclusive* of the end date: until end-of-day of this date - `days_to_show` GET parameter value, if given and valid - page's `default_days_to_show` if set - the value of the app setting `DEFAULT_DAYS_TO_SHOW` ] if call[name[request].GET.get, parameter[constant[end_date]]] begin[:] <ast.Try object at 0x7da204347100> variable[days_to_show] assign[=] <ast.BoolOp object at 0x7da204344fa0> if compare[constant[days_to_show] in name[request].GET] begin[:] <ast.Try object at 0x7da204347a60> return[binary_operation[name[start_date] + call[name[timedelta], parameter[]]]]
keyword[def] identifier[parse_end_date] ( identifier[self] , identifier[request] , identifier[start_date] ): literal[string] keyword[if] identifier[request] . identifier[GET] . identifier[get] ( literal[string] ): keyword[try] : keyword[return] identifier[djtz] . identifier[parse] ( literal[string] % identifier[request] . identifier[GET] . identifier[get] ( literal[string] )) keyword[except] identifier[ValueError] : keyword[pass] identifier[days_to_show] = identifier[self] . identifier[default_days_to_show] keyword[or] identifier[appsettings] . identifier[DEFAULT_DAYS_TO_SHOW] keyword[if] literal[string] keyword[in] identifier[request] . identifier[GET] : keyword[try] : identifier[days_to_show] = identifier[int] ( identifier[request] . identifier[GET] . identifier[get] ( literal[string] )) keyword[except] identifier[ValueError] : keyword[pass] keyword[return] identifier[start_date] + identifier[timedelta] ( identifier[days] = identifier[days_to_show] )
def parse_end_date(self, request, start_date): """ Return period in days after the start date to show event occurrences, which is one of the following in order of priority: - `end_date` GET parameter value, if given and valid. The filtering will be *inclusive* of the end date: until end-of-day of this date - `days_to_show` GET parameter value, if given and valid - page's `default_days_to_show` if set - the value of the app setting `DEFAULT_DAYS_TO_SHOW` """ if request.GET.get('end_date'): try: return djtz.parse('%s 00:00' % request.GET.get('end_date')) # depends on [control=['try'], data=[]] except ValueError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] days_to_show = self.default_days_to_show or appsettings.DEFAULT_DAYS_TO_SHOW if 'days_to_show' in request.GET: try: days_to_show = int(request.GET.get('days_to_show')) # depends on [control=['try'], data=[]] except ValueError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return start_date + timedelta(days=days_to_show)
def _get_current_minute(self): """ Internal utility method to get the current simulation time. Possible answers are: - whatever the algorithm's get_datetime() method returns (this is what `self.simulation_dt_func()` points to) - sometimes we're knowingly not in a market minute, like if we're in before_trading_start. In that case, `self._adjust_minutes` is True, and we get the previous market minute. - if we're in daily mode, get the session label for this minute. """ dt = self.datetime if self._adjust_minutes: dt = \ self.data_portal.trading_calendar.previous_minute(dt) if self._daily_mode: # if we're in daily mode, take the given dt (which is the last # minute of the session) and get the session label for it. dt = self.data_portal.trading_calendar.minute_to_session_label(dt) return dt
def function[_get_current_minute, parameter[self]]: constant[ Internal utility method to get the current simulation time. Possible answers are: - whatever the algorithm's get_datetime() method returns (this is what `self.simulation_dt_func()` points to) - sometimes we're knowingly not in a market minute, like if we're in before_trading_start. In that case, `self._adjust_minutes` is True, and we get the previous market minute. - if we're in daily mode, get the session label for this minute. ] variable[dt] assign[=] name[self].datetime if name[self]._adjust_minutes begin[:] variable[dt] assign[=] call[name[self].data_portal.trading_calendar.previous_minute, parameter[name[dt]]] if name[self]._daily_mode begin[:] variable[dt] assign[=] call[name[self].data_portal.trading_calendar.minute_to_session_label, parameter[name[dt]]] return[name[dt]]
keyword[def] identifier[_get_current_minute] ( identifier[self] ): literal[string] identifier[dt] = identifier[self] . identifier[datetime] keyword[if] identifier[self] . identifier[_adjust_minutes] : identifier[dt] = identifier[self] . identifier[data_portal] . identifier[trading_calendar] . identifier[previous_minute] ( identifier[dt] ) keyword[if] identifier[self] . identifier[_daily_mode] : identifier[dt] = identifier[self] . identifier[data_portal] . identifier[trading_calendar] . identifier[minute_to_session_label] ( identifier[dt] ) keyword[return] identifier[dt]
def _get_current_minute(self): """ Internal utility method to get the current simulation time. Possible answers are: - whatever the algorithm's get_datetime() method returns (this is what `self.simulation_dt_func()` points to) - sometimes we're knowingly not in a market minute, like if we're in before_trading_start. In that case, `self._adjust_minutes` is True, and we get the previous market minute. - if we're in daily mode, get the session label for this minute. """ dt = self.datetime if self._adjust_minutes: dt = self.data_portal.trading_calendar.previous_minute(dt) # depends on [control=['if'], data=[]] if self._daily_mode: # if we're in daily mode, take the given dt (which is the last # minute of the session) and get the session label for it. dt = self.data_portal.trading_calendar.minute_to_session_label(dt) # depends on [control=['if'], data=[]] return dt
def doc_metadata(doc): """Create a metadata dict from a MetatabDoc, for Document conversion""" r = doc['Root'].as_dict() r.update(doc['Contacts'].as_dict()) r['author'] = r.get('author', r.get('creator', r.get('wrangler'))) return r
def function[doc_metadata, parameter[doc]]: constant[Create a metadata dict from a MetatabDoc, for Document conversion] variable[r] assign[=] call[call[name[doc]][constant[Root]].as_dict, parameter[]] call[name[r].update, parameter[call[call[name[doc]][constant[Contacts]].as_dict, parameter[]]]] call[name[r]][constant[author]] assign[=] call[name[r].get, parameter[constant[author], call[name[r].get, parameter[constant[creator], call[name[r].get, parameter[constant[wrangler]]]]]]] return[name[r]]
keyword[def] identifier[doc_metadata] ( identifier[doc] ): literal[string] identifier[r] = identifier[doc] [ literal[string] ]. identifier[as_dict] () identifier[r] . identifier[update] ( identifier[doc] [ literal[string] ]. identifier[as_dict] ()) identifier[r] [ literal[string] ]= identifier[r] . identifier[get] ( literal[string] , identifier[r] . identifier[get] ( literal[string] , identifier[r] . identifier[get] ( literal[string] ))) keyword[return] identifier[r]
def doc_metadata(doc): """Create a metadata dict from a MetatabDoc, for Document conversion""" r = doc['Root'].as_dict() r.update(doc['Contacts'].as_dict()) r['author'] = r.get('author', r.get('creator', r.get('wrangler'))) return r
def get_input_grads(self, merge_multi_context=True): """Get the gradients with respect to the inputs of the module. Parameters ---------- merge_multi_context : bool Defaults to ``True``. In the case when data-parallelism is used, the outputs will be collected from multiple devices. A `True` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output elements are `NDArray`. """ assert self.inputs_need_grad if merge_multi_context: return _merge_multi_context(self.input_grad_arrays, self.data_layouts) return self.input_grad_arrays
def function[get_input_grads, parameter[self, merge_multi_context]]: constant[Get the gradients with respect to the inputs of the module. Parameters ---------- merge_multi_context : bool Defaults to ``True``. In the case when data-parallelism is used, the outputs will be collected from multiple devices. A `True` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output elements are `NDArray`. ] assert[name[self].inputs_need_grad] if name[merge_multi_context] begin[:] return[call[name[_merge_multi_context], parameter[name[self].input_grad_arrays, name[self].data_layouts]]] return[name[self].input_grad_arrays]
keyword[def] identifier[get_input_grads] ( identifier[self] , identifier[merge_multi_context] = keyword[True] ): literal[string] keyword[assert] identifier[self] . identifier[inputs_need_grad] keyword[if] identifier[merge_multi_context] : keyword[return] identifier[_merge_multi_context] ( identifier[self] . identifier[input_grad_arrays] , identifier[self] . identifier[data_layouts] ) keyword[return] identifier[self] . identifier[input_grad_arrays]
def get_input_grads(self, merge_multi_context=True): """Get the gradients with respect to the inputs of the module. Parameters ---------- merge_multi_context : bool Defaults to ``True``. In the case when data-parallelism is used, the outputs will be collected from multiple devices. A `True` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output elements are `NDArray`. """ assert self.inputs_need_grad if merge_multi_context: return _merge_multi_context(self.input_grad_arrays, self.data_layouts) # depends on [control=['if'], data=[]] return self.input_grad_arrays
def fit_predict(training_data, fitting_data, tau=1, samples_per_job=0, save_results=True, show=False): from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator from disco.core import Disco """ training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job. """ try: tau = float(tau) if tau <= 0: raise Exception("Parameter tau should be >= 0.") except ValueError: raise Exception("Parameter tau should be numerical.") if fitting_data.params["id_index"] == -1: raise Exception("Predict data should have id_index set.") job = Job(worker=Worker(save_results=save_results)) job.pipeline = [ ("split", Stage("map", input_chain=fitting_data.params["input_chain"], init=simple_init, process=map_predict))] job.params = fitting_data.params job.run(name="lwlr_read_data", input=fitting_data.params["data_tag"]) samples = {} results = [] tau = float(2 * tau ** 2) # calculate tau once counter = 0 for test_id, x in result_iterator(job.wait(show=show)): if samples_per_job == 0: # calculate number of samples per job if len(x) <= 100: # if there is less than 100 attributes samples_per_job = 100 # 100 samples is max per on job else: # there is more than 100 attributes samples_per_job = len(x) * -25 / 900. 
+ 53 # linear function samples[test_id] = x if counter == samples_per_job: results.append(_fit_predict(training_data, samples, tau, save_results, show)) counter = 0 samples = {} counter += 1 if len(samples) > 0: # if there is some samples left in the the dictionary results.append(_fit_predict(training_data, samples, tau, save_results, show)) # merge results of every iteration into a single tag ddfs = Disco().ddfs ddfs.tag(job.name, [[list(ddfs.blobs(tag))[0][0]] for tag in results]) return ["tag://" + job.name]
def function[fit_predict, parameter[training_data, fitting_data, tau, samples_per_job, save_results, show]]: from relative_module[disco.worker.pipeline.worker] import module[Worker], module[Stage] from relative_module[disco.core] import module[Job], module[result_iterator] from relative_module[disco.core] import module[Disco] constant[ training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job. ] <ast.Try object at 0x7da2054a5c90> if compare[call[name[fitting_data].params][constant[id_index]] equal[==] <ast.UnaryOp object at 0x7da2054a45e0>] begin[:] <ast.Raise object at 0x7da2054a5a20> variable[job] assign[=] call[name[Job], parameter[]] name[job].pipeline assign[=] list[[<ast.Tuple object at 0x7da2054a61a0>]] name[job].params assign[=] name[fitting_data].params call[name[job].run, parameter[]] variable[samples] assign[=] dictionary[[], []] variable[results] assign[=] list[[]] variable[tau] assign[=] call[name[float], parameter[binary_operation[constant[2] * binary_operation[name[tau] ** constant[2]]]]] variable[counter] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da2054a5450>, <ast.Name object at 0x7da2054a4e50>]]] in starred[call[name[result_iterator], parameter[call[name[job].wait, parameter[]]]]] begin[:] if compare[name[samples_per_job] equal[==] constant[0]] begin[:] if compare[call[name[len], parameter[name[x]]] less_or_equal[<=] constant[100]] begin[:] variable[samples_per_job] assign[=] constant[100] call[name[samples]][name[test_id]] assign[=] name[x] if compare[name[counter] equal[==] name[samples_per_job]] begin[:] call[name[results].append, parameter[call[name[_fit_predict], parameter[name[training_data], name[samples], name[tau], 
name[save_results], name[show]]]]] variable[counter] assign[=] constant[0] variable[samples] assign[=] dictionary[[], []] <ast.AugAssign object at 0x7da18fe92bf0> if compare[call[name[len], parameter[name[samples]]] greater[>] constant[0]] begin[:] call[name[results].append, parameter[call[name[_fit_predict], parameter[name[training_data], name[samples], name[tau], name[save_results], name[show]]]]] variable[ddfs] assign[=] call[name[Disco], parameter[]].ddfs call[name[ddfs].tag, parameter[name[job].name, <ast.ListComp object at 0x7da18fe93a60>]] return[list[[<ast.BinOp object at 0x7da18fe932b0>]]]
keyword[def] identifier[fit_predict] ( identifier[training_data] , identifier[fitting_data] , identifier[tau] = literal[int] , identifier[samples_per_job] = literal[int] , identifier[save_results] = keyword[True] , identifier[show] = keyword[False] ): keyword[from] identifier[disco] . identifier[worker] . identifier[pipeline] . identifier[worker] keyword[import] identifier[Worker] , identifier[Stage] keyword[from] identifier[disco] . identifier[core] keyword[import] identifier[Job] , identifier[result_iterator] keyword[from] identifier[disco] . identifier[core] keyword[import] identifier[Disco] literal[string] keyword[try] : identifier[tau] = identifier[float] ( identifier[tau] ) keyword[if] identifier[tau] <= literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[fitting_data] . identifier[params] [ literal[string] ]==- literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[job] = identifier[Job] ( identifier[worker] = identifier[Worker] ( identifier[save_results] = identifier[save_results] )) identifier[job] . identifier[pipeline] =[ ( literal[string] , identifier[Stage] ( literal[string] , identifier[input_chain] = identifier[fitting_data] . identifier[params] [ literal[string] ], identifier[init] = identifier[simple_init] , identifier[process] = identifier[map_predict] ))] identifier[job] . identifier[params] = identifier[fitting_data] . identifier[params] identifier[job] . identifier[run] ( identifier[name] = literal[string] , identifier[input] = identifier[fitting_data] . identifier[params] [ literal[string] ]) identifier[samples] ={} identifier[results] =[] identifier[tau] = identifier[float] ( literal[int] * identifier[tau] ** literal[int] ) identifier[counter] = literal[int] keyword[for] identifier[test_id] , identifier[x] keyword[in] identifier[result_iterator] ( identifier[job] . 
identifier[wait] ( identifier[show] = identifier[show] )): keyword[if] identifier[samples_per_job] == literal[int] : keyword[if] identifier[len] ( identifier[x] )<= literal[int] : identifier[samples_per_job] = literal[int] keyword[else] : identifier[samples_per_job] = identifier[len] ( identifier[x] )*- literal[int] / literal[int] + literal[int] identifier[samples] [ identifier[test_id] ]= identifier[x] keyword[if] identifier[counter] == identifier[samples_per_job] : identifier[results] . identifier[append] ( identifier[_fit_predict] ( identifier[training_data] , identifier[samples] , identifier[tau] , identifier[save_results] , identifier[show] )) identifier[counter] = literal[int] identifier[samples] ={} identifier[counter] += literal[int] keyword[if] identifier[len] ( identifier[samples] )> literal[int] : identifier[results] . identifier[append] ( identifier[_fit_predict] ( identifier[training_data] , identifier[samples] , identifier[tau] , identifier[save_results] , identifier[show] )) identifier[ddfs] = identifier[Disco] (). identifier[ddfs] identifier[ddfs] . identifier[tag] ( identifier[job] . identifier[name] ,[[ identifier[list] ( identifier[ddfs] . identifier[blobs] ( identifier[tag] ))[ literal[int] ][ literal[int] ]] keyword[for] identifier[tag] keyword[in] identifier[results] ]) keyword[return] [ literal[string] + identifier[job] . identifier[name] ]
def fit_predict(training_data, fitting_data, tau=1, samples_per_job=0, save_results=True, show=False): from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator from disco.core import Disco '\n training_data - training samples\n fitting_data - dataset to be fitted to training data.\n tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x.\n samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job.\n ' try: tau = float(tau) if tau <= 0: raise Exception('Parameter tau should be >= 0.') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except ValueError: raise Exception('Parameter tau should be numerical.') # depends on [control=['except'], data=[]] if fitting_data.params['id_index'] == -1: raise Exception('Predict data should have id_index set.') # depends on [control=['if'], data=[]] job = Job(worker=Worker(save_results=save_results)) job.pipeline = [('split', Stage('map', input_chain=fitting_data.params['input_chain'], init=simple_init, process=map_predict))] job.params = fitting_data.params job.run(name='lwlr_read_data', input=fitting_data.params['data_tag']) samples = {} results = [] tau = float(2 * tau ** 2) # calculate tau once counter = 0 for (test_id, x) in result_iterator(job.wait(show=show)): if samples_per_job == 0: # calculate number of samples per job if len(x) <= 100: # if there is less than 100 attributes samples_per_job = 100 # 100 samples is max per on job # depends on [control=['if'], data=[]] else: # there is more than 100 attributes samples_per_job = len(x) * -25 / 900.0 + 53 # linear function # depends on [control=['if'], data=['samples_per_job']] samples[test_id] = x if counter == samples_per_job: results.append(_fit_predict(training_data, samples, tau, save_results, show)) counter = 0 samples = {} # depends on 
[control=['if'], data=['counter']] counter += 1 # depends on [control=['for'], data=[]] if len(samples) > 0: # if there is some samples left in the the dictionary results.append(_fit_predict(training_data, samples, tau, save_results, show)) # depends on [control=['if'], data=[]] # merge results of every iteration into a single tag ddfs = Disco().ddfs ddfs.tag(job.name, [[list(ddfs.blobs(tag))[0][0]] for tag in results]) return ['tag://' + job.name]
def unpack_rows(self, parameters_metadata, connection): """Unpack output or input/output parameters from the stored procedure call result :parameters_metadata: a stored procedure parameters metadata :returns: parameter values """ values = [] for param in parameters_metadata: # Unpack OUT or INOUT parameters' values if param.iotype != parameter_direction.IN: values.append( by_type_code[param.datatype].from_resultset(self.payload) ) yield tuple(values)
def function[unpack_rows, parameter[self, parameters_metadata, connection]]: constant[Unpack output or input/output parameters from the stored procedure call result :parameters_metadata: a stored procedure parameters metadata :returns: parameter values ] variable[values] assign[=] list[[]] for taget[name[param]] in starred[name[parameters_metadata]] begin[:] if compare[name[param].iotype not_equal[!=] name[parameter_direction].IN] begin[:] call[name[values].append, parameter[call[call[name[by_type_code]][name[param].datatype].from_resultset, parameter[name[self].payload]]]] <ast.Yield object at 0x7da204344910>
keyword[def] identifier[unpack_rows] ( identifier[self] , identifier[parameters_metadata] , identifier[connection] ): literal[string] identifier[values] =[] keyword[for] identifier[param] keyword[in] identifier[parameters_metadata] : keyword[if] identifier[param] . identifier[iotype] != identifier[parameter_direction] . identifier[IN] : identifier[values] . identifier[append] ( identifier[by_type_code] [ identifier[param] . identifier[datatype] ]. identifier[from_resultset] ( identifier[self] . identifier[payload] )) keyword[yield] identifier[tuple] ( identifier[values] )
def unpack_rows(self, parameters_metadata, connection): """Unpack output or input/output parameters from the stored procedure call result :parameters_metadata: a stored procedure parameters metadata :returns: parameter values """ values = [] for param in parameters_metadata: # Unpack OUT or INOUT parameters' values if param.iotype != parameter_direction.IN: values.append(by_type_code[param.datatype].from_resultset(self.payload)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['param']] yield tuple(values)
def create(self, product_type, attribute_set_id, sku, data): """ Create Product and return ID :param product_type: String type of product :param attribute_set_id: ID of attribute set :param sku: SKU of the product :param data: Dictionary of data :return: INT id of product created """ return int(self.call( 'catalog_product.create', [product_type, attribute_set_id, sku, data] ) )
def function[create, parameter[self, product_type, attribute_set_id, sku, data]]: constant[ Create Product and return ID :param product_type: String type of product :param attribute_set_id: ID of attribute set :param sku: SKU of the product :param data: Dictionary of data :return: INT id of product created ] return[call[name[int], parameter[call[name[self].call, parameter[constant[catalog_product.create], list[[<ast.Name object at 0x7da1b04f57b0>, <ast.Name object at 0x7da1b04f46a0>, <ast.Name object at 0x7da1b04f5210>, <ast.Name object at 0x7da1b04f5060>]]]]]]]
keyword[def] identifier[create] ( identifier[self] , identifier[product_type] , identifier[attribute_set_id] , identifier[sku] , identifier[data] ): literal[string] keyword[return] identifier[int] ( identifier[self] . identifier[call] ( literal[string] , [ identifier[product_type] , identifier[attribute_set_id] , identifier[sku] , identifier[data] ] ) )
def create(self, product_type, attribute_set_id, sku, data): """ Create Product and return ID :param product_type: String type of product :param attribute_set_id: ID of attribute set :param sku: SKU of the product :param data: Dictionary of data :return: INT id of product created """ return int(self.call('catalog_product.create', [product_type, attribute_set_id, sku, data]))
def clear_queue(self, trans_id=None): ''' Clear the queue of database operations without executing any of the pending operations''' if not self.queue: return if trans_id is None: self.queue = [] return for index, op in enumerate(self.queue): if op.trans_id == trans_id: break self.queue = self.queue[:index]
def function[clear_queue, parameter[self, trans_id]]: constant[ Clear the queue of database operations without executing any of the pending operations] if <ast.UnaryOp object at 0x7da207f9ac80> begin[:] return[None] if compare[name[trans_id] is constant[None]] begin[:] name[self].queue assign[=] list[[]] return[None] for taget[tuple[[<ast.Name object at 0x7da207f98fa0>, <ast.Name object at 0x7da207f9a1a0>]]] in starred[call[name[enumerate], parameter[name[self].queue]]] begin[:] if compare[name[op].trans_id equal[==] name[trans_id]] begin[:] break name[self].queue assign[=] call[name[self].queue][<ast.Slice object at 0x7da207f989d0>]
keyword[def] identifier[clear_queue] ( identifier[self] , identifier[trans_id] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[queue] : keyword[return] keyword[if] identifier[trans_id] keyword[is] keyword[None] : identifier[self] . identifier[queue] =[] keyword[return] keyword[for] identifier[index] , identifier[op] keyword[in] identifier[enumerate] ( identifier[self] . identifier[queue] ): keyword[if] identifier[op] . identifier[trans_id] == identifier[trans_id] : keyword[break] identifier[self] . identifier[queue] = identifier[self] . identifier[queue] [: identifier[index] ]
def clear_queue(self, trans_id=None): """ Clear the queue of database operations without executing any of the pending operations""" if not self.queue: return # depends on [control=['if'], data=[]] if trans_id is None: self.queue = [] return # depends on [control=['if'], data=[]] for (index, op) in enumerate(self.queue): if op.trans_id == trans_id: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] self.queue = self.queue[:index]
def cdx_limit(cdx_iter, limit): """ limit cdx to at most `limit`. """ # for cdx, _ in itertools.izip(cdx_iter, xrange(limit)): # yield cdx return (cdx for cdx, _ in zip(cdx_iter, range(limit)))
def function[cdx_limit, parameter[cdx_iter, limit]]: constant[ limit cdx to at most `limit`. ] return[<ast.GeneratorExp object at 0x7da20c9931f0>]
keyword[def] identifier[cdx_limit] ( identifier[cdx_iter] , identifier[limit] ): literal[string] keyword[return] ( identifier[cdx] keyword[for] identifier[cdx] , identifier[_] keyword[in] identifier[zip] ( identifier[cdx_iter] , identifier[range] ( identifier[limit] )))
def cdx_limit(cdx_iter, limit): """ limit cdx to at most `limit`. """ # for cdx, _ in itertools.izip(cdx_iter, xrange(limit)): # yield cdx return (cdx for (cdx, _) in zip(cdx_iter, range(limit)))
def get_user_config_dir(app_name, app_author, roaming=True, force_xdg=True): """Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``~/Library/Application Support/My App`` Mac OS X (XDG): ``~/.config/my-app`` Unix: ``~/.config/my-app`` Windows 7 (roaming): ``C:\\Users\<user>\AppData\Roaming\Acme\My App`` Windows 7 (not roaming): ``C:\\Users\<user>\AppData\Local\Acme\My App`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no effect on non-Windows systems. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems. """ if WIN: key = 'APPDATA' if roaming else 'LOCALAPPDATA' folder = os.path.expanduser(os.environ.get(key, '~')) return os.path.join(folder, app_author, app_name) if MAC and not force_xdg: return os.path.join(os.path.expanduser( '~/Library/Application Support'), app_name) return os.path.join( os.path.expanduser(os.environ.get('XDG_CONFIG_HOME', '~/.config')), _pathify(app_name))
def function[get_user_config_dir, parameter[app_name, app_author, roaming, force_xdg]]: constant[Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``~/Library/Application Support/My App`` Mac OS X (XDG): ``~/.config/my-app`` Unix: ``~/.config/my-app`` Windows 7 (roaming): ``C:\Users\<user>\AppData\Roaming\Acme\My App`` Windows 7 (not roaming): ``C:\Users\<user>\AppData\Local\Acme\My App`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no effect on non-Windows systems. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems. ] if name[WIN] begin[:] variable[key] assign[=] <ast.IfExp object at 0x7da1b2344730> variable[folder] assign[=] call[name[os].path.expanduser, parameter[call[name[os].environ.get, parameter[name[key], constant[~]]]]] return[call[name[os].path.join, parameter[name[folder], name[app_author], name[app_name]]]] if <ast.BoolOp object at 0x7da1b2344d60> begin[:] return[call[name[os].path.join, parameter[call[name[os].path.expanduser, parameter[constant[~/Library/Application Support]]], name[app_name]]]] return[call[name[os].path.join, parameter[call[name[os].path.expanduser, parameter[call[name[os].environ.get, parameter[constant[XDG_CONFIG_HOME], constant[~/.config]]]]], call[name[_pathify], parameter[name[app_name]]]]]]
keyword[def] identifier[get_user_config_dir] ( identifier[app_name] , identifier[app_author] , identifier[roaming] = keyword[True] , identifier[force_xdg] = keyword[True] ): literal[string] keyword[if] identifier[WIN] : identifier[key] = literal[string] keyword[if] identifier[roaming] keyword[else] literal[string] identifier[folder] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[os] . identifier[environ] . identifier[get] ( identifier[key] , literal[string] )) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[app_author] , identifier[app_name] ) keyword[if] identifier[MAC] keyword[and] keyword[not] identifier[force_xdg] : keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ), identifier[app_name] ) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )), identifier[_pathify] ( identifier[app_name] ))
def get_user_config_dir(app_name, app_author, roaming=True, force_xdg=True): """Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``~/Library/Application Support/My App`` Mac OS X (XDG): ``~/.config/my-app`` Unix: ``~/.config/my-app`` Windows 7 (roaming): ``C:\\Users\\<user>\\AppData\\Roaming\\Acme\\My App`` Windows 7 (not roaming): ``C:\\Users\\<user>\\AppData\\Local\\Acme\\My App`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no effect on non-Windows systems. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems. """ if WIN: key = 'APPDATA' if roaming else 'LOCALAPPDATA' folder = os.path.expanduser(os.environ.get(key, '~')) return os.path.join(folder, app_author, app_name) # depends on [control=['if'], data=[]] if MAC and (not force_xdg): return os.path.join(os.path.expanduser('~/Library/Application Support'), app_name) # depends on [control=['if'], data=[]] return os.path.join(os.path.expanduser(os.environ.get('XDG_CONFIG_HOME', '~/.config')), _pathify(app_name))
def __create_profile_from_identities(self, identities, uuid, verbose): """Create a profile using the data from the identities""" import re EMAIL_ADDRESS_REGEX = r"^(?P<email>[^\s@]+@[^\s@.]+\.[^\s@]+)$" NAME_REGEX = r"^\w+\s\w+" name = None email = None username = None for identity in identities: if not name and identity.name: m = re.match(NAME_REGEX, identity.name) if m: name = identity.name if not email and identity.email: m = re.match(EMAIL_ADDRESS_REGEX, identity.email) if m: email = identity.email if not username: if identity.username and identity.username != 'None': username = identity.username # We need a name for each profile, so if no one was defined, # use email or username to complete it. if not name: if email: name = email.split('@')[0] elif username: # filter email addresses on username fields name = username.split('@')[0] else: name = None kw = {'name': name, 'email': email} api.edit_profile(self.db, uuid, **kw) self.log("-- profile %s updated" % uuid, verbose)
def function[__create_profile_from_identities, parameter[self, identities, uuid, verbose]]: constant[Create a profile using the data from the identities] import module[re] variable[EMAIL_ADDRESS_REGEX] assign[=] constant[^(?P<email>[^\s@]+@[^\s@.]+\.[^\s@]+)$] variable[NAME_REGEX] assign[=] constant[^\w+\s\w+] variable[name] assign[=] constant[None] variable[email] assign[=] constant[None] variable[username] assign[=] constant[None] for taget[name[identity]] in starred[name[identities]] begin[:] if <ast.BoolOp object at 0x7da1b0d0f490> begin[:] variable[m] assign[=] call[name[re].match, parameter[name[NAME_REGEX], name[identity].name]] if name[m] begin[:] variable[name] assign[=] name[identity].name if <ast.BoolOp object at 0x7da1b0d0ee60> begin[:] variable[m] assign[=] call[name[re].match, parameter[name[EMAIL_ADDRESS_REGEX], name[identity].email]] if name[m] begin[:] variable[email] assign[=] name[identity].email if <ast.UnaryOp object at 0x7da1b0dbcc10> begin[:] if <ast.BoolOp object at 0x7da1b0dbea10> begin[:] variable[username] assign[=] name[identity].username if <ast.UnaryOp object at 0x7da1b0dbe200> begin[:] if name[email] begin[:] variable[name] assign[=] call[call[name[email].split, parameter[constant[@]]]][constant[0]] variable[kw] assign[=] dictionary[[<ast.Constant object at 0x7da1b0dbde10>, <ast.Constant object at 0x7da1b0dbc850>], [<ast.Name object at 0x7da1b0dbc5b0>, <ast.Name object at 0x7da1b0dbc880>]] call[name[api].edit_profile, parameter[name[self].db, name[uuid]]] call[name[self].log, parameter[binary_operation[constant[-- profile %s updated] <ast.Mod object at 0x7da2590d6920> name[uuid]], name[verbose]]]
keyword[def] identifier[__create_profile_from_identities] ( identifier[self] , identifier[identities] , identifier[uuid] , identifier[verbose] ): literal[string] keyword[import] identifier[re] identifier[EMAIL_ADDRESS_REGEX] = literal[string] identifier[NAME_REGEX] = literal[string] identifier[name] = keyword[None] identifier[email] = keyword[None] identifier[username] = keyword[None] keyword[for] identifier[identity] keyword[in] identifier[identities] : keyword[if] keyword[not] identifier[name] keyword[and] identifier[identity] . identifier[name] : identifier[m] = identifier[re] . identifier[match] ( identifier[NAME_REGEX] , identifier[identity] . identifier[name] ) keyword[if] identifier[m] : identifier[name] = identifier[identity] . identifier[name] keyword[if] keyword[not] identifier[email] keyword[and] identifier[identity] . identifier[email] : identifier[m] = identifier[re] . identifier[match] ( identifier[EMAIL_ADDRESS_REGEX] , identifier[identity] . identifier[email] ) keyword[if] identifier[m] : identifier[email] = identifier[identity] . identifier[email] keyword[if] keyword[not] identifier[username] : keyword[if] identifier[identity] . identifier[username] keyword[and] identifier[identity] . identifier[username] != literal[string] : identifier[username] = identifier[identity] . identifier[username] keyword[if] keyword[not] identifier[name] : keyword[if] identifier[email] : identifier[name] = identifier[email] . identifier[split] ( literal[string] )[ literal[int] ] keyword[elif] identifier[username] : identifier[name] = identifier[username] . identifier[split] ( literal[string] )[ literal[int] ] keyword[else] : identifier[name] = keyword[None] identifier[kw] ={ literal[string] : identifier[name] , literal[string] : identifier[email] } identifier[api] . identifier[edit_profile] ( identifier[self] . identifier[db] , identifier[uuid] ,** identifier[kw] ) identifier[self] . identifier[log] ( literal[string] % identifier[uuid] , identifier[verbose] )
def __create_profile_from_identities(self, identities, uuid, verbose): """Create a profile using the data from the identities""" import re EMAIL_ADDRESS_REGEX = '^(?P<email>[^\\s@]+@[^\\s@.]+\\.[^\\s@]+)$' NAME_REGEX = '^\\w+\\s\\w+' name = None email = None username = None for identity in identities: if not name and identity.name: m = re.match(NAME_REGEX, identity.name) if m: name = identity.name # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not email and identity.email: m = re.match(EMAIL_ADDRESS_REGEX, identity.email) if m: email = identity.email # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not username: if identity.username and identity.username != 'None': username = identity.username # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['identity']] # We need a name for each profile, so if no one was defined, # use email or username to complete it. if not name: if email: name = email.split('@')[0] # depends on [control=['if'], data=[]] elif username: # filter email addresses on username fields name = username.split('@')[0] # depends on [control=['if'], data=[]] else: name = None # depends on [control=['if'], data=[]] kw = {'name': name, 'email': email} api.edit_profile(self.db, uuid, **kw) self.log('-- profile %s updated' % uuid, verbose)
def is_clockwise(vertices): """ Evaluate whether vertices are in clockwise order. Args: vertices: list of vertices (x, y) in polygon. Returns: True: clockwise, False: counter-clockwise Raises: ValueError: the polygon is complex or overlapped. """ it = iterator.consecutive(cycle(vertices), 3) clockwise = 0 counter = 0 for _ in range(len(vertices)): p0, p1, p2 = next(it) cross = cross_product(p1, p2, p0) int_angle = interior_angle(p0, p2, p1) # raises ValueError if cross < 0: clockwise += int_angle counter += 2 * pi - int_angle else: clockwise += 2 * pi - int_angle counter += int_angle if round(clockwise / pi) == len(vertices) - 2: return True elif round(counter / pi) == len(vertices) - 2: return False else: raise ValueError("the polygon is complex or overlapped")
def function[is_clockwise, parameter[vertices]]: constant[ Evaluate whether vertices are in clockwise order. Args: vertices: list of vertices (x, y) in polygon. Returns: True: clockwise, False: counter-clockwise Raises: ValueError: the polygon is complex or overlapped. ] variable[it] assign[=] call[name[iterator].consecutive, parameter[call[name[cycle], parameter[name[vertices]]], constant[3]]] variable[clockwise] assign[=] constant[0] variable[counter] assign[=] constant[0] for taget[name[_]] in starred[call[name[range], parameter[call[name[len], parameter[name[vertices]]]]]] begin[:] <ast.Tuple object at 0x7da1b24ff280> assign[=] call[name[next], parameter[name[it]]] variable[cross] assign[=] call[name[cross_product], parameter[name[p1], name[p2], name[p0]]] variable[int_angle] assign[=] call[name[interior_angle], parameter[name[p0], name[p2], name[p1]]] if compare[name[cross] less[<] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b24fe470> <ast.AugAssign object at 0x7da1b24feb90> if compare[call[name[round], parameter[binary_operation[name[clockwise] / name[pi]]]] equal[==] binary_operation[call[name[len], parameter[name[vertices]]] - constant[2]]] begin[:] return[constant[True]]
keyword[def] identifier[is_clockwise] ( identifier[vertices] ): literal[string] identifier[it] = identifier[iterator] . identifier[consecutive] ( identifier[cycle] ( identifier[vertices] ), literal[int] ) identifier[clockwise] = literal[int] identifier[counter] = literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[len] ( identifier[vertices] )): identifier[p0] , identifier[p1] , identifier[p2] = identifier[next] ( identifier[it] ) identifier[cross] = identifier[cross_product] ( identifier[p1] , identifier[p2] , identifier[p0] ) identifier[int_angle] = identifier[interior_angle] ( identifier[p0] , identifier[p2] , identifier[p1] ) keyword[if] identifier[cross] < literal[int] : identifier[clockwise] += identifier[int_angle] identifier[counter] += literal[int] * identifier[pi] - identifier[int_angle] keyword[else] : identifier[clockwise] += literal[int] * identifier[pi] - identifier[int_angle] identifier[counter] += identifier[int_angle] keyword[if] identifier[round] ( identifier[clockwise] / identifier[pi] )== identifier[len] ( identifier[vertices] )- literal[int] : keyword[return] keyword[True] keyword[elif] identifier[round] ( identifier[counter] / identifier[pi] )== identifier[len] ( identifier[vertices] )- literal[int] : keyword[return] keyword[False] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
def is_clockwise(vertices): """ Evaluate whether vertices are in clockwise order. Args: vertices: list of vertices (x, y) in polygon. Returns: True: clockwise, False: counter-clockwise Raises: ValueError: the polygon is complex or overlapped. """ it = iterator.consecutive(cycle(vertices), 3) clockwise = 0 counter = 0 for _ in range(len(vertices)): (p0, p1, p2) = next(it) cross = cross_product(p1, p2, p0) int_angle = interior_angle(p0, p2, p1) # raises ValueError if cross < 0: clockwise += int_angle counter += 2 * pi - int_angle # depends on [control=['if'], data=[]] else: clockwise += 2 * pi - int_angle counter += int_angle # depends on [control=['for'], data=[]] if round(clockwise / pi) == len(vertices) - 2: return True # depends on [control=['if'], data=[]] elif round(counter / pi) == len(vertices) - 2: return False # depends on [control=['if'], data=[]] else: raise ValueError('the polygon is complex or overlapped')
def candidate_subclass( class_name, args, table_name=None, cardinality=None, values=None ): """ Creates and returns a Candidate subclass with provided argument names, which are Context type. Creates the table in DB if does not exist yet. Import using: .. code-block:: python from fonduer.candidates.models import candidate_subclass :param class_name: The name of the class, should be "camel case" e.g. NewCandidate :param args: A list of names of constituent arguments, which refer to the Contexts--representing mentions--that comprise the candidate :param table_name: The name of the corresponding table in DB; if not provided, is converted from camel case by default, e.g. new_candidate :param cardinality: The cardinality of the variable corresponding to the Candidate. By default is 2 i.e. is a binary value, e.g. is or is not a true mention. """ if table_name is None: table_name = camel_to_under(class_name) # If cardinality and values are None, default to binary classification if cardinality is None and values is None: values = [True, False] cardinality = 2 # Else use values if present, and validate proper input elif values is not None: if cardinality is not None and len(values) != cardinality: raise ValueError("Number of values must match cardinality.") if None in values: raise ValueError("`None` is a protected value.") # Note that bools are instances of ints in Python... if any([isinstance(v, int) and not isinstance(v, bool) for v in values]): raise ValueError( ( "Default usage of values is consecutive integers." "Leave values unset if trying to define values as integers." 
) ) cardinality = len(values) # If cardinality is specified but not values, fill in with ints elif cardinality is not None: values = list(range(cardinality)) class_spec = (args, table_name, cardinality, values) if class_name in candidate_subclasses: if class_spec == candidate_subclasses[class_name][1]: return candidate_subclasses[class_name][0] else: raise ValueError( f"Candidate subclass {class_name} " f"already exists in memory with incompatible " f"specification: {candidate_subclasses[class_name][1]}" ) else: # Set the class attributes == the columns in the database class_attribs = { # Declares name for storage table "__tablename__": table_name, # Connects candidate_subclass records to generic Candidate records "id": Column( Integer, ForeignKey("candidate.id", ondelete="CASCADE"), primary_key=True, ), # Store values & cardinality information in the class only "values": values, "cardinality": cardinality, # Polymorphism information for SQLAlchemy "__mapper_args__": {"polymorphic_identity": table_name}, # Helper method to get argument names "__argnames__": [_.__tablename__ for _ in args], "mentions": args, } class_attribs["document_id"] = Column( Integer, ForeignKey("document.id", ondelete="CASCADE") ) class_attribs["document"] = relationship( "Document", backref=backref(table_name + "s", cascade="all, delete-orphan"), foreign_keys=class_attribs["document_id"], ) # Create named arguments, i.e. the entity mentions comprising the # relation mention. 
unique_args = [] for arg in args: # Primary arguments are constituent Contexts, and their ids class_attribs[arg.__tablename__ + "_id"] = Column( Integer, ForeignKey(arg.__tablename__ + ".id", ondelete="CASCADE") ) class_attribs[arg.__tablename__] = relationship( arg.__name__, backref=backref( table_name + "_" + arg.__tablename__ + "s", cascade_backrefs=False, cascade="all, delete-orphan", ), cascade_backrefs=False, foreign_keys=class_attribs[arg.__tablename__ + "_id"], ) unique_args.append(class_attribs[arg.__tablename__ + "_id"]) # Add unique constraints to the arguments class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),) # Create class C = type(class_name, (Candidate,), class_attribs) # Create table in DB if not Meta.engine.dialect.has_table(Meta.engine, table_name): C.__table__.create(bind=Meta.engine) candidate_subclasses[class_name] = C, class_spec return C
def function[candidate_subclass, parameter[class_name, args, table_name, cardinality, values]]: constant[ Creates and returns a Candidate subclass with provided argument names, which are Context type. Creates the table in DB if does not exist yet. Import using: .. code-block:: python from fonduer.candidates.models import candidate_subclass :param class_name: The name of the class, should be "camel case" e.g. NewCandidate :param args: A list of names of constituent arguments, which refer to the Contexts--representing mentions--that comprise the candidate :param table_name: The name of the corresponding table in DB; if not provided, is converted from camel case by default, e.g. new_candidate :param cardinality: The cardinality of the variable corresponding to the Candidate. By default is 2 i.e. is a binary value, e.g. is or is not a true mention. ] if compare[name[table_name] is constant[None]] begin[:] variable[table_name] assign[=] call[name[camel_to_under], parameter[name[class_name]]] if <ast.BoolOp object at 0x7da1b23479d0> begin[:] variable[values] assign[=] list[[<ast.Constant object at 0x7da1b2347a90>, <ast.Constant object at 0x7da1b2345db0>]] variable[cardinality] assign[=] constant[2] variable[class_spec] assign[=] tuple[[<ast.Name object at 0x7da1b23441c0>, <ast.Name object at 0x7da1b23475b0>, <ast.Name object at 0x7da1b2346e90>, <ast.Name object at 0x7da1b2345d20>]] if compare[name[class_name] in name[candidate_subclasses]] begin[:] if compare[name[class_spec] equal[==] call[call[name[candidate_subclasses]][name[class_name]]][constant[1]]] begin[:] return[call[call[name[candidate_subclasses]][name[class_name]]][constant[0]]]
keyword[def] identifier[candidate_subclass] ( identifier[class_name] , identifier[args] , identifier[table_name] = keyword[None] , identifier[cardinality] = keyword[None] , identifier[values] = keyword[None] ): literal[string] keyword[if] identifier[table_name] keyword[is] keyword[None] : identifier[table_name] = identifier[camel_to_under] ( identifier[class_name] ) keyword[if] identifier[cardinality] keyword[is] keyword[None] keyword[and] identifier[values] keyword[is] keyword[None] : identifier[values] =[ keyword[True] , keyword[False] ] identifier[cardinality] = literal[int] keyword[elif] identifier[values] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[cardinality] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[values] )!= identifier[cardinality] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] keyword[None] keyword[in] identifier[values] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[any] ([ identifier[isinstance] ( identifier[v] , identifier[int] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[v] , identifier[bool] ) keyword[for] identifier[v] keyword[in] identifier[values] ]): keyword[raise] identifier[ValueError] ( ( literal[string] literal[string] ) ) identifier[cardinality] = identifier[len] ( identifier[values] ) keyword[elif] identifier[cardinality] keyword[is] keyword[not] keyword[None] : identifier[values] = identifier[list] ( identifier[range] ( identifier[cardinality] )) identifier[class_spec] =( identifier[args] , identifier[table_name] , identifier[cardinality] , identifier[values] ) keyword[if] identifier[class_name] keyword[in] identifier[candidate_subclasses] : keyword[if] identifier[class_spec] == identifier[candidate_subclasses] [ identifier[class_name] ][ literal[int] ]: keyword[return] identifier[candidate_subclasses] [ identifier[class_name] ][ literal[int] ] keyword[else] : keyword[raise] identifier[ValueError] ( 
literal[string] literal[string] literal[string] ) keyword[else] : identifier[class_attribs] ={ literal[string] : identifier[table_name] , literal[string] : identifier[Column] ( identifier[Integer] , identifier[ForeignKey] ( literal[string] , identifier[ondelete] = literal[string] ), identifier[primary_key] = keyword[True] , ), literal[string] : identifier[values] , literal[string] : identifier[cardinality] , literal[string] :{ literal[string] : identifier[table_name] }, literal[string] :[ identifier[_] . identifier[__tablename__] keyword[for] identifier[_] keyword[in] identifier[args] ], literal[string] : identifier[args] , } identifier[class_attribs] [ literal[string] ]= identifier[Column] ( identifier[Integer] , identifier[ForeignKey] ( literal[string] , identifier[ondelete] = literal[string] ) ) identifier[class_attribs] [ literal[string] ]= identifier[relationship] ( literal[string] , identifier[backref] = identifier[backref] ( identifier[table_name] + literal[string] , identifier[cascade] = literal[string] ), identifier[foreign_keys] = identifier[class_attribs] [ literal[string] ], ) identifier[unique_args] =[] keyword[for] identifier[arg] keyword[in] identifier[args] : identifier[class_attribs] [ identifier[arg] . identifier[__tablename__] + literal[string] ]= identifier[Column] ( identifier[Integer] , identifier[ForeignKey] ( identifier[arg] . identifier[__tablename__] + literal[string] , identifier[ondelete] = literal[string] ) ) identifier[class_attribs] [ identifier[arg] . identifier[__tablename__] ]= identifier[relationship] ( identifier[arg] . identifier[__name__] , identifier[backref] = identifier[backref] ( identifier[table_name] + literal[string] + identifier[arg] . identifier[__tablename__] + literal[string] , identifier[cascade_backrefs] = keyword[False] , identifier[cascade] = literal[string] , ), identifier[cascade_backrefs] = keyword[False] , identifier[foreign_keys] = identifier[class_attribs] [ identifier[arg] . 
identifier[__tablename__] + literal[string] ], ) identifier[unique_args] . identifier[append] ( identifier[class_attribs] [ identifier[arg] . identifier[__tablename__] + literal[string] ]) identifier[class_attribs] [ literal[string] ]=( identifier[UniqueConstraint] (* identifier[unique_args] ),) identifier[C] = identifier[type] ( identifier[class_name] ,( identifier[Candidate] ,), identifier[class_attribs] ) keyword[if] keyword[not] identifier[Meta] . identifier[engine] . identifier[dialect] . identifier[has_table] ( identifier[Meta] . identifier[engine] , identifier[table_name] ): identifier[C] . identifier[__table__] . identifier[create] ( identifier[bind] = identifier[Meta] . identifier[engine] ) identifier[candidate_subclasses] [ identifier[class_name] ]= identifier[C] , identifier[class_spec] keyword[return] identifier[C]
def candidate_subclass(class_name, args, table_name=None, cardinality=None, values=None): """ Creates and returns a Candidate subclass with provided argument names, which are Context type. Creates the table in DB if does not exist yet. Import using: .. code-block:: python from fonduer.candidates.models import candidate_subclass :param class_name: The name of the class, should be "camel case" e.g. NewCandidate :param args: A list of names of constituent arguments, which refer to the Contexts--representing mentions--that comprise the candidate :param table_name: The name of the corresponding table in DB; if not provided, is converted from camel case by default, e.g. new_candidate :param cardinality: The cardinality of the variable corresponding to the Candidate. By default is 2 i.e. is a binary value, e.g. is or is not a true mention. """ if table_name is None: table_name = camel_to_under(class_name) # depends on [control=['if'], data=['table_name']] # If cardinality and values are None, default to binary classification if cardinality is None and values is None: values = [True, False] cardinality = 2 # depends on [control=['if'], data=[]] # Else use values if present, and validate proper input elif values is not None: if cardinality is not None and len(values) != cardinality: raise ValueError('Number of values must match cardinality.') # depends on [control=['if'], data=[]] if None in values: raise ValueError('`None` is a protected value.') # depends on [control=['if'], data=[]] # Note that bools are instances of ints in Python... 
if any([isinstance(v, int) and (not isinstance(v, bool)) for v in values]): raise ValueError('Default usage of values is consecutive integers.Leave values unset if trying to define values as integers.') # depends on [control=['if'], data=[]] cardinality = len(values) # depends on [control=['if'], data=['values']] # If cardinality is specified but not values, fill in with ints elif cardinality is not None: values = list(range(cardinality)) # depends on [control=['if'], data=['cardinality']] class_spec = (args, table_name, cardinality, values) if class_name in candidate_subclasses: if class_spec == candidate_subclasses[class_name][1]: return candidate_subclasses[class_name][0] # depends on [control=['if'], data=[]] else: raise ValueError(f'Candidate subclass {class_name} already exists in memory with incompatible specification: {candidate_subclasses[class_name][1]}') # depends on [control=['if'], data=['class_name', 'candidate_subclasses']] else: # Set the class attributes == the columns in the database # Declares name for storage table # Connects candidate_subclass records to generic Candidate records # Store values & cardinality information in the class only # Polymorphism information for SQLAlchemy # Helper method to get argument names class_attribs = {'__tablename__': table_name, 'id': Column(Integer, ForeignKey('candidate.id', ondelete='CASCADE'), primary_key=True), 'values': values, 'cardinality': cardinality, '__mapper_args__': {'polymorphic_identity': table_name}, '__argnames__': [_.__tablename__ for _ in args], 'mentions': args} class_attribs['document_id'] = Column(Integer, ForeignKey('document.id', ondelete='CASCADE')) class_attribs['document'] = relationship('Document', backref=backref(table_name + 's', cascade='all, delete-orphan'), foreign_keys=class_attribs['document_id']) # Create named arguments, i.e. the entity mentions comprising the # relation mention. 
unique_args = [] for arg in args: # Primary arguments are constituent Contexts, and their ids class_attribs[arg.__tablename__ + '_id'] = Column(Integer, ForeignKey(arg.__tablename__ + '.id', ondelete='CASCADE')) class_attribs[arg.__tablename__] = relationship(arg.__name__, backref=backref(table_name + '_' + arg.__tablename__ + 's', cascade_backrefs=False, cascade='all, delete-orphan'), cascade_backrefs=False, foreign_keys=class_attribs[arg.__tablename__ + '_id']) unique_args.append(class_attribs[arg.__tablename__ + '_id']) # depends on [control=['for'], data=['arg']] # Add unique constraints to the arguments class_attribs['__table_args__'] = (UniqueConstraint(*unique_args),) # Create class C = type(class_name, (Candidate,), class_attribs) # Create table in DB if not Meta.engine.dialect.has_table(Meta.engine, table_name): C.__table__.create(bind=Meta.engine) # depends on [control=['if'], data=[]] candidate_subclasses[class_name] = (C, class_spec) return C
def __get_query_agg_ts(cls, field, time_field, interval=None, time_zone=None, start=None, end=None, agg_type='count', offset=None): """ Create an es_dsl aggregation object for getting the time series values for a field. :param field: field to get the time series values :param time_field: field with the date :param interval: interval to be used to generate the time series values, such as:(year(y), quarter(q), month(M), week(w), day(d), hour(h), minute(m), second(s)) :param time_zone: time zone for the time_field :param start: date from for the time series, should be a datetime.datetime object :param end: date to for the time series, should be a datetime.datetime object :param agg_type: kind of aggregation for the field (cardinality, avg, percentiles) :param offset: offset to be added to the time_field in days :return: a aggregation object to calculate timeseries values of a field """ """ Time series for an aggregation metric """ if not interval: interval = '1M' if not time_zone: time_zone = 'UTC' if not field: field_agg = '' else: if agg_type == "cardinality": agg_id, field_agg = cls.__get_query_agg_cardinality(field, agg_id=cls.AGGREGATION_ID + 1) elif agg_type == "avg": agg_id, field_agg = cls.__get_query_agg_avg(field, agg_id=cls.AGGREGATION_ID + 1) elif agg_type == "percentiles": agg_id, field_agg = cls.__get_query_agg_percentiles(field, agg_id=cls.AGGREGATION_ID + 1) else: raise RuntimeError("Aggregation of %s in ts not supported" % agg_type) bounds = {} if start or end: if not offset: # With offset and quarter interval bogus buckets are added # to the start and to the end if extended_bounds is used # https://github.com/elastic/elasticsearch/issues/23776 bounds = cls.__get_bounds(start, end) else: bounds = {'offset': offset} query_agg = A("date_histogram", field=time_field, interval=interval, time_zone=time_zone, min_doc_count=0, **bounds) agg_dict = field_agg.to_dict()[field_agg.name] query_agg.bucket(agg_id, field_agg.name, **agg_dict) return 
(cls.AGGREGATION_ID, query_agg)
def function[__get_query_agg_ts, parameter[cls, field, time_field, interval, time_zone, start, end, agg_type, offset]]: constant[ Create an es_dsl aggregation object for getting the time series values for a field. :param field: field to get the time series values :param time_field: field with the date :param interval: interval to be used to generate the time series values, such as:(year(y), quarter(q), month(M), week(w), day(d), hour(h), minute(m), second(s)) :param time_zone: time zone for the time_field :param start: date from for the time series, should be a datetime.datetime object :param end: date to for the time series, should be a datetime.datetime object :param agg_type: kind of aggregation for the field (cardinality, avg, percentiles) :param offset: offset to be added to the time_field in days :return: a aggregation object to calculate timeseries values of a field ] constant[ Time series for an aggregation metric ] if <ast.UnaryOp object at 0x7da1b26a3ca0> begin[:] variable[interval] assign[=] constant[1M] if <ast.UnaryOp object at 0x7da1b26a38e0> begin[:] variable[time_zone] assign[=] constant[UTC] if <ast.UnaryOp object at 0x7da1b26a1c30> begin[:] variable[field_agg] assign[=] constant[] variable[bounds] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da1b2667af0> begin[:] if <ast.UnaryOp object at 0x7da1b26663b0> begin[:] variable[bounds] assign[=] call[name[cls].__get_bounds, parameter[name[start], name[end]]] variable[query_agg] assign[=] call[name[A], parameter[constant[date_histogram]]] variable[agg_dict] assign[=] call[call[name[field_agg].to_dict, parameter[]]][name[field_agg].name] call[name[query_agg].bucket, parameter[name[agg_id], name[field_agg].name]] return[tuple[[<ast.Attribute object at 0x7da1b25318a0>, <ast.Name object at 0x7da1b2530580>]]]
keyword[def] identifier[__get_query_agg_ts] ( identifier[cls] , identifier[field] , identifier[time_field] , identifier[interval] = keyword[None] , identifier[time_zone] = keyword[None] , identifier[start] = keyword[None] , identifier[end] = keyword[None] , identifier[agg_type] = literal[string] , identifier[offset] = keyword[None] ): literal[string] literal[string] keyword[if] keyword[not] identifier[interval] : identifier[interval] = literal[string] keyword[if] keyword[not] identifier[time_zone] : identifier[time_zone] = literal[string] keyword[if] keyword[not] identifier[field] : identifier[field_agg] = literal[string] keyword[else] : keyword[if] identifier[agg_type] == literal[string] : identifier[agg_id] , identifier[field_agg] = identifier[cls] . identifier[__get_query_agg_cardinality] ( identifier[field] , identifier[agg_id] = identifier[cls] . identifier[AGGREGATION_ID] + literal[int] ) keyword[elif] identifier[agg_type] == literal[string] : identifier[agg_id] , identifier[field_agg] = identifier[cls] . identifier[__get_query_agg_avg] ( identifier[field] , identifier[agg_id] = identifier[cls] . identifier[AGGREGATION_ID] + literal[int] ) keyword[elif] identifier[agg_type] == literal[string] : identifier[agg_id] , identifier[field_agg] = identifier[cls] . identifier[__get_query_agg_percentiles] ( identifier[field] , identifier[agg_id] = identifier[cls] . identifier[AGGREGATION_ID] + literal[int] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[agg_type] ) identifier[bounds] ={} keyword[if] identifier[start] keyword[or] identifier[end] : keyword[if] keyword[not] identifier[offset] : identifier[bounds] = identifier[cls] . 
identifier[__get_bounds] ( identifier[start] , identifier[end] ) keyword[else] : identifier[bounds] ={ literal[string] : identifier[offset] } identifier[query_agg] = identifier[A] ( literal[string] , identifier[field] = identifier[time_field] , identifier[interval] = identifier[interval] , identifier[time_zone] = identifier[time_zone] , identifier[min_doc_count] = literal[int] ,** identifier[bounds] ) identifier[agg_dict] = identifier[field_agg] . identifier[to_dict] ()[ identifier[field_agg] . identifier[name] ] identifier[query_agg] . identifier[bucket] ( identifier[agg_id] , identifier[field_agg] . identifier[name] ,** identifier[agg_dict] ) keyword[return] ( identifier[cls] . identifier[AGGREGATION_ID] , identifier[query_agg] )
def __get_query_agg_ts(cls, field, time_field, interval=None, time_zone=None, start=None, end=None, agg_type='count', offset=None): """ Create an es_dsl aggregation object for getting the time series values for a field. :param field: field to get the time series values :param time_field: field with the date :param interval: interval to be used to generate the time series values, such as:(year(y), quarter(q), month(M), week(w), day(d), hour(h), minute(m), second(s)) :param time_zone: time zone for the time_field :param start: date from for the time series, should be a datetime.datetime object :param end: date to for the time series, should be a datetime.datetime object :param agg_type: kind of aggregation for the field (cardinality, avg, percentiles) :param offset: offset to be added to the time_field in days :return: a aggregation object to calculate timeseries values of a field """ ' Time series for an aggregation metric ' if not interval: interval = '1M' # depends on [control=['if'], data=[]] if not time_zone: time_zone = 'UTC' # depends on [control=['if'], data=[]] if not field: field_agg = '' # depends on [control=['if'], data=[]] elif agg_type == 'cardinality': (agg_id, field_agg) = cls.__get_query_agg_cardinality(field, agg_id=cls.AGGREGATION_ID + 1) # depends on [control=['if'], data=[]] elif agg_type == 'avg': (agg_id, field_agg) = cls.__get_query_agg_avg(field, agg_id=cls.AGGREGATION_ID + 1) # depends on [control=['if'], data=[]] elif agg_type == 'percentiles': (agg_id, field_agg) = cls.__get_query_agg_percentiles(field, agg_id=cls.AGGREGATION_ID + 1) # depends on [control=['if'], data=[]] else: raise RuntimeError('Aggregation of %s in ts not supported' % agg_type) bounds = {} if start or end: if not offset: # With offset and quarter interval bogus buckets are added # to the start and to the end if extended_bounds is used # https://github.com/elastic/elasticsearch/issues/23776 bounds = cls.__get_bounds(start, end) # depends on [control=['if'], data=[]] 
else: bounds = {'offset': offset} # depends on [control=['if'], data=[]] query_agg = A('date_histogram', field=time_field, interval=interval, time_zone=time_zone, min_doc_count=0, **bounds) agg_dict = field_agg.to_dict()[field_agg.name] query_agg.bucket(agg_id, field_agg.name, **agg_dict) return (cls.AGGREGATION_ID, query_agg)
def show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id(self, **kwargs):
    """Auto Generated Code

    Build the XML request tree show_vcs/output/vcs-nodes/vcs-node-info/
    node-rbridge-id and hand it to the callback.

    Keyword Args:
        node_rbridge_id: text content for the node-rbridge-id element
            (required; popped from kwargs).
        callback: callable invoked with the assembled element tree;
            defaults to ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled tree.
    """
    # The original generated code created an ET.Element("config") that was
    # immediately overwritten; the request root is the show_vcs element.
    config = ET.Element("show_vcs")
    output = ET.SubElement(config, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_rbridge_id = ET.SubElement(vcs_node_info, "node-rbridge-id")
    node_rbridge_id.text = kwargs.pop('node_rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def function[show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[show_vcs] assign[=] call[name[ET].Element, parameter[constant[show_vcs]]] variable[config] assign[=] name[show_vcs] variable[output] assign[=] call[name[ET].SubElement, parameter[name[show_vcs], constant[output]]] variable[vcs_nodes] assign[=] call[name[ET].SubElement, parameter[name[output], constant[vcs-nodes]]] variable[vcs_node_info] assign[=] call[name[ET].SubElement, parameter[name[vcs_nodes], constant[vcs-node-info]]] variable[node_rbridge_id] assign[=] call[name[ET].SubElement, parameter[name[vcs_node_info], constant[node-rbridge-id]]] name[node_rbridge_id].text assign[=] call[name[kwargs].pop, parameter[constant[node_rbridge_id]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[show_vcs] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[show_vcs] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[show_vcs] , literal[string] ) identifier[vcs_nodes] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[vcs_node_info] = identifier[ET] . identifier[SubElement] ( identifier[vcs_nodes] , literal[string] ) identifier[node_rbridge_id] = identifier[ET] . identifier[SubElement] ( identifier[vcs_node_info] , literal[string] ) identifier[node_rbridge_id] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') show_vcs = ET.Element('show_vcs') config = show_vcs output = ET.SubElement(show_vcs, 'output') vcs_nodes = ET.SubElement(output, 'vcs-nodes') vcs_node_info = ET.SubElement(vcs_nodes, 'vcs-node-info') node_rbridge_id = ET.SubElement(vcs_node_info, 'node-rbridge-id') node_rbridge_id.text = kwargs.pop('node_rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
def _get_unpatched(cls): """Protect against re-patching the distutils if reloaded Also ensures that no other distutils extension monkeypatched the distutils first. """ while cls.__module__.startswith('setuptools'): cls, = cls.__bases__ if not cls.__module__.startswith('distutils'): raise AssertionError( "distutils has already been patched by %r" % cls ) return cls
def function[_get_unpatched, parameter[cls]]: constant[Protect against re-patching the distutils if reloaded Also ensures that no other distutils extension monkeypatched the distutils first. ] while call[name[cls].__module__.startswith, parameter[constant[setuptools]]] begin[:] <ast.Tuple object at 0x7da20c6aa620> assign[=] name[cls].__bases__ if <ast.UnaryOp object at 0x7da20c6a86a0> begin[:] <ast.Raise object at 0x7da20c6a97b0> return[name[cls]]
keyword[def] identifier[_get_unpatched] ( identifier[cls] ): literal[string] keyword[while] identifier[cls] . identifier[__module__] . identifier[startswith] ( literal[string] ): identifier[cls] ,= identifier[cls] . identifier[__bases__] keyword[if] keyword[not] identifier[cls] . identifier[__module__] . identifier[startswith] ( literal[string] ): keyword[raise] identifier[AssertionError] ( literal[string] % identifier[cls] ) keyword[return] identifier[cls]
def _get_unpatched(cls): """Protect against re-patching the distutils if reloaded Also ensures that no other distutils extension monkeypatched the distutils first. """ while cls.__module__.startswith('setuptools'): (cls,) = cls.__bases__ # depends on [control=['while'], data=[]] if not cls.__module__.startswith('distutils'): raise AssertionError('distutils has already been patched by %r' % cls) # depends on [control=['if'], data=[]] return cls
def get_category_aliases_under(parent_alias=None):
    """Returns a list of category aliases under the given parent.

    Could be useful to pass to `ModelWithCategory.enable_category_lists_editor`
    in `additional_parents_aliases` parameter.

    :param str|None parent_alias: Parent alias or None to categories under root
    :rtype: list
    :return: a list of category aliases
    """
    # Only children that actually carry an alias are considered.
    children = get_cache().get_children_for(parent_alias, only_with_aliases=True)
    return [child.alias for child in children]
def function[get_category_aliases_under, parameter[parent_alias]]: constant[Returns a list of category aliases under the given parent. Could be useful to pass to `ModelWithCategory.enable_category_lists_editor` in `additional_parents_aliases` parameter. :param str|None parent_alias: Parent alias or None to categories under root :rtype: list :return: a list of category aliases ] return[<ast.ListComp object at 0x7da1b23b1030>]
keyword[def] identifier[get_category_aliases_under] ( identifier[parent_alias] = keyword[None] ): literal[string] keyword[return] [ identifier[ch] . identifier[alias] keyword[for] identifier[ch] keyword[in] identifier[get_cache] (). identifier[get_children_for] ( identifier[parent_alias] , identifier[only_with_aliases] = keyword[True] )]
def get_category_aliases_under(parent_alias=None): """Returns a list of category aliases under the given parent. Could be useful to pass to `ModelWithCategory.enable_category_lists_editor` in `additional_parents_aliases` parameter. :param str|None parent_alias: Parent alias or None to categories under root :rtype: list :return: a list of category aliases """ return [ch.alias for ch in get_cache().get_children_for(parent_alias, only_with_aliases=True)]
def quit(self):
    """ Quits the application (called when the last window is closed)

        By the time this runs every main window must already have been
        closed and deregistered; any remaining window indicates a bug.
    """
    logger.debug("ArgosApplication.quit called")
    remaining = len(self.mainWindows)
    assert remaining == 0, \
        "Bug: still {} windows present at application quit!".format(remaining)
    self.qApplication.quit()
def function[quit, parameter[self]]: constant[ Quits the application (called when the last window is closed) ] call[name[logger].debug, parameter[constant[ArgosApplication.quit called]]] assert[compare[call[name[len], parameter[name[self].mainWindows]] equal[==] constant[0]]] call[name[self].qApplication.quit, parameter[]]
keyword[def] identifier[quit] ( identifier[self] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] ) keyword[assert] identifier[len] ( identifier[self] . identifier[mainWindows] )== literal[int] , literal[string] . identifier[format] ( identifier[len] ( identifier[self] . identifier[mainWindows] )) identifier[self] . identifier[qApplication] . identifier[quit] ()
def quit(self): """ Quits the application (called when the last window is closed) """ logger.debug('ArgosApplication.quit called') assert len(self.mainWindows) == 0, 'Bug: still {} windows present at application quit!'.format(len(self.mainWindows)) self.qApplication.quit()
def remove_account(self, id):
    """Remove an account section from the config (does not save).

    Returns True when the section existed and was removed, False otherwise.
    """
    if not self.parser.has_section(id):
        return False
    self.parser.remove_section(id)
    return True
def function[remove_account, parameter[self, id]]: constant[Add Account from config (does not save)] if call[name[self].parser.has_section, parameter[name[id]]] begin[:] call[name[self].parser.remove_section, parameter[name[id]]] return[constant[True]] return[constant[False]]
keyword[def] identifier[remove_account] ( identifier[self] , identifier[id] ): literal[string] keyword[if] identifier[self] . identifier[parser] . identifier[has_section] ( identifier[id] ): identifier[self] . identifier[parser] . identifier[remove_section] ( identifier[id] ) keyword[return] keyword[True] keyword[return] keyword[False]
def remove_account(self, id): """Add Account from config (does not save)""" if self.parser.has_section(id): self.parser.remove_section(id) return True # depends on [control=['if'], data=[]] return False
def _extract_when(body):
    """Extract the generated datetime from the notification.

    Prefers the envelope-level 'timestamp' field, falls back to the
    request-context timestamp, and finally to the current UTC time.
    """
    # NOTE: logic kept as it was in the openstack code. *ALL* notifications
    # should carry a 'timestamp' field (it is part of the notification
    # envelope spec); emitters that omit it should be reported as bugs
    # against the offending project.
    when = body.get('timestamp', body.get('_context_timestamp'))
    if not when:
        return utcnow()
    return Datatype.datetime.convert(when)
def function[_extract_when, parameter[body]]: constant[Extract the generated datetime from the notification.] variable[when] assign[=] call[name[body].get, parameter[constant[timestamp], call[name[body].get, parameter[constant[_context_timestamp]]]]] if name[when] begin[:] return[call[name[Datatype].datetime.convert, parameter[name[when]]]] return[call[name[utcnow], parameter[]]]
keyword[def] identifier[_extract_when] ( identifier[body] ): literal[string] identifier[when] = identifier[body] . identifier[get] ( literal[string] , identifier[body] . identifier[get] ( literal[string] )) keyword[if] identifier[when] : keyword[return] identifier[Datatype] . identifier[datetime] . identifier[convert] ( identifier[when] ) keyword[return] identifier[utcnow] ()
def _extract_when(body): """Extract the generated datetime from the notification.""" # NOTE: I am keeping the logic the same as it was in openstack # code, However, *ALL* notifications should have a 'timestamp' # field, it's part of the notification envelope spec. If this was # put here because some openstack project is generating notifications # without a timestamp, then that needs to be filed as a bug with the # offending project (mdragon) when = body.get('timestamp', body.get('_context_timestamp')) if when: return Datatype.datetime.convert(when) # depends on [control=['if'], data=[]] return utcnow()
def as_dict(self, keep_readonly=True, key_transformer=attribute_transformer):
    """Return a dict that can be JSONify using json.dump.

    Advanced usage might optionaly use a callback as parameter:

    .. code::python

        def my_key_transformer(key, attr_desc, value):
            return key

    Key is the attribute name used in Python. Attr_desc
    is a dict of metadata. Currently contains 'type' with the
    msrest type and 'key' with the RestAPI encoded key.
    Value is the current value in this object.

    The string returned will be used to serialize the key.
    If the return type is a list, this is considered hierarchical
    result dict.

    See the three examples in this file:

    - attribute_transformer
    - full_restapi_key_transformer
    - last_restapi_key_transformer

    :param function key_transformer: A key transformer function.
    :returns: A dict JSON compatible object
    :rtype: dict
    """
    # Serialize this object against a model map inferred from its class.
    return Serializer(self._infer_class_models())._serialize(
        self, key_transformer=key_transformer, keep_readonly=keep_readonly)
def function[as_dict, parameter[self, keep_readonly, key_transformer]]: constant[Return a dict that can be JSONify using json.dump. Advanced usage might optionaly use a callback as parameter: .. code::python def my_key_transformer(key, attr_desc, value): return key Key is the attribute name used in Python. Attr_desc is a dict of metadata. Currently contains 'type' with the msrest type and 'key' with the RestAPI encoded key. Value is the current value in this object. The string returned will be used to serialize the key. If the return type is a list, this is considered hierarchical result dict. See the three examples in this file: - attribute_transformer - full_restapi_key_transformer - last_restapi_key_transformer :param function key_transformer: A key transformer function. :returns: A dict JSON compatible object :rtype: dict ] variable[serializer] assign[=] call[name[Serializer], parameter[call[name[self]._infer_class_models, parameter[]]]] return[call[name[serializer]._serialize, parameter[name[self]]]]
keyword[def] identifier[as_dict] ( identifier[self] , identifier[keep_readonly] = keyword[True] , identifier[key_transformer] = identifier[attribute_transformer] ): literal[string] identifier[serializer] = identifier[Serializer] ( identifier[self] . identifier[_infer_class_models] ()) keyword[return] identifier[serializer] . identifier[_serialize] ( identifier[self] , identifier[key_transformer] = identifier[key_transformer] , identifier[keep_readonly] = identifier[keep_readonly] )
def as_dict(self, keep_readonly=True, key_transformer=attribute_transformer): """Return a dict that can be JSONify using json.dump. Advanced usage might optionaly use a callback as parameter: .. code::python def my_key_transformer(key, attr_desc, value): return key Key is the attribute name used in Python. Attr_desc is a dict of metadata. Currently contains 'type' with the msrest type and 'key' with the RestAPI encoded key. Value is the current value in this object. The string returned will be used to serialize the key. If the return type is a list, this is considered hierarchical result dict. See the three examples in this file: - attribute_transformer - full_restapi_key_transformer - last_restapi_key_transformer :param function key_transformer: A key transformer function. :returns: A dict JSON compatible object :rtype: dict """ serializer = Serializer(self._infer_class_models()) return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly)
def match_published_date(self, start, end, match):
    """Match assets that are published between the specified time period.

    arg:    start (osid.calendaring.DateTime): start time of the query
    arg:    end (osid.calendaring.DateTime): end time of the query
    arg:    match (boolean): ``true`` for a positive match, ``false``
            for a negative match
    raise:  InvalidArgument - ``end`` is les than ``start``
    raise:  NullArgument - ``start`` or ``end`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Constrain both ends of the published-date window.
    field_name = 'publishedDate'
    self._match_minimum_date_time(field_name, start, match)
    self._match_maximum_date_time(field_name, end, match)
def function[match_published_date, parameter[self, start, end, match]]: constant[Match assets that are published between the specified time period. arg: start (osid.calendaring.DateTime): start time of the query arg: end (osid.calendaring.DateTime): end time of the query arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: InvalidArgument - ``end`` is les than ``start`` raise: NullArgument - ``start`` or ``end`` is ``null`` *compliance: mandatory -- This method must be implemented.* ] call[name[self]._match_minimum_date_time, parameter[constant[publishedDate], name[start], name[match]]] call[name[self]._match_maximum_date_time, parameter[constant[publishedDate], name[end], name[match]]]
keyword[def] identifier[match_published_date] ( identifier[self] , identifier[start] , identifier[end] , identifier[match] ): literal[string] identifier[self] . identifier[_match_minimum_date_time] ( literal[string] , identifier[start] , identifier[match] ) identifier[self] . identifier[_match_maximum_date_time] ( literal[string] , identifier[end] , identifier[match] )
def match_published_date(self, start, end, match): """Match assets that are published between the specified time period. arg: start (osid.calendaring.DateTime): start time of the query arg: end (osid.calendaring.DateTime): end time of the query arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: InvalidArgument - ``end`` is les than ``start`` raise: NullArgument - ``start`` or ``end`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ self._match_minimum_date_time('publishedDate', start, match) self._match_maximum_date_time('publishedDate', end, match)
def Entry(self, name, directory=None, create=1):
    """
    Create `SCons.Node.FS.Entry`
    """
    # Delegate to the generic node factory using the FS Entry constructor.
    factory = self.env.fs.Entry
    return self._create_node(name, factory, directory, create)
def function[Entry, parameter[self, name, directory, create]]: constant[ Create `SCons.Node.FS.Entry` ] return[call[name[self]._create_node, parameter[name[name], name[self].env.fs.Entry, name[directory], name[create]]]]
keyword[def] identifier[Entry] ( identifier[self] , identifier[name] , identifier[directory] = keyword[None] , identifier[create] = literal[int] ): literal[string] keyword[return] identifier[self] . identifier[_create_node] ( identifier[name] , identifier[self] . identifier[env] . identifier[fs] . identifier[Entry] , identifier[directory] , identifier[create] )
def Entry(self, name, directory=None, create=1): """ Create `SCons.Node.FS.Entry` """ return self._create_node(name, self.env.fs.Entry, directory, create)
def device_query_create(self, device, **kwargs):  # noqa: E501
    """Create a device query  # noqa: E501

    Create a new device query.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.device_query_create(device, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param DeviceQueryPostPutRequest device: (required)
    :return: DeviceQuery
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous: the caller receives the request thread.
        return self.device_query_create_with_http_info(device, **kwargs)  # noqa: E501
    # Synchronous: return the deserialized response data directly.
    return self.device_query_create_with_http_info(device, **kwargs)  # noqa: E501
def function[device_query_create, parameter[self, device]]: constant[Create a device query # noqa: E501 Create a new device query. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.device_query_create(device, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param DeviceQueryPostPutRequest device: (required) :return: DeviceQuery If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:] return[call[name[self].device_query_create_with_http_info, parameter[name[device]]]]
keyword[def] identifier[device_query_create] ( identifier[self] , identifier[device] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[device_query_create_with_http_info] ( identifier[device] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[device_query_create_with_http_info] ( identifier[device] ,** identifier[kwargs] ) keyword[return] identifier[data]
def device_query_create(self, device, **kwargs): # noqa: E501 'Create a device query # noqa: E501\n\n Create a new device query. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.device_query_create(device, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param DeviceQueryPostPutRequest device: (required)\n :return: DeviceQuery\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.device_query_create_with_http_info(device, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.device_query_create_with_http_info(device, **kwargs) # noqa: E501 return data
def CheckSupportedFormat(cls, path, check_readable_only=False):
    """Checks if the storage file format is supported.

    Args:
      path (str): path to the storage file.
      check_readable_only (Optional[bool]): whether the store should only
          be checked to see if it can be read. If False, the store will be
          checked to see if it can be read and written to.

    Returns:
      bool: True if the format is supported.
    """
    try:
      connection = sqlite3.connect(
          path, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
      try:
        cursor = connection.cursor()

        query = 'SELECT * FROM metadata'
        cursor.execute(query)

        metadata_values = {row[0]: row[1] for row in cursor.fetchall()}

        cls._CheckStorageMetadata(
            metadata_values, check_readable_only=check_readable_only)
      finally:
        # Always release the connection — the previous version leaked it
        # whenever the query or the metadata check raised.
        connection.close()

      result = True

    except (IOError, sqlite3.DatabaseError):
      result = False

    return result
def function[CheckSupportedFormat, parameter[cls, path, check_readable_only]]: constant[Checks if the storage file format is supported. Args: path (str): path to the storage file. check_readable_only (Optional[bool]): whether the store should only be checked to see if it can be read. If False, the store will be checked to see if it can be read and written to. Returns: bool: True if the format is supported. ] <ast.Try object at 0x7da18c4cf310> return[name[result]]
keyword[def] identifier[CheckSupportedFormat] ( identifier[cls] , identifier[path] , identifier[check_readable_only] = keyword[False] ): literal[string] keyword[try] : identifier[connection] = identifier[sqlite3] . identifier[connect] ( identifier[path] , identifier[detect_types] = identifier[sqlite3] . identifier[PARSE_DECLTYPES] | identifier[sqlite3] . identifier[PARSE_COLNAMES] ) identifier[cursor] = identifier[connection] . identifier[cursor] () identifier[query] = literal[string] identifier[cursor] . identifier[execute] ( identifier[query] ) identifier[metadata_values] ={ identifier[row] [ literal[int] ]: identifier[row] [ literal[int] ] keyword[for] identifier[row] keyword[in] identifier[cursor] . identifier[fetchall] ()} identifier[cls] . identifier[_CheckStorageMetadata] ( identifier[metadata_values] , identifier[check_readable_only] = identifier[check_readable_only] ) identifier[connection] . identifier[close] () identifier[result] = keyword[True] keyword[except] ( identifier[IOError] , identifier[sqlite3] . identifier[DatabaseError] ): identifier[result] = keyword[False] keyword[return] identifier[result]
def CheckSupportedFormat(cls, path, check_readable_only=False): """Checks if the storage file format is supported. Args: path (str): path to the storage file. check_readable_only (Optional[bool]): whether the store should only be checked to see if it can be read. If False, the store will be checked to see if it can be read and written to. Returns: bool: True if the format is supported. """ try: connection = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES) cursor = connection.cursor() query = 'SELECT * FROM metadata' cursor.execute(query) metadata_values = {row[0]: row[1] for row in cursor.fetchall()} cls._CheckStorageMetadata(metadata_values, check_readable_only=check_readable_only) connection.close() result = True # depends on [control=['try'], data=[]] except (IOError, sqlite3.DatabaseError): result = False # depends on [control=['except'], data=[]] return result
def detect_scheme(filename):
    """Detects partitioning scheme of the source

    Args:
        filename (str): path to file or device for detection of \
partitioning scheme.

    Returns:
        SCHEME_MBR, SCHEME_GPT or SCHEME_UNKNOWN

    Raises:
        IOError: The file doesn't exist or cannot be opened for reading

    >>> from rawdisk.scheme.common import *
    >>> scheme = detect_scheme('/dev/disk1')
    >>> if scheme == PartitionScheme.SCHEME_MBR:
    >>>     <...>
    """
    logger = logging.getLogger(__name__)
    logger.info('Detecting partitioning scheme')

    with open(filename, 'rb') as source:
        # A valid MBR boot signature must be present for either scheme.
        source.seek(mbr.MBR_SIG_OFFSET)
        mbr_sig = struct.unpack("<H", source.read(mbr.MBR_SIG_SIZE))[0]

        if mbr_sig != mbr.MBR_SIGNATURE:
            # Neither MBR nor GPT (GPT keeps a protective MBR).
            logger.debug('Unknown partitioning scheme')
            return PartitionScheme.SCHEME_UNKNOWN

        # Distinguish plain MBR from GPT by probing for the GPT header.
        source.seek(gpt.GPT_HEADER_OFFSET)
        gpt_sig = struct.unpack("<8s", source.read(gpt.GPT_SIG_SIZE))[0]

        if gpt_sig == gpt.GPT_SIGNATURE:
            logger.debug('GPT scheme detected')
            return PartitionScheme.SCHEME_GPT

        logger.debug('MBR scheme detected')
        return PartitionScheme.SCHEME_MBR
def function[detect_scheme, parameter[filename]]: constant[Detects partitioning scheme of the source Args: filename (str): path to file or device for detection of partitioning scheme. Returns: SCHEME_MBR, SCHEME_GPT or SCHEME_UNKNOWN Raises: IOError: The file doesn't exist or cannot be opened for reading >>> from rawdisk.scheme.common import * >>> scheme = detect_scheme('/dev/disk1') >>> if scheme == PartitionScheme.SCHEME_MBR: >>> <...> ] variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]] call[name[logger].info, parameter[constant[Detecting partitioning scheme]]] with call[name[open], parameter[name[filename], constant[rb]]] begin[:] call[name[f].seek, parameter[name[mbr].MBR_SIG_OFFSET]] variable[data] assign[=] call[name[f].read, parameter[name[mbr].MBR_SIG_SIZE]] variable[signature] assign[=] call[call[name[struct].unpack, parameter[constant[<H], name[data]]]][constant[0]] if compare[name[signature] not_equal[!=] name[mbr].MBR_SIGNATURE] begin[:] call[name[logger].debug, parameter[constant[Unknown partitioning scheme]]] return[name[PartitionScheme].SCHEME_UNKNOWN]
keyword[def] identifier[detect_scheme] ( identifier[filename] ): literal[string] identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) identifier[logger] . identifier[info] ( literal[string] ) keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[seek] ( identifier[mbr] . identifier[MBR_SIG_OFFSET] ) identifier[data] = identifier[f] . identifier[read] ( identifier[mbr] . identifier[MBR_SIG_SIZE] ) identifier[signature] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[data] )[ literal[int] ] keyword[if] identifier[signature] != identifier[mbr] . identifier[MBR_SIGNATURE] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[PartitionScheme] . identifier[SCHEME_UNKNOWN] keyword[else] : identifier[f] . identifier[seek] ( identifier[gpt] . identifier[GPT_HEADER_OFFSET] ) identifier[data] = identifier[f] . identifier[read] ( identifier[gpt] . identifier[GPT_SIG_SIZE] ) identifier[signature] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[data] )[ literal[int] ] keyword[if] identifier[signature] != identifier[gpt] . identifier[GPT_SIGNATURE] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[PartitionScheme] . identifier[SCHEME_MBR] keyword[else] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[PartitionScheme] . identifier[SCHEME_GPT]
def detect_scheme(filename): """Detects partitioning scheme of the source Args: filename (str): path to file or device for detection of partitioning scheme. Returns: SCHEME_MBR, SCHEME_GPT or SCHEME_UNKNOWN Raises: IOError: The file doesn't exist or cannot be opened for reading >>> from rawdisk.scheme.common import * >>> scheme = detect_scheme('/dev/disk1') >>> if scheme == PartitionScheme.SCHEME_MBR: >>> <...> """ logger = logging.getLogger(__name__) logger.info('Detecting partitioning scheme') with open(filename, 'rb') as f: # Look for MBR signature first f.seek(mbr.MBR_SIG_OFFSET) data = f.read(mbr.MBR_SIG_SIZE) signature = struct.unpack('<H', data)[0] if signature != mbr.MBR_SIGNATURE: # Something else logger.debug('Unknown partitioning scheme') return PartitionScheme.SCHEME_UNKNOWN # depends on [control=['if'], data=[]] else: # Could be MBR or GPT, look for GPT header f.seek(gpt.GPT_HEADER_OFFSET) data = f.read(gpt.GPT_SIG_SIZE) signature = struct.unpack('<8s', data)[0] if signature != gpt.GPT_SIGNATURE: logger.debug('MBR scheme detected') return PartitionScheme.SCHEME_MBR # depends on [control=['if'], data=[]] else: logger.debug('GPT scheme detected') return PartitionScheme.SCHEME_GPT # depends on [control=['with'], data=['f']]
def _to_utc(self, dt):
    """Takes a naive timezone with an localized value and return it
    formatted as utc.

    The naive datetime ``dt`` is interpreted in this instance's timezone
    (``self._get_tz()``) and converted to an aware UTC datetime.
    """
    localized = self._get_tz().localize(dt)
    return localized.astimezone(pytz.utc)
def function[_to_utc, parameter[self, dt]]: constant[Takes a naive timezone with an localized value and return it formatted as utc.] variable[tz] assign[=] call[name[self]._get_tz, parameter[]] variable[loc_dt] assign[=] call[name[tz].localize, parameter[name[dt]]] return[call[name[loc_dt].astimezone, parameter[name[pytz].utc]]]
keyword[def] identifier[_to_utc] ( identifier[self] , identifier[dt] ): literal[string] identifier[tz] = identifier[self] . identifier[_get_tz] () identifier[loc_dt] = identifier[tz] . identifier[localize] ( identifier[dt] ) keyword[return] identifier[loc_dt] . identifier[astimezone] ( identifier[pytz] . identifier[utc] )
def _to_utc(self, dt): """Takes a naive timezone with an localized value and return it formatted as utc.""" tz = self._get_tz() loc_dt = tz.localize(dt) return loc_dt.astimezone(pytz.utc)
def _parse_signal_lines(signal_lines):
    """
    Extract fields from a list of signal line strings into a dictionary.

    Parameters
    ----------
    signal_lines : list
        One header string per signal channel, each expected to match the
        ``_rx_signal`` regular expression (14 capture groups).

    Returns
    -------
    signal_fields : dict
        Maps each field name from ``SIGNAL_SPECS.index`` to a list with one
        entry per channel.  Entries are typecast to int/float where the spec
        allows it; fields absent from a channel keep their read default.
    """
    n_sig = len(signal_lines)

    # Dictionary for signal fields
    signal_fields = {}

    # Each dictionary field is a list with one slot per channel
    for field in SIGNAL_SPECS.index:
        signal_fields[field] = n_sig * [None]

    # Read string fields from signal line
    for ch in range(n_sig):
        # All 14 capture groups of the signal-line regex, in header order.
        (signal_fields['file_name'][ch], signal_fields['fmt'][ch],
            signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch],
            signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch],
            signal_fields['baseline'][ch], signal_fields['units'][ch],
            signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch],
            signal_fields['init_value'][ch], signal_fields['checksum'][ch],
            signal_fields['block_size'][ch],
            signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0]

        for field in SIGNAL_SPECS.index:
            # Replace empty strings with their read defaults (which are mostly None)
            # Note: Never set a field to None. [None]* n_sig is accurate, indicating
            # that different channels can be present or missing.
            if signal_fields[field][ch] == '':
                signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default']

                # Special case: missing baseline defaults to ADCzero if present
                # NOTE(review): relies on 'baseline' preceding 'adc_zero' in
                # SIGNAL_SPECS.index so that adc_zero is still the raw string
                # here — confirm the spec ordering.
                if field == 'baseline' and signal_fields['adc_zero'][ch] != '':
                    signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch])

            # Typecast non-empty strings for numerical fields
            else:
                if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types:
                    signal_fields[field][ch] = int(signal_fields[field][ch])
                elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types:
                    signal_fields[field][ch] = float(signal_fields[field][ch])
                    # Special case: adc_gain of 0 means 200
                    if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0:
                        signal_fields['adc_gain'][ch] = 200.

    return signal_fields
def function[_parse_signal_lines, parameter[signal_lines]]: constant[ Extract fields from a list of signal line strings into a dictionary. ] variable[n_sig] assign[=] call[name[len], parameter[name[signal_lines]]] variable[signal_fields] assign[=] dictionary[[], []] for taget[name[field]] in starred[name[SIGNAL_SPECS].index] begin[:] call[name[signal_fields]][name[field]] assign[=] binary_operation[name[n_sig] * list[[<ast.Constant object at 0x7da1b18a0940>]]] for taget[name[ch]] in starred[call[name[range], parameter[name[n_sig]]]] begin[:] <ast.Tuple object at 0x7da1b18a3280> assign[=] call[call[name[_rx_signal].findall, parameter[call[name[signal_lines]][name[ch]]]]][constant[0]] for taget[name[field]] in starred[name[SIGNAL_SPECS].index] begin[:] if compare[call[call[name[signal_fields]][name[field]]][name[ch]] equal[==] constant[]] begin[:] call[call[name[signal_fields]][name[field]]][name[ch]] assign[=] call[name[SIGNAL_SPECS].loc][tuple[[<ast.Name object at 0x7da1b194c280>, <ast.Constant object at 0x7da1b194df90>]]] if <ast.BoolOp object at 0x7da1b194cfd0> begin[:] call[call[name[signal_fields]][constant[baseline]]][name[ch]] assign[=] call[name[int], parameter[call[call[name[signal_fields]][constant[adc_zero]]][name[ch]]]] return[name[signal_fields]]
keyword[def] identifier[_parse_signal_lines] ( identifier[signal_lines] ): literal[string] identifier[n_sig] = identifier[len] ( identifier[signal_lines] ) identifier[signal_fields] ={} keyword[for] identifier[field] keyword[in] identifier[SIGNAL_SPECS] . identifier[index] : identifier[signal_fields] [ identifier[field] ]= identifier[n_sig] *[ keyword[None] ] keyword[for] identifier[ch] keyword[in] identifier[range] ( identifier[n_sig] ): ( identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ], identifier[signal_fields] [ literal[string] ][ identifier[ch] ])= identifier[_rx_signal] . identifier[findall] ( identifier[signal_lines] [ identifier[ch] ])[ literal[int] ] keyword[for] identifier[field] keyword[in] identifier[SIGNAL_SPECS] . identifier[index] : keyword[if] identifier[signal_fields] [ identifier[field] ][ identifier[ch] ]== literal[string] : identifier[signal_fields] [ identifier[field] ][ identifier[ch] ]= identifier[SIGNAL_SPECS] . 
identifier[loc] [ identifier[field] , literal[string] ] keyword[if] identifier[field] == literal[string] keyword[and] identifier[signal_fields] [ literal[string] ][ identifier[ch] ]!= literal[string] : identifier[signal_fields] [ literal[string] ][ identifier[ch] ]= identifier[int] ( identifier[signal_fields] [ literal[string] ][ identifier[ch] ]) keyword[else] : keyword[if] identifier[SIGNAL_SPECS] . identifier[loc] [ identifier[field] , literal[string] ] keyword[is] identifier[int_types] : identifier[signal_fields] [ identifier[field] ][ identifier[ch] ]= identifier[int] ( identifier[signal_fields] [ identifier[field] ][ identifier[ch] ]) keyword[elif] identifier[SIGNAL_SPECS] . identifier[loc] [ identifier[field] , literal[string] ] keyword[is] identifier[float_types] : identifier[signal_fields] [ identifier[field] ][ identifier[ch] ]= identifier[float] ( identifier[signal_fields] [ identifier[field] ][ identifier[ch] ]) keyword[if] identifier[field] == literal[string] keyword[and] identifier[signal_fields] [ literal[string] ][ identifier[ch] ]== literal[int] : identifier[signal_fields] [ literal[string] ][ identifier[ch] ]= literal[int] keyword[return] identifier[signal_fields]
def _parse_signal_lines(signal_lines): """ Extract fields from a list of signal line strings into a dictionary. """ n_sig = len(signal_lines) # Dictionary for signal fields signal_fields = {} # Each dictionary field is a list for field in SIGNAL_SPECS.index: signal_fields[field] = n_sig * [None] # depends on [control=['for'], data=['field']] # Read string fields from signal line for ch in range(n_sig): (signal_fields['file_name'][ch], signal_fields['fmt'][ch], signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch], signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch], signal_fields['baseline'][ch], signal_fields['units'][ch], signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch], signal_fields['init_value'][ch], signal_fields['checksum'][ch], signal_fields['block_size'][ch], signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0] for field in SIGNAL_SPECS.index: # Replace empty strings with their read defaults (which are mostly None) # Note: Never set a field to None. [None]* n_sig is accurate, indicating # that different channels can be present or missing. 
if signal_fields[field][ch] == '': signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default'] # Special case: missing baseline defaults to ADCzero if present if field == 'baseline' and signal_fields['adc_zero'][ch] != '': signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Typecast non-empty strings for numerical fields elif SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types: signal_fields[field][ch] = int(signal_fields[field][ch]) # depends on [control=['if'], data=[]] elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types: signal_fields[field][ch] = float(signal_fields[field][ch]) # Special case: adc_gain of 0 means 200 if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0: signal_fields['adc_gain'][ch] = 200.0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']] # depends on [control=['for'], data=['ch']] return signal_fields
def write_catalog(filename, catalog, fmt=None, meta=None, prefix=None):
    """
    Write a catalog (list of sources) to a file with format determined by
    extension.

    Sources must be of type :class:`AegeanTools.models.OutputSource`,
    :class:`AegeanTools.models.SimpleSource`, or
    :class:`AegeanTools.models.IslandSource`.

    The catalog is first split by source type; each non-empty group is written
    to its own file whose name gains a ``_comp``, ``_isle``, or ``_simp``
    suffix before the extension.

    Parameters
    ----------
    filename : str
        Base name for file to write. `_simp`, `_comp`, or `_isle` will be added
        to differentiate the different types of sources that are being written.

    catalog : list
        A list of source objects. Sources must be of type
        :class:`AegeanTools.models.OutputSource`,
        :class:`AegeanTools.models.SimpleSource`, or
        :class:`AegeanTools.models.IslandSource`.

    fmt : str
        The file format extension.

    prefix : str
        Prepend each column name with "prefix_". Default is to prepend nothing.

    meta : dict
        A dictionary to be used as metadata for some file types (fits, VOTable).

    Returns
    -------
    None
    """
    if meta is None:
        meta = {}

    # Column-name prefix: '' when no prefix was requested, 'prefix_' otherwise.
    if prefix is None:
        pre=''
    else:
        pre = prefix + '_'

    def writer(filename, catalog, fmt=None):
        """
        Write one group of sources to `filename`.

        Builds an astropy Table from the source attributes — this preserves
        the data types in the VOTable — then dispatches on `fmt`.
        """
        # construct a dict of the data
        tab_dict = {}
        name_list = []
        for name in catalog[0].names:
            col_name = name
            if catalog[0].galactic:
                # Rename ra/dec columns to lon/lat for galactic catalogs,
                # whether ra/dec appears as a prefix or a suffix of the name.
                if name.startswith('ra'):
                    col_name = 'lon'+name[2:]
                elif name.endswith('ra'):
                    col_name = name[:-2] + 'lon'
                elif name.startswith('dec'):
                    col_name = 'lat'+name[3:]
                elif name.endswith('dec'):
                    col_name = name[:-3] + 'lat'
            col_name = pre + col_name
            # Missing attributes become None rather than raising.
            tab_dict[col_name] = [getattr(c, name, None) for c in catalog]
            name_list.append(col_name)
        t = Table(tab_dict, meta=meta)

        # re-order the columns to match the source attribute order
        t = t[[n for n in name_list]]

        if fmt is not None:
            if fmt in ["vot", "vo", "xml"]:
                vot = from_table(t)
                # description of this votable
                vot.description = repr(meta)
                writetoVO(vot, filename)
            elif fmt in ['hdf5']:
                t.write(filename, path='data', overwrite=True)
            elif fmt in ['fits']:
                writeFITSTable(filename, t)
            else:
                # Any other extension is handed to astropy.io.ascii.
                ascii.write(t, filename, fmt, overwrite=True)
        else:
            ascii.write(t, filename, overwrite=True)
        return

    # sort the sources into types and then write them out individually
    components, islands, simples = classify_catalog(catalog)

    if len(components) > 0:
        new_name = "{1}{0}{2}".format('_comp', *os.path.splitext(filename))
        writer(new_name, components, fmt)
        log.info("wrote {0}".format(new_name))
    if len(islands) > 0:
        new_name = "{1}{0}{2}".format('_isle', *os.path.splitext(filename))
        writer(new_name, islands, fmt)
        log.info("wrote {0}".format(new_name))
    if len(simples) > 0:
        new_name = "{1}{0}{2}".format('_simp', *os.path.splitext(filename))
        writer(new_name, simples, fmt)
        log.info("wrote {0}".format(new_name))
    return
def function[write_catalog, parameter[filename, catalog, fmt, meta, prefix]]: constant[ Write a catalog (list of sources) to a file with format determined by extension. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. Parameters ---------- filename : str Base name for file to write. `_simp`, `_comp`, or `_isle` will be added to differentiate the different types of sources that are being written. catalog : list A list of source objects. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. fmt : str The file format extension. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict A dictionary to be used as metadata for some file types (fits, VOTable). Returns ------- None ] if compare[name[meta] is constant[None]] begin[:] variable[meta] assign[=] dictionary[[], []] if compare[name[prefix] is constant[None]] begin[:] variable[pre] assign[=] constant[] def function[writer, parameter[filename, catalog, fmt]]: constant[ construct a dict of the data this method preserves the data types in the VOTable ] variable[tab_dict] assign[=] dictionary[[], []] variable[name_list] assign[=] list[[]] for taget[name[name]] in starred[call[name[catalog]][constant[0]].names] begin[:] variable[col_name] assign[=] name[name] if call[name[catalog]][constant[0]].galactic begin[:] if call[name[name].startswith, parameter[constant[ra]]] begin[:] variable[col_name] assign[=] binary_operation[constant[lon] + call[name[name]][<ast.Slice object at 0x7da1b0f38160>]] variable[col_name] assign[=] binary_operation[name[pre] + name[col_name]] call[name[tab_dict]][name[col_name]] assign[=] <ast.ListComp object at 0x7da207f02920> call[name[name_list].append, parameter[name[col_name]]] variable[t] assign[=] call[name[Table], parameter[name[tab_dict]]] variable[t] 
assign[=] call[name[t]][<ast.ListComp object at 0x7da207f03070>] if compare[name[fmt] is_not constant[None]] begin[:] if compare[name[fmt] in list[[<ast.Constant object at 0x7da207f02ad0>, <ast.Constant object at 0x7da207f01d50>, <ast.Constant object at 0x7da207f00fa0>]]] begin[:] variable[vot] assign[=] call[name[from_table], parameter[name[t]]] name[vot].description assign[=] call[name[repr], parameter[name[meta]]] call[name[writetoVO], parameter[name[vot], name[filename]]] return[None] <ast.Tuple object at 0x7da207f030d0> assign[=] call[name[classify_catalog], parameter[name[catalog]]] if compare[call[name[len], parameter[name[components]]] greater[>] constant[0]] begin[:] variable[new_name] assign[=] call[constant[{1}{0}{2}].format, parameter[constant[_comp], <ast.Starred object at 0x7da207f03550>]] call[name[writer], parameter[name[new_name], name[components], name[fmt]]] call[name[log].info, parameter[call[constant[wrote {0}].format, parameter[name[new_name]]]]] if compare[call[name[len], parameter[name[islands]]] greater[>] constant[0]] begin[:] variable[new_name] assign[=] call[constant[{1}{0}{2}].format, parameter[constant[_isle], <ast.Starred object at 0x7da18eb54a30>]] call[name[writer], parameter[name[new_name], name[islands], name[fmt]]] call[name[log].info, parameter[call[constant[wrote {0}].format, parameter[name[new_name]]]]] if compare[call[name[len], parameter[name[simples]]] greater[>] constant[0]] begin[:] variable[new_name] assign[=] call[constant[{1}{0}{2}].format, parameter[constant[_simp], <ast.Starred object at 0x7da18eb56650>]] call[name[writer], parameter[name[new_name], name[simples], name[fmt]]] call[name[log].info, parameter[call[constant[wrote {0}].format, parameter[name[new_name]]]]] return[None]
keyword[def] identifier[write_catalog] ( identifier[filename] , identifier[catalog] , identifier[fmt] = keyword[None] , identifier[meta] = keyword[None] , identifier[prefix] = keyword[None] ): literal[string] keyword[if] identifier[meta] keyword[is] keyword[None] : identifier[meta] ={} keyword[if] identifier[prefix] keyword[is] keyword[None] : identifier[pre] = literal[string] keyword[else] : identifier[pre] = identifier[prefix] + literal[string] keyword[def] identifier[writer] ( identifier[filename] , identifier[catalog] , identifier[fmt] = keyword[None] ): literal[string] identifier[tab_dict] ={} identifier[name_list] =[] keyword[for] identifier[name] keyword[in] identifier[catalog] [ literal[int] ]. identifier[names] : identifier[col_name] = identifier[name] keyword[if] identifier[catalog] [ literal[int] ]. identifier[galactic] : keyword[if] identifier[name] . identifier[startswith] ( literal[string] ): identifier[col_name] = literal[string] + identifier[name] [ literal[int] :] keyword[elif] identifier[name] . identifier[endswith] ( literal[string] ): identifier[col_name] = identifier[name] [:- literal[int] ]+ literal[string] keyword[elif] identifier[name] . identifier[startswith] ( literal[string] ): identifier[col_name] = literal[string] + identifier[name] [ literal[int] :] keyword[elif] identifier[name] . identifier[endswith] ( literal[string] ): identifier[col_name] = identifier[name] [:- literal[int] ]+ literal[string] identifier[col_name] = identifier[pre] + identifier[col_name] identifier[tab_dict] [ identifier[col_name] ]=[ identifier[getattr] ( identifier[c] , identifier[name] , keyword[None] ) keyword[for] identifier[c] keyword[in] identifier[catalog] ] identifier[name_list] . 
identifier[append] ( identifier[col_name] ) identifier[t] = identifier[Table] ( identifier[tab_dict] , identifier[meta] = identifier[meta] ) identifier[t] = identifier[t] [[ identifier[n] keyword[for] identifier[n] keyword[in] identifier[name_list] ]] keyword[if] identifier[fmt] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[fmt] keyword[in] [ literal[string] , literal[string] , literal[string] ]: identifier[vot] = identifier[from_table] ( identifier[t] ) identifier[vot] . identifier[description] = identifier[repr] ( identifier[meta] ) identifier[writetoVO] ( identifier[vot] , identifier[filename] ) keyword[elif] identifier[fmt] keyword[in] [ literal[string] ]: identifier[t] . identifier[write] ( identifier[filename] , identifier[path] = literal[string] , identifier[overwrite] = keyword[True] ) keyword[elif] identifier[fmt] keyword[in] [ literal[string] ]: identifier[writeFITSTable] ( identifier[filename] , identifier[t] ) keyword[else] : identifier[ascii] . identifier[write] ( identifier[t] , identifier[filename] , identifier[fmt] , identifier[overwrite] = keyword[True] ) keyword[else] : identifier[ascii] . identifier[write] ( identifier[t] , identifier[filename] , identifier[overwrite] = keyword[True] ) keyword[return] identifier[components] , identifier[islands] , identifier[simples] = identifier[classify_catalog] ( identifier[catalog] ) keyword[if] identifier[len] ( identifier[components] )> literal[int] : identifier[new_name] = literal[string] . identifier[format] ( literal[string] ,* identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )) identifier[writer] ( identifier[new_name] , identifier[components] , identifier[fmt] ) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[new_name] )) keyword[if] identifier[len] ( identifier[islands] )> literal[int] : identifier[new_name] = literal[string] . identifier[format] ( literal[string] ,* identifier[os] . identifier[path] . 
identifier[splitext] ( identifier[filename] )) identifier[writer] ( identifier[new_name] , identifier[islands] , identifier[fmt] ) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[new_name] )) keyword[if] identifier[len] ( identifier[simples] )> literal[int] : identifier[new_name] = literal[string] . identifier[format] ( literal[string] ,* identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )) identifier[writer] ( identifier[new_name] , identifier[simples] , identifier[fmt] ) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[new_name] )) keyword[return]
def write_catalog(filename, catalog, fmt=None, meta=None, prefix=None): """ Write a catalog (list of sources) to a file with format determined by extension. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. Parameters ---------- filename : str Base name for file to write. `_simp`, `_comp`, or `_isle` will be added to differentiate the different types of sources that are being written. catalog : list A list of source objects. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. fmt : str The file format extension. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict A dictionary to be used as metadata for some file types (fits, VOTable). Returns ------- None """ if meta is None: meta = {} # depends on [control=['if'], data=['meta']] if prefix is None: pre = '' # depends on [control=['if'], data=[]] else: pre = prefix + '_' def writer(filename, catalog, fmt=None): """ construct a dict of the data this method preserves the data types in the VOTable """ tab_dict = {} name_list = [] for name in catalog[0].names: col_name = name if catalog[0].galactic: if name.startswith('ra'): col_name = 'lon' + name[2:] # depends on [control=['if'], data=[]] elif name.endswith('ra'): col_name = name[:-2] + 'lon' # depends on [control=['if'], data=[]] elif name.startswith('dec'): col_name = 'lat' + name[3:] # depends on [control=['if'], data=[]] elif name.endswith('dec'): col_name = name[:-3] + 'lat' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] col_name = pre + col_name tab_dict[col_name] = [getattr(c, name, None) for c in catalog] name_list.append(col_name) # depends on [control=['for'], data=['name']] t = Table(tab_dict, meta=meta) # re-order the columns t = t[[n for n in name_list]] if fmt is not None: if fmt 
in ['vot', 'vo', 'xml']: vot = from_table(t) # description of this votable vot.description = repr(meta) writetoVO(vot, filename) # depends on [control=['if'], data=[]] elif fmt in ['hdf5']: t.write(filename, path='data', overwrite=True) # depends on [control=['if'], data=[]] elif fmt in ['fits']: writeFITSTable(filename, t) # depends on [control=['if'], data=[]] else: ascii.write(t, filename, fmt, overwrite=True) # depends on [control=['if'], data=['fmt']] else: ascii.write(t, filename, overwrite=True) return # sort the sources into types and then write them out individually (components, islands, simples) = classify_catalog(catalog) if len(components) > 0: new_name = '{1}{0}{2}'.format('_comp', *os.path.splitext(filename)) writer(new_name, components, fmt) log.info('wrote {0}'.format(new_name)) # depends on [control=['if'], data=[]] if len(islands) > 0: new_name = '{1}{0}{2}'.format('_isle', *os.path.splitext(filename)) writer(new_name, islands, fmt) log.info('wrote {0}'.format(new_name)) # depends on [control=['if'], data=[]] if len(simples) > 0: new_name = '{1}{0}{2}'.format('_simp', *os.path.splitext(filename)) writer(new_name, simples, fmt) log.info('wrote {0}'.format(new_name)) # depends on [control=['if'], data=[]] return
def load_xml(self, xmlfile, **kwargs):
    """Load sources from an XML file.

    Parameters
    ----------
    xmlfile : str
        Path to the XML model file.  If the path does not exist it is
        resolved against the bundled catalog directory
        (``fermipy.PACKAGE_DATA/catalogs``).

    Keyword Arguments
    -----------------
    extdir : str
        Directory for extended-source templates (defaults to ``self.extdir``).
    coordsys : str
        Coordinate system for the square ROI cut ('CEL' or 'GAL'; default 'CEL').

    Returns
    -------
    srcs : numpy.ndarray
        Array of the non-diffuse Source objects that passed the ROI selection.
    """
    extdir = kwargs.get('extdir', self.extdir)
    coordsys = kwargs.get('coordsys', 'CEL')
    if not os.path.isfile(xmlfile):
        xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile)

    root = ElementTree.ElementTree(file=xmlfile).getroot()

    # Split the XML entries into diffuse and point-like sources; only the
    # point-like ones carry RAJ2000/DEJ2000 sky coordinates.
    diffuse_srcs = []
    srcs = []
    ra, dec = [], []

    for s in root.findall('source'):
        src = Source.create_from_xml(s, extdir=extdir)
        if src.diffuse:
            diffuse_srcs += [src]
        else:
            srcs += [src]
            ra += [src['RAJ2000']]
            dec += [src['DEJ2000']]

    src_skydir = SkyCoord(ra=np.array(ra) * u.deg,
                          dec=np.array(dec) * u.deg)
    radec = np.vstack((src_skydir.ra.deg, src_skydir.dec.deg)).T
    glonlat = np.vstack((src_skydir.galactic.l.deg,
                         src_skydir.galactic.b.deg)).T

    # Angular separation and projected offsets (in deg) of each source from
    # the ROI center, computed in both celestial and galactic frames.
    offset = self.skydir.separation(src_skydir).deg
    offset_cel = wcs_utils.sky_to_offset(self.skydir,
                                         radec[:, 0], radec[:, 1], 'CEL')
    offset_gal = wcs_utils.sky_to_offset(self.skydir,
                                         glonlat[:, 0], glonlat[:, 1], 'GAL')

    # Keep sources passing BOTH the circular src_radius cut and the square
    # src_radius_roi cut (the latter evaluated in `coordsys`).
    m0 = get_skydir_distance_mask(src_skydir, self.skydir,
                                  self.config['src_radius'])
    m1 = get_skydir_distance_mask(src_skydir, self.skydir,
                                  self.config['src_radius_roi'],
                                  square=True, coordsys=coordsys)
    m = (m0 & m1)
    srcs = np.array(srcs)[m]
    for i, s in enumerate(srcs):
        # Attach the precomputed offsets (masked to the selected sources).
        s.data['offset'] = offset[m][i]
        s.data['offset_ra'] = offset_cel[:, 0][m][i]
        s.data['offset_dec'] = offset_cel[:, 1][m][i]
        s.data['offset_glon'] = offset_gal[:, 0][m][i]
        s.data['offset_glat'] = offset_gal[:, 1][m][i]
        self.load_source(s, False,
                         merge_sources=self.config['merge_sources'])

    # Diffuse sources bypass the spatial selection entirely.
    for i, s in enumerate(diffuse_srcs):
        self.load_source(s, False,
                         merge_sources=self.config['merge_sources'])

    self._build_src_index()

    return srcs
def function[load_xml, parameter[self, xmlfile]]: constant[Load sources from an XML file.] variable[extdir] assign[=] call[name[kwargs].get, parameter[constant[extdir], name[self].extdir]] variable[coordsys] assign[=] call[name[kwargs].get, parameter[constant[coordsys], constant[CEL]]] if <ast.UnaryOp object at 0x7da1b23475e0> begin[:] variable[xmlfile] assign[=] call[name[os].path.join, parameter[name[fermipy].PACKAGE_DATA, constant[catalogs], name[xmlfile]]] variable[root] assign[=] call[call[name[ElementTree].ElementTree, parameter[]].getroot, parameter[]] variable[diffuse_srcs] assign[=] list[[]] variable[srcs] assign[=] list[[]] <ast.Tuple object at 0x7da1b2344460> assign[=] tuple[[<ast.List object at 0x7da1b2345c60>, <ast.List object at 0x7da1b2345810>]] for taget[name[s]] in starred[call[name[root].findall, parameter[constant[source]]]] begin[:] variable[src] assign[=] call[name[Source].create_from_xml, parameter[name[s]]] if name[src].diffuse begin[:] <ast.AugAssign object at 0x7da1b2344d90> variable[src_skydir] assign[=] call[name[SkyCoord], parameter[]] variable[radec] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Attribute object at 0x7da1b23440d0>, <ast.Attribute object at 0x7da1b2345300>]]]].T variable[glonlat] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Attribute object at 0x7da1b2345030>, <ast.Attribute object at 0x7da1b2346530>]]]].T variable[offset] assign[=] call[name[self].skydir.separation, parameter[name[src_skydir]]].deg variable[offset_cel] assign[=] call[name[wcs_utils].sky_to_offset, parameter[name[self].skydir, call[name[radec]][tuple[[<ast.Slice object at 0x7da1b2347010>, <ast.Constant object at 0x7da1b2346e30>]]], call[name[radec]][tuple[[<ast.Slice object at 0x7da1b2344340>, <ast.Constant object at 0x7da1b2344f40>]]], constant[CEL]]] variable[offset_gal] assign[=] call[name[wcs_utils].sky_to_offset, parameter[name[self].skydir, call[name[glonlat]][tuple[[<ast.Slice object at 0x7da1b2345120>, <ast.Constant object at 
0x7da1b2345930>]]], call[name[glonlat]][tuple[[<ast.Slice object at 0x7da1b2344ee0>, <ast.Constant object at 0x7da1b2347eb0>]]], constant[GAL]]] variable[m0] assign[=] call[name[get_skydir_distance_mask], parameter[name[src_skydir], name[self].skydir, call[name[self].config][constant[src_radius]]]] variable[m1] assign[=] call[name[get_skydir_distance_mask], parameter[name[src_skydir], name[self].skydir, call[name[self].config][constant[src_radius_roi]]]] variable[m] assign[=] binary_operation[name[m0] <ast.BitAnd object at 0x7da2590d6b60> name[m1]] variable[srcs] assign[=] call[call[name[np].array, parameter[name[srcs]]]][name[m]] for taget[tuple[[<ast.Name object at 0x7da1b2347130>, <ast.Name object at 0x7da1b2345210>]]] in starred[call[name[enumerate], parameter[name[srcs]]]] begin[:] call[name[s].data][constant[offset]] assign[=] call[call[name[offset]][name[m]]][name[i]] call[name[s].data][constant[offset_ra]] assign[=] call[call[call[name[offset_cel]][tuple[[<ast.Slice object at 0x7da1b2344e50>, <ast.Constant object at 0x7da1b2345180>]]]][name[m]]][name[i]] call[name[s].data][constant[offset_dec]] assign[=] call[call[call[name[offset_cel]][tuple[[<ast.Slice object at 0x7da1b23444f0>, <ast.Constant object at 0x7da1b23467a0>]]]][name[m]]][name[i]] call[name[s].data][constant[offset_glon]] assign[=] call[call[call[name[offset_gal]][tuple[[<ast.Slice object at 0x7da1b2346320>, <ast.Constant object at 0x7da1b2345e10>]]]][name[m]]][name[i]] call[name[s].data][constant[offset_glat]] assign[=] call[call[call[name[offset_gal]][tuple[[<ast.Slice object at 0x7da1b2345420>, <ast.Constant object at 0x7da1b2346ce0>]]]][name[m]]][name[i]] call[name[self].load_source, parameter[name[s], constant[False]]] for taget[tuple[[<ast.Name object at 0x7da1b2346ec0>, <ast.Name object at 0x7da1b2347190>]]] in starred[call[name[enumerate], parameter[name[diffuse_srcs]]]] begin[:] call[name[self].load_source, parameter[name[s], constant[False]]] call[name[self]._build_src_index, 
parameter[]] return[name[srcs]]
keyword[def] identifier[load_xml] ( identifier[self] , identifier[xmlfile] ,** identifier[kwargs] ): literal[string] identifier[extdir] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[extdir] ) identifier[coordsys] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[xmlfile] ): identifier[xmlfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[fermipy] . identifier[PACKAGE_DATA] , literal[string] , identifier[xmlfile] ) identifier[root] = identifier[ElementTree] . identifier[ElementTree] ( identifier[file] = identifier[xmlfile] ). identifier[getroot] () identifier[diffuse_srcs] =[] identifier[srcs] =[] identifier[ra] , identifier[dec] =[],[] keyword[for] identifier[s] keyword[in] identifier[root] . identifier[findall] ( literal[string] ): identifier[src] = identifier[Source] . identifier[create_from_xml] ( identifier[s] , identifier[extdir] = identifier[extdir] ) keyword[if] identifier[src] . identifier[diffuse] : identifier[diffuse_srcs] +=[ identifier[src] ] keyword[else] : identifier[srcs] +=[ identifier[src] ] identifier[ra] +=[ identifier[src] [ literal[string] ]] identifier[dec] +=[ identifier[src] [ literal[string] ]] identifier[src_skydir] = identifier[SkyCoord] ( identifier[ra] = identifier[np] . identifier[array] ( identifier[ra] )* identifier[u] . identifier[deg] , identifier[dec] = identifier[np] . identifier[array] ( identifier[dec] )* identifier[u] . identifier[deg] ) identifier[radec] = identifier[np] . identifier[vstack] (( identifier[src_skydir] . identifier[ra] . identifier[deg] , identifier[src_skydir] . identifier[dec] . identifier[deg] )). identifier[T] identifier[glonlat] = identifier[np] . identifier[vstack] (( identifier[src_skydir] . identifier[galactic] . identifier[l] . identifier[deg] , identifier[src_skydir] . identifier[galactic] . identifier[b] . 
identifier[deg] )). identifier[T] identifier[offset] = identifier[self] . identifier[skydir] . identifier[separation] ( identifier[src_skydir] ). identifier[deg] identifier[offset_cel] = identifier[wcs_utils] . identifier[sky_to_offset] ( identifier[self] . identifier[skydir] , identifier[radec] [:, literal[int] ], identifier[radec] [:, literal[int] ], literal[string] ) identifier[offset_gal] = identifier[wcs_utils] . identifier[sky_to_offset] ( identifier[self] . identifier[skydir] , identifier[glonlat] [:, literal[int] ], identifier[glonlat] [:, literal[int] ], literal[string] ) identifier[m0] = identifier[get_skydir_distance_mask] ( identifier[src_skydir] , identifier[self] . identifier[skydir] , identifier[self] . identifier[config] [ literal[string] ]) identifier[m1] = identifier[get_skydir_distance_mask] ( identifier[src_skydir] , identifier[self] . identifier[skydir] , identifier[self] . identifier[config] [ literal[string] ], identifier[square] = keyword[True] , identifier[coordsys] = identifier[coordsys] ) identifier[m] =( identifier[m0] & identifier[m1] ) identifier[srcs] = identifier[np] . identifier[array] ( identifier[srcs] )[ identifier[m] ] keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[srcs] ): identifier[s] . identifier[data] [ literal[string] ]= identifier[offset] [ identifier[m] ][ identifier[i] ] identifier[s] . identifier[data] [ literal[string] ]= identifier[offset_cel] [:, literal[int] ][ identifier[m] ][ identifier[i] ] identifier[s] . identifier[data] [ literal[string] ]= identifier[offset_cel] [:, literal[int] ][ identifier[m] ][ identifier[i] ] identifier[s] . identifier[data] [ literal[string] ]= identifier[offset_gal] [:, literal[int] ][ identifier[m] ][ identifier[i] ] identifier[s] . identifier[data] [ literal[string] ]= identifier[offset_gal] [:, literal[int] ][ identifier[m] ][ identifier[i] ] identifier[self] . 
identifier[load_source] ( identifier[s] , keyword[False] , identifier[merge_sources] = identifier[self] . identifier[config] [ literal[string] ]) keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[diffuse_srcs] ): identifier[self] . identifier[load_source] ( identifier[s] , keyword[False] , identifier[merge_sources] = identifier[self] . identifier[config] [ literal[string] ]) identifier[self] . identifier[_build_src_index] () keyword[return] identifier[srcs]
def load_xml(self, xmlfile, **kwargs): """Load sources from an XML file.""" extdir = kwargs.get('extdir', self.extdir) coordsys = kwargs.get('coordsys', 'CEL') if not os.path.isfile(xmlfile): xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile) # depends on [control=['if'], data=[]] root = ElementTree.ElementTree(file=xmlfile).getroot() diffuse_srcs = [] srcs = [] (ra, dec) = ([], []) for s in root.findall('source'): src = Source.create_from_xml(s, extdir=extdir) if src.diffuse: diffuse_srcs += [src] # depends on [control=['if'], data=[]] else: srcs += [src] ra += [src['RAJ2000']] dec += [src['DEJ2000']] # depends on [control=['for'], data=['s']] src_skydir = SkyCoord(ra=np.array(ra) * u.deg, dec=np.array(dec) * u.deg) radec = np.vstack((src_skydir.ra.deg, src_skydir.dec.deg)).T glonlat = np.vstack((src_skydir.galactic.l.deg, src_skydir.galactic.b.deg)).T offset = self.skydir.separation(src_skydir).deg offset_cel = wcs_utils.sky_to_offset(self.skydir, radec[:, 0], radec[:, 1], 'CEL') offset_gal = wcs_utils.sky_to_offset(self.skydir, glonlat[:, 0], glonlat[:, 1], 'GAL') m0 = get_skydir_distance_mask(src_skydir, self.skydir, self.config['src_radius']) m1 = get_skydir_distance_mask(src_skydir, self.skydir, self.config['src_radius_roi'], square=True, coordsys=coordsys) m = m0 & m1 srcs = np.array(srcs)[m] for (i, s) in enumerate(srcs): s.data['offset'] = offset[m][i] s.data['offset_ra'] = offset_cel[:, 0][m][i] s.data['offset_dec'] = offset_cel[:, 1][m][i] s.data['offset_glon'] = offset_gal[:, 0][m][i] s.data['offset_glat'] = offset_gal[:, 1][m][i] self.load_source(s, False, merge_sources=self.config['merge_sources']) # depends on [control=['for'], data=[]] for (i, s) in enumerate(diffuse_srcs): self.load_source(s, False, merge_sources=self.config['merge_sources']) # depends on [control=['for'], data=[]] self._build_src_index() return srcs
def dual_obj_grad(alpha, beta, a, b, C, regul):
    """
    Compute objective value and gradients of dual objective.

    Parameters
    ----------
    alpha: array, shape = len(a)
    beta: array, shape = len(b)
        Current iterate of dual potentials.

    a: array, shape = len(a)
    b: array, shape = len(b)
        Input histograms (should be non-negative and sum to 1).

    C: array, shape = len(a) x len(b)
        Ground cost matrix.

    regul: Regularization object
        Should implement a delta_Omega(X) method.

    Returns
    -------
    obj: float
        Objective value (higher is better).

    grad_alpha: array, shape = len(a)
        Gradient w.r.t. alpha.

    grad_beta: array, shape = len(b)
        Gradient w.r.t. beta.
    """
    # Pairwise potentials minus cost: X[i, j] = alpha[i] + beta[j] - C[i, j].
    X = np.add.outer(alpha, beta) - C

    # Regularization term and its (elementwise) gradient w.r.t. X.
    val, G = regul.delta_Omega(X)

    # Dual objective: linear part minus the regularization contribution.
    obj = np.dot(alpha, a) + np.dot(beta, b) - np.sum(val)

    # Row sums of G enter the alpha gradient, column sums the beta gradient.
    grad_alpha = a - G.sum(axis=1)
    grad_beta = b - G.sum(axis=0)

    return obj, grad_alpha, grad_beta
def function[dual_obj_grad, parameter[alpha, beta, a, b, C, regul]]: constant[ Compute objective value and gradients of dual objective. Parameters ---------- alpha: array, shape = len(a) beta: array, shape = len(b) Current iterate of dual potentials. a: array, shape = len(a) b: array, shape = len(b) Input histograms (should be non-negative and sum to 1). C: array, shape = len(a) x len(b) Ground cost matrix. regul: Regularization object Should implement a delta_Omega(X) method. Returns ------- obj: float Objective value (higher is better). grad_alpha: array, shape = len(a) Gradient w.r.t. alpha. grad_beta: array, shape = len(b) Gradient w.r.t. beta. ] variable[obj] assign[=] binary_operation[call[name[np].dot, parameter[name[alpha], name[a]]] + call[name[np].dot, parameter[name[beta], name[b]]]] variable[grad_alpha] assign[=] call[name[a].copy, parameter[]] variable[grad_beta] assign[=] call[name[b].copy, parameter[]] variable[X] assign[=] binary_operation[binary_operation[call[name[alpha]][tuple[[<ast.Slice object at 0x7da1b1639de0>, <ast.Attribute object at 0x7da1b1639e10>]]] + name[beta]] - name[C]] <ast.Tuple object at 0x7da1b1638070> assign[=] call[name[regul].delta_Omega, parameter[name[X]]] <ast.AugAssign object at 0x7da1b1638430> <ast.AugAssign object at 0x7da1b163a1a0> <ast.AugAssign object at 0x7da1b163a170> return[tuple[[<ast.Name object at 0x7da1b1638580>, <ast.Name object at 0x7da1b163bfa0>, <ast.Name object at 0x7da1b163bb80>]]]
keyword[def] identifier[dual_obj_grad] ( identifier[alpha] , identifier[beta] , identifier[a] , identifier[b] , identifier[C] , identifier[regul] ): literal[string] identifier[obj] = identifier[np] . identifier[dot] ( identifier[alpha] , identifier[a] )+ identifier[np] . identifier[dot] ( identifier[beta] , identifier[b] ) identifier[grad_alpha] = identifier[a] . identifier[copy] () identifier[grad_beta] = identifier[b] . identifier[copy] () identifier[X] = identifier[alpha] [:, identifier[np] . identifier[newaxis] ]+ identifier[beta] - identifier[C] identifier[val] , identifier[G] = identifier[regul] . identifier[delta_Omega] ( identifier[X] ) identifier[obj] -= identifier[np] . identifier[sum] ( identifier[val] ) identifier[grad_alpha] -= identifier[G] . identifier[sum] ( identifier[axis] = literal[int] ) identifier[grad_beta] -= identifier[G] . identifier[sum] ( identifier[axis] = literal[int] ) keyword[return] identifier[obj] , identifier[grad_alpha] , identifier[grad_beta]
def dual_obj_grad(alpha, beta, a, b, C, regul): """ Compute objective value and gradients of dual objective. Parameters ---------- alpha: array, shape = len(a) beta: array, shape = len(b) Current iterate of dual potentials. a: array, shape = len(a) b: array, shape = len(b) Input histograms (should be non-negative and sum to 1). C: array, shape = len(a) x len(b) Ground cost matrix. regul: Regularization object Should implement a delta_Omega(X) method. Returns ------- obj: float Objective value (higher is better). grad_alpha: array, shape = len(a) Gradient w.r.t. alpha. grad_beta: array, shape = len(b) Gradient w.r.t. beta. """ obj = np.dot(alpha, a) + np.dot(beta, b) grad_alpha = a.copy() grad_beta = b.copy() # X[:, j] = alpha + beta[j] - C[:, j] X = alpha[:, np.newaxis] + beta - C # val.shape = len(b) # G.shape = len(a) x len(b) (val, G) = regul.delta_Omega(X) obj -= np.sum(val) grad_alpha -= G.sum(axis=1) grad_beta -= G.sum(axis=0) return (obj, grad_alpha, grad_beta)
def service_restart(name):
    '''
    Restart a "service" on the ssh server

    .. versionadded:: 2015.8.2
    '''
    # Run the restart command over the persistent server connection and
    # "scrape" the terminal output into a dict via the module-level parser.
    out, _err = DETAILS['server'].sendline('restart ' + name)
    return parse(out)
def function[service_restart, parameter[name]]: constant[ Restart a "service" on the ssh server .. versionadded:: 2015.8.2 ] variable[cmd] assign[=] binary_operation[constant[restart ] + name[name]] <ast.Tuple object at 0x7da20c6c74c0> assign[=] call[call[name[DETAILS]][constant[server]].sendline, parameter[name[cmd]]] return[call[name[parse], parameter[name[out]]]]
keyword[def] identifier[service_restart] ( identifier[name] ): literal[string] identifier[cmd] = literal[string] + identifier[name] identifier[out] , identifier[err] = identifier[DETAILS] [ literal[string] ]. identifier[sendline] ( identifier[cmd] ) keyword[return] identifier[parse] ( identifier[out] )
def service_restart(name): """ Restart a "service" on the ssh server .. versionadded:: 2015.8.2 """ cmd = 'restart ' + name # Send the command to execute (out, err) = DETAILS['server'].sendline(cmd) # "scrape" the output and return the right fields as a dict return parse(out)
def get_time_remaining_estimate(self):
    """
    In Mac OS X 10.7+
    Uses IOPSGetTimeRemainingEstimate to get time remaining estimate.

    In Mac OS X 10.6
    IOPSGetTimeRemainingEstimate is not available.
    If providing power source type is AC, returns TIME_REMAINING_UNLIMITED.
    Otherwise looks through all power sources returned by
    IOPSGetProvidingPowerSourceType and returns total estimate.

    Returns a float estimate, or one of the ``common.TIME_REMAINING_*``
    sentinel values when the estimate is unknown/unlimited.
    """
    if IOPSGetTimeRemainingEstimate is not None:  # Mac OS X 10.7+
        # -1.0 / -2.0 are the IOKit sentinel values for "unknown" and
        # "unlimited" respectively; anything else is seconds remaining.
        estimate = float(IOPSGetTimeRemainingEstimate())
        if estimate == -1.0:
            return common.TIME_REMAINING_UNKNOWN
        elif estimate == -2.0:
            return common.TIME_REMAINING_UNLIMITED
        else:
            return estimate / 60.0  # seconds -> minutes
    else:  # Mac OS X 10.6
        # Fixed typo in the warning message: "preset" -> "present".
        warnings.warn("IOPSGetTimeRemainingEstimate is not present", RuntimeWarning)
        blob = IOPSCopyPowerSourcesInfo()
        # Renamed local to avoid shadowing the builtin ``type``.
        source_type = IOPSGetProvidingPowerSourceType(blob)
        if source_type == common.POWER_TYPE_AC:
            return common.TIME_REMAINING_UNLIMITED
        else:
            # Sum time-to-empty over every present source.
            # NOTE(review): the 10.6 path returns the raw summed
            # kIOPSTimeToEmptyKey values without the /60 conversion the
            # 10.7 path applies -- units depend on IOKit; confirm.
            estimate = 0.0
            for source in IOPSCopyPowerSourcesList(blob):
                description = IOPSGetPowerSourceDescription(blob, source)
                if kIOPSIsPresentKey in description and description[kIOPSIsPresentKey] and kIOPSTimeToEmptyKey in description and description[kIOPSTimeToEmptyKey] > 0.0:
                    estimate += float(description[kIOPSTimeToEmptyKey])
            if estimate > 0.0:
                return float(estimate)
            else:
                return common.TIME_REMAINING_UNKNOWN
def function[get_time_remaining_estimate, parameter[self]]: constant[ In Mac OS X 10.7+ Uses IOPSGetTimeRemainingEstimate to get time remaining estimate. In Mac OS X 10.6 IOPSGetTimeRemainingEstimate is not available. If providing power source type is AC, returns TIME_REMAINING_UNLIMITED. Otherwise looks through all power sources returned by IOPSGetProvidingPowerSourceType and returns total estimate. ] if compare[name[IOPSGetTimeRemainingEstimate] is_not constant[None]] begin[:] variable[estimate] assign[=] call[name[float], parameter[call[name[IOPSGetTimeRemainingEstimate], parameter[]]]] if compare[name[estimate] equal[==] <ast.UnaryOp object at 0x7da1b1040100>] begin[:] return[name[common].TIME_REMAINING_UNKNOWN]
keyword[def] identifier[get_time_remaining_estimate] ( identifier[self] ): literal[string] keyword[if] identifier[IOPSGetTimeRemainingEstimate] keyword[is] keyword[not] keyword[None] : identifier[estimate] = identifier[float] ( identifier[IOPSGetTimeRemainingEstimate] ()) keyword[if] identifier[estimate] ==- literal[int] : keyword[return] identifier[common] . identifier[TIME_REMAINING_UNKNOWN] keyword[elif] identifier[estimate] ==- literal[int] : keyword[return] identifier[common] . identifier[TIME_REMAINING_UNLIMITED] keyword[else] : keyword[return] identifier[estimate] / literal[int] keyword[else] : identifier[warnings] . identifier[warn] ( literal[string] , identifier[RuntimeWarning] ) identifier[blob] = identifier[IOPSCopyPowerSourcesInfo] () identifier[type] = identifier[IOPSGetProvidingPowerSourceType] ( identifier[blob] ) keyword[if] identifier[type] == identifier[common] . identifier[POWER_TYPE_AC] : keyword[return] identifier[common] . identifier[TIME_REMAINING_UNLIMITED] keyword[else] : identifier[estimate] = literal[int] keyword[for] identifier[source] keyword[in] identifier[IOPSCopyPowerSourcesList] ( identifier[blob] ): identifier[description] = identifier[IOPSGetPowerSourceDescription] ( identifier[blob] , identifier[source] ) keyword[if] identifier[kIOPSIsPresentKey] keyword[in] identifier[description] keyword[and] identifier[description] [ identifier[kIOPSIsPresentKey] ] keyword[and] identifier[kIOPSTimeToEmptyKey] keyword[in] identifier[description] keyword[and] identifier[description] [ identifier[kIOPSTimeToEmptyKey] ]> literal[int] : identifier[estimate] += identifier[float] ( identifier[description] [ identifier[kIOPSTimeToEmptyKey] ]) keyword[if] identifier[estimate] > literal[int] : keyword[return] identifier[float] ( identifier[estimate] ) keyword[else] : keyword[return] identifier[common] . identifier[TIME_REMAINING_UNKNOWN]
def get_time_remaining_estimate(self): """ In Mac OS X 10.7+ Uses IOPSGetTimeRemainingEstimate to get time remaining estimate. In Mac OS X 10.6 IOPSGetTimeRemainingEstimate is not available. If providing power source type is AC, returns TIME_REMAINING_UNLIMITED. Otherwise looks through all power sources returned by IOPSGetProvidingPowerSourceType and returns total estimate. """ if IOPSGetTimeRemainingEstimate is not None: # Mac OS X 10.7+ estimate = float(IOPSGetTimeRemainingEstimate()) if estimate == -1.0: return common.TIME_REMAINING_UNKNOWN # depends on [control=['if'], data=[]] elif estimate == -2.0: return common.TIME_REMAINING_UNLIMITED # depends on [control=['if'], data=[]] else: return estimate / 60.0 # depends on [control=['if'], data=['IOPSGetTimeRemainingEstimate']] else: # Mac OS X 10.6 warnings.warn('IOPSGetTimeRemainingEstimate is not preset', RuntimeWarning) blob = IOPSCopyPowerSourcesInfo() type = IOPSGetProvidingPowerSourceType(blob) if type == common.POWER_TYPE_AC: return common.TIME_REMAINING_UNLIMITED # depends on [control=['if'], data=[]] else: estimate = 0.0 for source in IOPSCopyPowerSourcesList(blob): description = IOPSGetPowerSourceDescription(blob, source) if kIOPSIsPresentKey in description and description[kIOPSIsPresentKey] and (kIOPSTimeToEmptyKey in description) and (description[kIOPSTimeToEmptyKey] > 0.0): estimate += float(description[kIOPSTimeToEmptyKey]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['source']] if estimate > 0.0: return float(estimate) # depends on [control=['if'], data=['estimate']] else: return common.TIME_REMAINING_UNKNOWN
def tweak_message(message):
    """We piggyback on jinja2's babel_extract() (really, Babel's extract_*
    functions) but they don't support some things we need so this function
    will tweak the message.  Specifically:

    1) We strip whitespace from the msgid.  Jinja2 will only strip
       whitespace from the ends of a string so linebreaks show up in
       your .po files still.

    2) Babel doesn't support context (msgctxt).  We hack that in
       ourselves here.
    """
    if isinstance(message, basestring):
        return strip_whitespace(message)

    if not isinstance(message, tuple):
        # Anything that is neither a string nor a tuple passes through.
        return message

    # A tuple of 2 has context, 3 is plural, 4 is plural with context.
    size = len(message)
    if size == 2:
        msgid, ctxt = message
        return add_context(ctxt, msgid)
    if size == 3:
        singular, plural, num = message
        if isinstance(singular, basestring) and isinstance(plural, basestring):
            return (strip_whitespace(singular),
                    strip_whitespace(plural),
                    num)
        return message
    if size == 4:
        singular, plural, num, ctxt = message
        return (add_context(ctxt, strip_whitespace(singular)),
                add_context(ctxt, strip_whitespace(plural)),
                num)
    # Tuples of any other length are left untouched.
    return message
def function[tweak_message, parameter[message]]: constant[We piggyback on jinja2's babel_extract() (really, Babel's extract_* functions) but they don't support some things we need so this function will tweak the message. Specifically: 1) We strip whitespace from the msgid. Jinja2 will only strip whitespace from the ends of a string so linebreaks show up in your .po files still. 2) Babel doesn't support context (msgctxt). We hack that in ourselves here. ] if call[name[isinstance], parameter[name[message], name[basestring]]] begin[:] variable[message] assign[=] call[name[strip_whitespace], parameter[name[message]]] return[name[message]]
keyword[def] identifier[tweak_message] ( identifier[message] ): literal[string] keyword[if] identifier[isinstance] ( identifier[message] , identifier[basestring] ): identifier[message] = identifier[strip_whitespace] ( identifier[message] ) keyword[elif] identifier[isinstance] ( identifier[message] , identifier[tuple] ): keyword[if] identifier[len] ( identifier[message] )== literal[int] : identifier[message] = identifier[add_context] ( identifier[message] [ literal[int] ], identifier[message] [ literal[int] ]) keyword[elif] identifier[len] ( identifier[message] )== literal[int] : keyword[if] identifier[all] ( identifier[isinstance] ( identifier[x] , identifier[basestring] ) keyword[for] identifier[x] keyword[in] identifier[message] [: literal[int] ]): identifier[singular] , identifier[plural] , identifier[num] = identifier[message] identifier[message] =( identifier[strip_whitespace] ( identifier[singular] ), identifier[strip_whitespace] ( identifier[plural] ), identifier[num] ) keyword[elif] identifier[len] ( identifier[message] )== literal[int] : identifier[singular] , identifier[plural] , identifier[num] , identifier[ctxt] = identifier[message] identifier[message] =( identifier[add_context] ( identifier[ctxt] , identifier[strip_whitespace] ( identifier[singular] )), identifier[add_context] ( identifier[ctxt] , identifier[strip_whitespace] ( identifier[plural] )), identifier[num] ) keyword[return] identifier[message]
def tweak_message(message): """We piggyback on jinja2's babel_extract() (really, Babel's extract_* functions) but they don't support some things we need so this function will tweak the message. Specifically: 1) We strip whitespace from the msgid. Jinja2 will only strip whitespace from the ends of a string so linebreaks show up in your .po files still. 2) Babel doesn't support context (msgctxt). We hack that in ourselves here. """ if isinstance(message, basestring): message = strip_whitespace(message) # depends on [control=['if'], data=[]] elif isinstance(message, tuple): # A tuple of 2 has context, 3 is plural, 4 is plural with context if len(message) == 2: message = add_context(message[1], message[0]) # depends on [control=['if'], data=[]] elif len(message) == 3: if all((isinstance(x, basestring) for x in message[:2])): (singular, plural, num) = message message = (strip_whitespace(singular), strip_whitespace(plural), num) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif len(message) == 4: (singular, plural, num, ctxt) = message message = (add_context(ctxt, strip_whitespace(singular)), add_context(ctxt, strip_whitespace(plural)), num) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return message
def download_data(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam):
    """
    Downloads input data files from S3.

    :type masterIP: MasterAddress
    """
    # Mirror each remote input into HDFS via the conductor helper,
    # logging every transfer as it starts.
    transfers = (('known sites file', known_snps, hdfs_snps),
                 ('input BAM', bam, hdfs_bam))
    for label, src_url, hdfs_path in transfers:
        log.info('Downloading %s %%s to %%s.' % label, src_url, hdfs_path)
        call_conductor(job, master_ip, src_url, hdfs_path, memory=inputs.memory)
def function[download_data, parameter[job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam]]: constant[ Downloads input data files from S3. :type masterIP: MasterAddress ] call[name[log].info, parameter[constant[Downloading known sites file %s to %s.], name[known_snps], name[hdfs_snps]]] call[name[call_conductor], parameter[name[job], name[master_ip], name[known_snps], name[hdfs_snps]]] call[name[log].info, parameter[constant[Downloading input BAM %s to %s.], name[bam], name[hdfs_bam]]] call[name[call_conductor], parameter[name[job], name[master_ip], name[bam], name[hdfs_bam]]]
keyword[def] identifier[download_data] ( identifier[job] , identifier[master_ip] , identifier[inputs] , identifier[known_snps] , identifier[bam] , identifier[hdfs_snps] , identifier[hdfs_bam] ): literal[string] identifier[log] . identifier[info] ( literal[string] , identifier[known_snps] , identifier[hdfs_snps] ) identifier[call_conductor] ( identifier[job] , identifier[master_ip] , identifier[known_snps] , identifier[hdfs_snps] , identifier[memory] = identifier[inputs] . identifier[memory] ) identifier[log] . identifier[info] ( literal[string] , identifier[bam] , identifier[hdfs_bam] ) identifier[call_conductor] ( identifier[job] , identifier[master_ip] , identifier[bam] , identifier[hdfs_bam] , identifier[memory] = identifier[inputs] . identifier[memory] )
def download_data(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam): """ Downloads input data files from S3. :type masterIP: MasterAddress """ log.info('Downloading known sites file %s to %s.', known_snps, hdfs_snps) call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory) log.info('Downloading input BAM %s to %s.', bam, hdfs_bam) call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory)
def perform_oauth(email, master_token, android_id, service, app, client_sig,
                  device_country='us', operatorCountry='us', lang='en',
                  sdk_version=17):
    """
    Use a master token from master_login to perform OAuth to a specific
    Google service.

    Return a dict, eg::

        {
            'Auth': '...',
            'LSID': '...',
            'SID': '..',
            'issueAdvice': 'auto',
            'services': 'hist,mail,googleme,...'
        }

    To authenticate requests to this service, include a header
    ``Authorization: GoogleLogin auth=res['Auth']``.
    """
    # NOTE(review): the ``operatorCountry`` argument is currently unused --
    # ``device_country`` is sent for both country fields below.  Preserved
    # as-is to avoid changing request contents; confirm intent upstream.
    data = dict(
        accountType='HOSTED_OR_GOOGLE',
        Email=email,
        has_permission=1,
        EncryptedPasswd=master_token,
        service=service,
        source='android',
        androidId=android_id,
        app=app,
        client_sig=client_sig,
        device_country=device_country,
        operatorCountry=device_country,
        lang=lang,
        sdk_version=sdk_version,
    )
    return _perform_auth_request(data)
def function[perform_oauth, parameter[email, master_token, android_id, service, app, client_sig, device_country, operatorCountry, lang, sdk_version]]: constant[ Use a master token from master_login to perform OAuth to a specific Google service. Return a dict, eg:: { 'Auth': '...', 'LSID': '...', 'SID': '..', 'issueAdvice': 'auto', 'services': 'hist,mail,googleme,...' } To authenticate requests to this service, include a header ``Authorization: GoogleLogin auth=res['Auth']``. ] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da204565960>, <ast.Constant object at 0x7da204566bc0>, <ast.Constant object at 0x7da204566ec0>, <ast.Constant object at 0x7da204565390>, <ast.Constant object at 0x7da204564eb0>, <ast.Constant object at 0x7da204567280>, <ast.Constant object at 0x7da204567ee0>, <ast.Constant object at 0x7da204567610>, <ast.Constant object at 0x7da204566c20>, <ast.Constant object at 0x7da204567a60>, <ast.Constant object at 0x7da2045663e0>, <ast.Constant object at 0x7da2045668f0>, <ast.Constant object at 0x7da204565060>], [<ast.Constant object at 0x7da204564ac0>, <ast.Name object at 0x7da2045660e0>, <ast.Constant object at 0x7da204566230>, <ast.Name object at 0x7da204567100>, <ast.Name object at 0x7da204567760>, <ast.Constant object at 0x7da204564520>, <ast.Name object at 0x7da204565540>, <ast.Name object at 0x7da204564d90>, <ast.Name object at 0x7da204566830>, <ast.Name object at 0x7da204565e70>, <ast.Name object at 0x7da2045664a0>, <ast.Name object at 0x7da204564970>, <ast.Name object at 0x7da204564a00>]] return[call[name[_perform_auth_request], parameter[name[data]]]]
keyword[def] identifier[perform_oauth] ( identifier[email] , identifier[master_token] , identifier[android_id] , identifier[service] , identifier[app] , identifier[client_sig] , identifier[device_country] = literal[string] , identifier[operatorCountry] = literal[string] , identifier[lang] = literal[string] , identifier[sdk_version] = literal[int] ): literal[string] identifier[data] ={ literal[string] : literal[string] , literal[string] : identifier[email] , literal[string] : literal[int] , literal[string] : identifier[master_token] , literal[string] : identifier[service] , literal[string] : literal[string] , literal[string] : identifier[android_id] , literal[string] : identifier[app] , literal[string] : identifier[client_sig] , literal[string] : identifier[device_country] , literal[string] : identifier[device_country] , literal[string] : identifier[lang] , literal[string] : identifier[sdk_version] } keyword[return] identifier[_perform_auth_request] ( identifier[data] )
def perform_oauth(email, master_token, android_id, service, app, client_sig, device_country='us', operatorCountry='us', lang='en', sdk_version=17): """ Use a master token from master_login to perform OAuth to a specific Google service. Return a dict, eg:: { 'Auth': '...', 'LSID': '...', 'SID': '..', 'issueAdvice': 'auto', 'services': 'hist,mail,googleme,...' } To authenticate requests to this service, include a header ``Authorization: GoogleLogin auth=res['Auth']``. """ data = {'accountType': 'HOSTED_OR_GOOGLE', 'Email': email, 'has_permission': 1, 'EncryptedPasswd': master_token, 'service': service, 'source': 'android', 'androidId': android_id, 'app': app, 'client_sig': client_sig, 'device_country': device_country, 'operatorCountry': device_country, 'lang': lang, 'sdk_version': sdk_version} return _perform_auth_request(data)
def validate_offset(reference_event, estimated_event, t_collar=0.200, percentage_of_length=0.5):
    """Validate estimated event based on event offset

    Parameters
    ----------
    reference_event : dict
        Reference event.

    estimated_event : dict
        Estimated event.

    t_collar : float > 0, seconds
        First condition, Time collar with which the estimated offset has to
        be in order to be consider valid estimation. Default value 0.2

    percentage_of_length : float in [0, 1]
        Second condition, percentage of the length within which the
        estimated offset has to be in order to be consider valid estimation.
        Default value 0.5

    Returns
    -------
    bool
        True when the estimated offset lies within
        ``max(t_collar, percentage_of_length * annotated_length)`` of the
        reference offset.  Returns ``None`` (preserved legacy behaviour)
        when neither recognised field-naming style is present in both
        events.
    """
    # Both field-naming styles ('event_onset'/'event_offset' and
    # 'onset'/'offset') apply the same rule, so resolve the key names once
    # instead of duplicating the computation per style.
    for onset_key, offset_key in (('event_onset', 'event_offset'),
                                  ('onset', 'offset')):
        if offset_key in reference_event and offset_key in estimated_event:
            annotated_length = reference_event[onset_key.replace('onset', 'offset')] - reference_event[onset_key]
            return math.fabs(reference_event[offset_key] - estimated_event[offset_key]) <= max(
                t_collar,
                percentage_of_length * annotated_length)

    # Unknown schema: fall through with an explicit None (was implicit).
    return None
def function[validate_offset, parameter[reference_event, estimated_event, t_collar, percentage_of_length]]: constant[Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be consider valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be consider valid estimation. Default value 0.5 Returns ------- bool ] if <ast.BoolOp object at 0x7da18f00eb00> begin[:] variable[annotated_length] assign[=] binary_operation[call[name[reference_event]][constant[event_offset]] - call[name[reference_event]][constant[event_onset]]] return[compare[call[name[math].fabs, parameter[binary_operation[call[name[reference_event]][constant[event_offset]] - call[name[estimated_event]][constant[event_offset]]]]] less_or_equal[<=] call[name[max], parameter[name[t_collar], binary_operation[name[percentage_of_length] * name[annotated_length]]]]]]
keyword[def] identifier[validate_offset] ( identifier[reference_event] , identifier[estimated_event] , identifier[t_collar] = literal[int] , identifier[percentage_of_length] = literal[int] ): literal[string] keyword[if] literal[string] keyword[in] identifier[reference_event] keyword[and] literal[string] keyword[in] identifier[estimated_event] : identifier[annotated_length] = identifier[reference_event] [ literal[string] ]- identifier[reference_event] [ literal[string] ] keyword[return] identifier[math] . identifier[fabs] ( identifier[reference_event] [ literal[string] ]- identifier[estimated_event] [ literal[string] ])<= identifier[max] ( identifier[t_collar] , identifier[percentage_of_length] * identifier[annotated_length] ) keyword[elif] literal[string] keyword[in] identifier[reference_event] keyword[and] literal[string] keyword[in] identifier[estimated_event] : identifier[annotated_length] = identifier[reference_event] [ literal[string] ]- identifier[reference_event] [ literal[string] ] keyword[return] identifier[math] . identifier[fabs] ( identifier[reference_event] [ literal[string] ]- identifier[estimated_event] [ literal[string] ])<= identifier[max] ( identifier[t_collar] , identifier[percentage_of_length] * identifier[annotated_length] )
def validate_offset(reference_event, estimated_event, t_collar=0.2, percentage_of_length=0.5): """Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be consider valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be consider valid estimation. Default value 0.5 Returns ------- bool """ # Detect field naming style used and validate onset if 'event_offset' in reference_event and 'event_offset' in estimated_event: annotated_length = reference_event['event_offset'] - reference_event['event_onset'] return math.fabs(reference_event['event_offset'] - estimated_event['event_offset']) <= max(t_collar, percentage_of_length * annotated_length) # depends on [control=['if'], data=[]] elif 'offset' in reference_event and 'offset' in estimated_event: annotated_length = reference_event['offset'] - reference_event['onset'] return math.fabs(reference_event['offset'] - estimated_event['offset']) <= max(t_collar, percentage_of_length * annotated_length) # depends on [control=['if'], data=[]]
def start_api_and_rpc_workers(self):
    """Initializes eventlet and starts wait for workers to exit.

    Spawns the workers returned from serve_rpc
    """
    # Green-thread pool hosts the RPC worker; block until it finishes.
    worker_pool = eventlet.GreenPool()
    rpc_server = self.serve_rpc()
    worker_pool.spawn(rpc_server.wait)
    worker_pool.waitall()
def function[start_api_and_rpc_workers, parameter[self]]: constant[Initializes eventlet and starts wait for workers to exit. Spawns the workers returned from serve_rpc ] variable[pool] assign[=] call[name[eventlet].GreenPool, parameter[]] variable[quark_rpc] assign[=] call[name[self].serve_rpc, parameter[]] call[name[pool].spawn, parameter[name[quark_rpc].wait]] call[name[pool].waitall, parameter[]]
keyword[def] identifier[start_api_and_rpc_workers] ( identifier[self] ): literal[string] identifier[pool] = identifier[eventlet] . identifier[GreenPool] () identifier[quark_rpc] = identifier[self] . identifier[serve_rpc] () identifier[pool] . identifier[spawn] ( identifier[quark_rpc] . identifier[wait] ) identifier[pool] . identifier[waitall] ()
def start_api_and_rpc_workers(self): """Initializes eventlet and starts wait for workers to exit. Spawns the workers returned from serve_rpc """ pool = eventlet.GreenPool() quark_rpc = self.serve_rpc() pool.spawn(quark_rpc.wait) pool.waitall()
def list(self, search_opts=None):
    """Get a list of Plugins.

    :param search_opts: optional mapping of search criteria, encoded into
        the request query string by the base helper.
    """
    query = base.get_query_string(search_opts)
    return self._list('/plugins{0}'.format(query), 'plugins')
def function[list, parameter[self, search_opts]]: constant[Get a list of Plugins.] variable[query] assign[=] call[name[base].get_query_string, parameter[name[search_opts]]] return[call[name[self]._list, parameter[binary_operation[constant[/plugins%s] <ast.Mod object at 0x7da2590d6920> name[query]], constant[plugins]]]]
keyword[def] identifier[list] ( identifier[self] , identifier[search_opts] = keyword[None] ): literal[string] identifier[query] = identifier[base] . identifier[get_query_string] ( identifier[search_opts] ) keyword[return] identifier[self] . identifier[_list] ( literal[string] % identifier[query] , literal[string] )
def list(self, search_opts=None): """Get a list of Plugins.""" query = base.get_query_string(search_opts) return self._list('/plugins%s' % query, 'plugins')
def _format(self, method="sparql", dt_format="turtle"):
    """ Formats the value in various formats

        args:
            method: ['sparql', 'json', 'pyuri']
            dt_format: ['turtle','uri'] used in conjuction with the
                    'sparql' method
        raises:
            NotImplementedError: when ``method`` is not a registered
                format method.
    """
    # Narrowed the try body to the dictionary lookup only: previously a
    # KeyError raised *inside* the formatter call itself would be
    # misreported as an invalid format method.
    try:
        formatter = __FORMAT_OPTIONS__[method]
    except KeyError:
        raise NotImplementedError("'{}' is not a valid format method"
                                  "".format(method))
    return formatter(self, dt_format=dt_format)
def function[_format, parameter[self, method, dt_format]]: constant[ Rormats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjuction with the 'sparql' method ] <ast.Try object at 0x7da18f721c30>
keyword[def] identifier[_format] ( identifier[self] , identifier[method] = literal[string] , identifier[dt_format] = literal[string] ): literal[string] keyword[try] : keyword[return] identifier[__FORMAT_OPTIONS__] [ identifier[method] ]( identifier[self] , identifier[dt_format] = identifier[dt_format] ) keyword[except] identifier[KeyError] : keyword[raise] identifier[NotImplementedError] ( literal[string] literal[string] . identifier[format] ( identifier[method] ))
def _format(self, method='sparql', dt_format='turtle'): """ Rormats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjuction with the 'sparql' method """ try: return __FORMAT_OPTIONS__[method](self, dt_format=dt_format) # depends on [control=['try'], data=[]] except KeyError: raise NotImplementedError("'{}' is not a valid format method".format(method)) # depends on [control=['except'], data=[]]
def get_chassis_datacenter(host=None, admin_username=None, admin_password=None): ''' Get the datacenter of the chassis. host The chassis host. admin_username The username used to access the chassis. admin_password The password used to access the chassis. CLI Example: .. code-block:: bash salt '*' dracr.set_chassis_location host=111.222.333.444 admin_username=root admin_password=secret ''' return get_general('cfgLocation', 'cfgLocationDatacenter', host=host, admin_username=admin_username, admin_password=admin_password)
def function[get_chassis_datacenter, parameter[host, admin_username, admin_password]]: constant[ Get the datacenter of the chassis. host The chassis host. admin_username The username used to access the chassis. admin_password The password used to access the chassis. CLI Example: .. code-block:: bash salt '*' dracr.set_chassis_location host=111.222.333.444 admin_username=root admin_password=secret ] return[call[name[get_general], parameter[constant[cfgLocation], constant[cfgLocationDatacenter]]]]
keyword[def] identifier[get_chassis_datacenter] ( identifier[host] = keyword[None] , identifier[admin_username] = keyword[None] , identifier[admin_password] = keyword[None] ): literal[string] keyword[return] identifier[get_general] ( literal[string] , literal[string] , identifier[host] = identifier[host] , identifier[admin_username] = identifier[admin_username] , identifier[admin_password] = identifier[admin_password] )
def get_chassis_datacenter(host=None, admin_username=None, admin_password=None): """ Get the datacenter of the chassis. host The chassis host. admin_username The username used to access the chassis. admin_password The password used to access the chassis. CLI Example: .. code-block:: bash salt '*' dracr.set_chassis_location host=111.222.333.444 admin_username=root admin_password=secret """ return get_general('cfgLocation', 'cfgLocationDatacenter', host=host, admin_username=admin_username, admin_password=admin_password)
def extend(self, items): """ extend items and print them to stdout using the new line separator """ print('\n'.join(items)) super(MyList, self).extend(items)
def function[extend, parameter[self, items]]: constant[ extend items and print them to stdout using the new line separator ] call[name[print], parameter[call[constant[ ].join, parameter[name[items]]]]] call[call[name[super], parameter[name[MyList], name[self]]].extend, parameter[name[items]]]
keyword[def] identifier[extend] ( identifier[self] , identifier[items] ): literal[string] identifier[print] ( literal[string] . identifier[join] ( identifier[items] )) identifier[super] ( identifier[MyList] , identifier[self] ). identifier[extend] ( identifier[items] )
def extend(self, items): """ extend items and print them to stdout using the new line separator """ print('\n'.join(items)) super(MyList, self).extend(items)
def print_event(attributes=[]): """ Function that returns a Python callback to pretty print the events. """ def python_callback(event): cls_name = event.__class__.__name__ attrs = ', '.join(['{attr}={val}'.format(attr=attr, val=event.__dict__[attr]) for attr in attributes]) print('{cls_name}({attrs})'.format(cls_name=cls_name, attrs=attrs)) return python_callback
def function[print_event, parameter[attributes]]: constant[ Function that returns a Python callback to pretty print the events. ] def function[python_callback, parameter[event]]: variable[cls_name] assign[=] name[event].__class__.__name__ variable[attrs] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da20c76e260>]] call[name[print], parameter[call[constant[{cls_name}({attrs})].format, parameter[]]]] return[name[python_callback]]
keyword[def] identifier[print_event] ( identifier[attributes] =[]): literal[string] keyword[def] identifier[python_callback] ( identifier[event] ): identifier[cls_name] = identifier[event] . identifier[__class__] . identifier[__name__] identifier[attrs] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[attr] = identifier[attr] , identifier[val] = identifier[event] . identifier[__dict__] [ identifier[attr] ]) keyword[for] identifier[attr] keyword[in] identifier[attributes] ]) identifier[print] ( literal[string] . identifier[format] ( identifier[cls_name] = identifier[cls_name] , identifier[attrs] = identifier[attrs] )) keyword[return] identifier[python_callback]
def print_event(attributes=[]): """ Function that returns a Python callback to pretty print the events. """ def python_callback(event): cls_name = event.__class__.__name__ attrs = ', '.join(['{attr}={val}'.format(attr=attr, val=event.__dict__[attr]) for attr in attributes]) print('{cls_name}({attrs})'.format(cls_name=cls_name, attrs=attrs)) return python_callback
def search(self, source, destination = None, display = None, component = None, q = None, algo = 'DFS', reverse = False, **kargs): ''' API: search(self, source, destination = None, display = None, component = None, q = Stack(), algo = 'DFS', reverse = False, **kargs) Description: Generic search method. Changes behavior (dfs,bfs,dijkstra,prim) according to algo argument. if destination is not specified: This method determines all nodes reachable from "source" ie. creates precedence tree and returns it (dictionary). if destionation is given: If there exists a path from "source" to "destination" it will return list of the nodes is this path. If there is no such path, it will return the precedence tree constructed from source (dictionary). Optionally, it marks all nodes reachable from "source" with a component number. The variable "q" determines the order in which the nodes are searched. Input: source: Search starts from node with this name. destination: Destination node name. display: Display method. algo: Algortihm that specifies search. Available algortihms are 'DFS', 'BFS', 'Dijkstra' and 'Prim'. reverse: Search goes in reverse arc directions if True. kargs: Additional keyword arguments. Post: Nodes will have 'component' attribute that will have component number as value (if component argument provided). Color attribute of nodes and edges may change. Return: Returns predecessor tree in dictionary form if destination is not specified, returns list of node names in the path from source to destionation if destionation is specified and there is a path. If there is no path returns predecessor tree in dictionary form. See description section. 
''' if display == None: display = self.attr['display'] else: self.set_display_mode(display) if algo == 'DFS': if q is None: q = Stack() self.get_node(source).set_attr('component', component) elif algo == 'BFS' or algo == 'UnweightedSPT': if q is None: q = Queue() self.get_node(source).set_attr('component', component) elif algo == 'Dijkstra' or algo == 'Prim': if q is None: q = PriorityQueue() else: print("Unknown search algorithm...exiting") return neighbors = self.neighbors if self.graph_type == DIRECTED_GRAPH and reverse: neighbors = self.in_neighbors for i in self.get_node_list(): self.get_node(i).set_attr('label', '-') self.get_node(i).attr.pop('priority', None) self.get_node(i).set_attr('distance', None) self.get_node(i).set_attr('color', 'black') for j in neighbors[i]: if reverse: self.set_edge_attr(j, i, 'color', 'black') else: self.set_edge_attr(i, j, 'color', 'black') self.display() pred = {} self.process_edge_search(None, source, pred, q, component, algo, **kargs) found = True if source != destination: found = False while not q.isEmpty() and not found: current = q.peek() if self.get_node(current).get_attr('color') == 'green': q.remove(current) continue self.process_node_search(current, q, **kargs) self.get_node(current).set_attr('color', 'blue') if current != source: if reverse: self.set_edge_attr(current, pred[current], 'color', 'green') else: self.set_edge_attr(pred[current], current, 'color', 'green') if current == destination: found = True break self.display() for n in neighbors[current]: if self.get_node(n).get_attr('color') != 'green': if reverse: self.set_edge_attr(n, current, 'color', 'yellow') else: self.set_edge_attr(current, n, 'color', 'yellow') self.display() self.process_edge_search(current, n, pred, q, component, algo, **kargs) if reverse: self.set_edge_attr(n, current, 'color', 'black') else: self.set_edge_attr(current, n, 'color', 'black') q.remove(current) self.get_node(current).set_attr('color', 'green') self.display() if found: path = 
[destination] current = destination while current != source: path.insert(0, pred[current]) current = pred[current] return path if destination == None: return pred else: return None
def function[search, parameter[self, source, destination, display, component, q, algo, reverse]]: constant[ API: search(self, source, destination = None, display = None, component = None, q = Stack(), algo = 'DFS', reverse = False, **kargs) Description: Generic search method. Changes behavior (dfs,bfs,dijkstra,prim) according to algo argument. if destination is not specified: This method determines all nodes reachable from "source" ie. creates precedence tree and returns it (dictionary). if destionation is given: If there exists a path from "source" to "destination" it will return list of the nodes is this path. If there is no such path, it will return the precedence tree constructed from source (dictionary). Optionally, it marks all nodes reachable from "source" with a component number. The variable "q" determines the order in which the nodes are searched. Input: source: Search starts from node with this name. destination: Destination node name. display: Display method. algo: Algortihm that specifies search. Available algortihms are 'DFS', 'BFS', 'Dijkstra' and 'Prim'. reverse: Search goes in reverse arc directions if True. kargs: Additional keyword arguments. Post: Nodes will have 'component' attribute that will have component number as value (if component argument provided). Color attribute of nodes and edges may change. Return: Returns predecessor tree in dictionary form if destination is not specified, returns list of node names in the path from source to destionation if destionation is specified and there is a path. If there is no path returns predecessor tree in dictionary form. See description section. 
] if compare[name[display] equal[==] constant[None]] begin[:] variable[display] assign[=] call[name[self].attr][constant[display]] if compare[name[algo] equal[==] constant[DFS]] begin[:] if compare[name[q] is constant[None]] begin[:] variable[q] assign[=] call[name[Stack], parameter[]] call[call[name[self].get_node, parameter[name[source]]].set_attr, parameter[constant[component], name[component]]] variable[neighbors] assign[=] name[self].neighbors if <ast.BoolOp object at 0x7da1b02e6530> begin[:] variable[neighbors] assign[=] name[self].in_neighbors for taget[name[i]] in starred[call[name[self].get_node_list, parameter[]]] begin[:] call[call[name[self].get_node, parameter[name[i]]].set_attr, parameter[constant[label], constant[-]]] call[call[name[self].get_node, parameter[name[i]]].attr.pop, parameter[constant[priority], constant[None]]] call[call[name[self].get_node, parameter[name[i]]].set_attr, parameter[constant[distance], constant[None]]] call[call[name[self].get_node, parameter[name[i]]].set_attr, parameter[constant[color], constant[black]]] for taget[name[j]] in starred[call[name[neighbors]][name[i]]] begin[:] if name[reverse] begin[:] call[name[self].set_edge_attr, parameter[name[j], name[i], constant[color], constant[black]]] call[name[self].display, parameter[]] variable[pred] assign[=] dictionary[[], []] call[name[self].process_edge_search, parameter[constant[None], name[source], name[pred], name[q], name[component], name[algo]]] variable[found] assign[=] constant[True] if compare[name[source] not_equal[!=] name[destination]] begin[:] variable[found] assign[=] constant[False] while <ast.BoolOp object at 0x7da1b0529ed0> begin[:] variable[current] assign[=] call[name[q].peek, parameter[]] if compare[call[call[name[self].get_node, parameter[name[current]]].get_attr, parameter[constant[color]]] equal[==] constant[green]] begin[:] call[name[q].remove, parameter[name[current]]] continue call[name[self].process_node_search, parameter[name[current], name[q]]] 
call[call[name[self].get_node, parameter[name[current]]].set_attr, parameter[constant[color], constant[blue]]] if compare[name[current] not_equal[!=] name[source]] begin[:] if name[reverse] begin[:] call[name[self].set_edge_attr, parameter[name[current], call[name[pred]][name[current]], constant[color], constant[green]]] if compare[name[current] equal[==] name[destination]] begin[:] variable[found] assign[=] constant[True] break call[name[self].display, parameter[]] for taget[name[n]] in starred[call[name[neighbors]][name[current]]] begin[:] if compare[call[call[name[self].get_node, parameter[name[n]]].get_attr, parameter[constant[color]]] not_equal[!=] constant[green]] begin[:] if name[reverse] begin[:] call[name[self].set_edge_attr, parameter[name[n], name[current], constant[color], constant[yellow]]] call[name[self].display, parameter[]] call[name[self].process_edge_search, parameter[name[current], name[n], name[pred], name[q], name[component], name[algo]]] if name[reverse] begin[:] call[name[self].set_edge_attr, parameter[name[n], name[current], constant[color], constant[black]]] call[name[q].remove, parameter[name[current]]] call[call[name[self].get_node, parameter[name[current]]].set_attr, parameter[constant[color], constant[green]]] call[name[self].display, parameter[]] if name[found] begin[:] variable[path] assign[=] list[[<ast.Name object at 0x7da1b04d7010>]] variable[current] assign[=] name[destination] while compare[name[current] not_equal[!=] name[source]] begin[:] call[name[path].insert, parameter[constant[0], call[name[pred]][name[current]]]] variable[current] assign[=] call[name[pred]][name[current]] return[name[path]] if compare[name[destination] equal[==] constant[None]] begin[:] return[name[pred]]
keyword[def] identifier[search] ( identifier[self] , identifier[source] , identifier[destination] = keyword[None] , identifier[display] = keyword[None] , identifier[component] = keyword[None] , identifier[q] = keyword[None] , identifier[algo] = literal[string] , identifier[reverse] = keyword[False] ,** identifier[kargs] ): literal[string] keyword[if] identifier[display] == keyword[None] : identifier[display] = identifier[self] . identifier[attr] [ literal[string] ] keyword[else] : identifier[self] . identifier[set_display_mode] ( identifier[display] ) keyword[if] identifier[algo] == literal[string] : keyword[if] identifier[q] keyword[is] keyword[None] : identifier[q] = identifier[Stack] () identifier[self] . identifier[get_node] ( identifier[source] ). identifier[set_attr] ( literal[string] , identifier[component] ) keyword[elif] identifier[algo] == literal[string] keyword[or] identifier[algo] == literal[string] : keyword[if] identifier[q] keyword[is] keyword[None] : identifier[q] = identifier[Queue] () identifier[self] . identifier[get_node] ( identifier[source] ). identifier[set_attr] ( literal[string] , identifier[component] ) keyword[elif] identifier[algo] == literal[string] keyword[or] identifier[algo] == literal[string] : keyword[if] identifier[q] keyword[is] keyword[None] : identifier[q] = identifier[PriorityQueue] () keyword[else] : identifier[print] ( literal[string] ) keyword[return] identifier[neighbors] = identifier[self] . identifier[neighbors] keyword[if] identifier[self] . identifier[graph_type] == identifier[DIRECTED_GRAPH] keyword[and] identifier[reverse] : identifier[neighbors] = identifier[self] . identifier[in_neighbors] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[get_node_list] (): identifier[self] . identifier[get_node] ( identifier[i] ). identifier[set_attr] ( literal[string] , literal[string] ) identifier[self] . identifier[get_node] ( identifier[i] ). identifier[attr] . 
identifier[pop] ( literal[string] , keyword[None] ) identifier[self] . identifier[get_node] ( identifier[i] ). identifier[set_attr] ( literal[string] , keyword[None] ) identifier[self] . identifier[get_node] ( identifier[i] ). identifier[set_attr] ( literal[string] , literal[string] ) keyword[for] identifier[j] keyword[in] identifier[neighbors] [ identifier[i] ]: keyword[if] identifier[reverse] : identifier[self] . identifier[set_edge_attr] ( identifier[j] , identifier[i] , literal[string] , literal[string] ) keyword[else] : identifier[self] . identifier[set_edge_attr] ( identifier[i] , identifier[j] , literal[string] , literal[string] ) identifier[self] . identifier[display] () identifier[pred] ={} identifier[self] . identifier[process_edge_search] ( keyword[None] , identifier[source] , identifier[pred] , identifier[q] , identifier[component] , identifier[algo] , ** identifier[kargs] ) identifier[found] = keyword[True] keyword[if] identifier[source] != identifier[destination] : identifier[found] = keyword[False] keyword[while] keyword[not] identifier[q] . identifier[isEmpty] () keyword[and] keyword[not] identifier[found] : identifier[current] = identifier[q] . identifier[peek] () keyword[if] identifier[self] . identifier[get_node] ( identifier[current] ). identifier[get_attr] ( literal[string] )== literal[string] : identifier[q] . identifier[remove] ( identifier[current] ) keyword[continue] identifier[self] . identifier[process_node_search] ( identifier[current] , identifier[q] ,** identifier[kargs] ) identifier[self] . identifier[get_node] ( identifier[current] ). identifier[set_attr] ( literal[string] , literal[string] ) keyword[if] identifier[current] != identifier[source] : keyword[if] identifier[reverse] : identifier[self] . identifier[set_edge_attr] ( identifier[current] , identifier[pred] [ identifier[current] ], literal[string] , literal[string] ) keyword[else] : identifier[self] . 
identifier[set_edge_attr] ( identifier[pred] [ identifier[current] ], identifier[current] , literal[string] , literal[string] ) keyword[if] identifier[current] == identifier[destination] : identifier[found] = keyword[True] keyword[break] identifier[self] . identifier[display] () keyword[for] identifier[n] keyword[in] identifier[neighbors] [ identifier[current] ]: keyword[if] identifier[self] . identifier[get_node] ( identifier[n] ). identifier[get_attr] ( literal[string] )!= literal[string] : keyword[if] identifier[reverse] : identifier[self] . identifier[set_edge_attr] ( identifier[n] , identifier[current] , literal[string] , literal[string] ) keyword[else] : identifier[self] . identifier[set_edge_attr] ( identifier[current] , identifier[n] , literal[string] , literal[string] ) identifier[self] . identifier[display] () identifier[self] . identifier[process_edge_search] ( identifier[current] , identifier[n] , identifier[pred] , identifier[q] , identifier[component] , identifier[algo] ,** identifier[kargs] ) keyword[if] identifier[reverse] : identifier[self] . identifier[set_edge_attr] ( identifier[n] , identifier[current] , literal[string] , literal[string] ) keyword[else] : identifier[self] . identifier[set_edge_attr] ( identifier[current] , identifier[n] , literal[string] , literal[string] ) identifier[q] . identifier[remove] ( identifier[current] ) identifier[self] . identifier[get_node] ( identifier[current] ). identifier[set_attr] ( literal[string] , literal[string] ) identifier[self] . identifier[display] () keyword[if] identifier[found] : identifier[path] =[ identifier[destination] ] identifier[current] = identifier[destination] keyword[while] identifier[current] != identifier[source] : identifier[path] . 
identifier[insert] ( literal[int] , identifier[pred] [ identifier[current] ]) identifier[current] = identifier[pred] [ identifier[current] ] keyword[return] identifier[path] keyword[if] identifier[destination] == keyword[None] : keyword[return] identifier[pred] keyword[else] : keyword[return] keyword[None]
def search(self, source, destination=None, display=None, component=None, q=None, algo='DFS', reverse=False, **kargs): """ API: search(self, source, destination = None, display = None, component = None, q = Stack(), algo = 'DFS', reverse = False, **kargs) Description: Generic search method. Changes behavior (dfs,bfs,dijkstra,prim) according to algo argument. if destination is not specified: This method determines all nodes reachable from "source" ie. creates precedence tree and returns it (dictionary). if destionation is given: If there exists a path from "source" to "destination" it will return list of the nodes is this path. If there is no such path, it will return the precedence tree constructed from source (dictionary). Optionally, it marks all nodes reachable from "source" with a component number. The variable "q" determines the order in which the nodes are searched. Input: source: Search starts from node with this name. destination: Destination node name. display: Display method. algo: Algortihm that specifies search. Available algortihms are 'DFS', 'BFS', 'Dijkstra' and 'Prim'. reverse: Search goes in reverse arc directions if True. kargs: Additional keyword arguments. Post: Nodes will have 'component' attribute that will have component number as value (if component argument provided). Color attribute of nodes and edges may change. Return: Returns predecessor tree in dictionary form if destination is not specified, returns list of node names in the path from source to destionation if destionation is specified and there is a path. If there is no path returns predecessor tree in dictionary form. See description section. 
""" if display == None: display = self.attr['display'] # depends on [control=['if'], data=['display']] else: self.set_display_mode(display) if algo == 'DFS': if q is None: q = Stack() # depends on [control=['if'], data=['q']] self.get_node(source).set_attr('component', component) # depends on [control=['if'], data=[]] elif algo == 'BFS' or algo == 'UnweightedSPT': if q is None: q = Queue() # depends on [control=['if'], data=['q']] self.get_node(source).set_attr('component', component) # depends on [control=['if'], data=[]] elif algo == 'Dijkstra' or algo == 'Prim': if q is None: q = PriorityQueue() # depends on [control=['if'], data=['q']] # depends on [control=['if'], data=[]] else: print('Unknown search algorithm...exiting') return neighbors = self.neighbors if self.graph_type == DIRECTED_GRAPH and reverse: neighbors = self.in_neighbors # depends on [control=['if'], data=[]] for i in self.get_node_list(): self.get_node(i).set_attr('label', '-') self.get_node(i).attr.pop('priority', None) self.get_node(i).set_attr('distance', None) self.get_node(i).set_attr('color', 'black') for j in neighbors[i]: if reverse: self.set_edge_attr(j, i, 'color', 'black') # depends on [control=['if'], data=[]] else: self.set_edge_attr(i, j, 'color', 'black') # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] self.display() pred = {} self.process_edge_search(None, source, pred, q, component, algo, **kargs) found = True if source != destination: found = False # depends on [control=['if'], data=[]] while not q.isEmpty() and (not found): current = q.peek() if self.get_node(current).get_attr('color') == 'green': q.remove(current) continue # depends on [control=['if'], data=[]] self.process_node_search(current, q, **kargs) self.get_node(current).set_attr('color', 'blue') if current != source: if reverse: self.set_edge_attr(current, pred[current], 'color', 'green') # depends on [control=['if'], data=[]] else: self.set_edge_attr(pred[current], current, 
'color', 'green') # depends on [control=['if'], data=['current']] if current == destination: found = True break # depends on [control=['if'], data=[]] self.display() for n in neighbors[current]: if self.get_node(n).get_attr('color') != 'green': if reverse: self.set_edge_attr(n, current, 'color', 'yellow') # depends on [control=['if'], data=[]] else: self.set_edge_attr(current, n, 'color', 'yellow') self.display() self.process_edge_search(current, n, pred, q, component, algo, **kargs) if reverse: self.set_edge_attr(n, current, 'color', 'black') # depends on [control=['if'], data=[]] else: self.set_edge_attr(current, n, 'color', 'black') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']] q.remove(current) self.get_node(current).set_attr('color', 'green') self.display() # depends on [control=['while'], data=[]] if found: path = [destination] current = destination while current != source: path.insert(0, pred[current]) current = pred[current] # depends on [control=['while'], data=['current']] return path # depends on [control=['if'], data=[]] if destination == None: return pred # depends on [control=['if'], data=[]] else: return None
def to_url(self): """ special function for handling 'multi', refer to Swagger 2.0, Parameter Object, collectionFormat """ if self.__collection_format == 'multi': return [str(s) for s in self] else: return [str(self)]
def function[to_url, parameter[self]]: constant[ special function for handling 'multi', refer to Swagger 2.0, Parameter Object, collectionFormat ] if compare[name[self].__collection_format equal[==] constant[multi]] begin[:] return[<ast.ListComp object at 0x7da2044c1000>]
keyword[def] identifier[to_url] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[__collection_format] == literal[string] : keyword[return] [ identifier[str] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[self] ] keyword[else] : keyword[return] [ identifier[str] ( identifier[self] )]
def to_url(self): """ special function for handling 'multi', refer to Swagger 2.0, Parameter Object, collectionFormat """ if self.__collection_format == 'multi': return [str(s) for s in self] # depends on [control=['if'], data=[]] else: return [str(self)]
def update(self, friendly_name=values.unset, unique_name=values.unset, email=values.unset, cc_emails=values.unset, status=values.unset, verification_code=values.unset, verification_type=values.unset, verification_document_sid=values.unset, extension=values.unset, call_delay=values.unset): """ Update the HostedNumberOrderInstance :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param unicode email: Email. :param unicode cc_emails: A list of emails. :param HostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode verification_code: A verification code. :param HostedNumberOrderInstance.VerificationType verification_type: Verification Type. :param unicode verification_document_sid: Verification Document Sid :param unicode extension: Digits to dial after connecting the verification call. :param unicode call_delay: The number of seconds, between 0 and 60, to delay before initiating the verification call. :returns: Updated HostedNumberOrderInstance :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderInstance """ data = values.of({ 'FriendlyName': friendly_name, 'UniqueName': unique_name, 'Email': email, 'CcEmails': serialize.map(cc_emails, lambda e: e), 'Status': status, 'VerificationCode': verification_code, 'VerificationType': verification_type, 'VerificationDocumentSid': verification_document_sid, 'Extension': extension, 'CallDelay': call_delay, }) payload = self._version.update( 'POST', self._uri, data=data, ) return HostedNumberOrderInstance(self._version, payload, sid=self._solution['sid'], )
def function[update, parameter[self, friendly_name, unique_name, email, cc_emails, status, verification_code, verification_type, verification_document_sid, extension, call_delay]]: constant[ Update the HostedNumberOrderInstance :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param unicode email: Email. :param unicode cc_emails: A list of emails. :param HostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode verification_code: A verification code. :param HostedNumberOrderInstance.VerificationType verification_type: Verification Type. :param unicode verification_document_sid: Verification Document Sid :param unicode extension: Digits to dial after connecting the verification call. :param unicode call_delay: The number of seconds, between 0 and 60, to delay before initiating the verification call. :returns: Updated HostedNumberOrderInstance :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderInstance ] variable[data] assign[=] call[name[values].of, parameter[dictionary[[<ast.Constant object at 0x7da207f9a470>, <ast.Constant object at 0x7da207f99e10>, <ast.Constant object at 0x7da207f9b1f0>, <ast.Constant object at 0x7da207f983a0>, <ast.Constant object at 0x7da207f993f0>, <ast.Constant object at 0x7da207f9a740>, <ast.Constant object at 0x7da207f99bd0>, <ast.Constant object at 0x7da1b1eae890>, <ast.Constant object at 0x7da1b1eae7a0>, <ast.Constant object at 0x7da1b1eaf340>], [<ast.Name object at 0x7da1b1eaef80>, <ast.Name object at 0x7da1b1eacdf0>, <ast.Name object at 0x7da1b1eaea10>, <ast.Call object at 0x7da1b1eafa00>, <ast.Name object at 0x7da1b1eaf430>, <ast.Name object at 0x7da1b1ead000>, <ast.Name object at 0x7da1b1eadd80>, <ast.Name object at 0x7da1b1eafd90>, <ast.Name object at 0x7da1b1eadfc0>, <ast.Name object at 0x7da1b1eaf280>]]]] variable[payload] assign[=] 
call[name[self]._version.update, parameter[constant[POST], name[self]._uri]] return[call[name[HostedNumberOrderInstance], parameter[name[self]._version, name[payload]]]]
keyword[def] identifier[update] ( identifier[self] , identifier[friendly_name] = identifier[values] . identifier[unset] , identifier[unique_name] = identifier[values] . identifier[unset] , identifier[email] = identifier[values] . identifier[unset] , identifier[cc_emails] = identifier[values] . identifier[unset] , identifier[status] = identifier[values] . identifier[unset] , identifier[verification_code] = identifier[values] . identifier[unset] , identifier[verification_type] = identifier[values] . identifier[unset] , identifier[verification_document_sid] = identifier[values] . identifier[unset] , identifier[extension] = identifier[values] . identifier[unset] , identifier[call_delay] = identifier[values] . identifier[unset] ): literal[string] identifier[data] = identifier[values] . identifier[of] ({ literal[string] : identifier[friendly_name] , literal[string] : identifier[unique_name] , literal[string] : identifier[email] , literal[string] : identifier[serialize] . identifier[map] ( identifier[cc_emails] , keyword[lambda] identifier[e] : identifier[e] ), literal[string] : identifier[status] , literal[string] : identifier[verification_code] , literal[string] : identifier[verification_type] , literal[string] : identifier[verification_document_sid] , literal[string] : identifier[extension] , literal[string] : identifier[call_delay] , }) identifier[payload] = identifier[self] . identifier[_version] . identifier[update] ( literal[string] , identifier[self] . identifier[_uri] , identifier[data] = identifier[data] , ) keyword[return] identifier[HostedNumberOrderInstance] ( identifier[self] . identifier[_version] , identifier[payload] , identifier[sid] = identifier[self] . identifier[_solution] [ literal[string] ],)
def update(self, friendly_name=values.unset, unique_name=values.unset, email=values.unset, cc_emails=values.unset, status=values.unset, verification_code=values.unset, verification_type=values.unset, verification_document_sid=values.unset, extension=values.unset, call_delay=values.unset): """ Update the HostedNumberOrderInstance :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param unicode email: Email. :param unicode cc_emails: A list of emails. :param HostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode verification_code: A verification code. :param HostedNumberOrderInstance.VerificationType verification_type: Verification Type. :param unicode verification_document_sid: Verification Document Sid :param unicode extension: Digits to dial after connecting the verification call. :param unicode call_delay: The number of seconds, between 0 and 60, to delay before initiating the verification call. :returns: Updated HostedNumberOrderInstance :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderInstance """ data = values.of({'FriendlyName': friendly_name, 'UniqueName': unique_name, 'Email': email, 'CcEmails': serialize.map(cc_emails, lambda e: e), 'Status': status, 'VerificationCode': verification_code, 'VerificationType': verification_type, 'VerificationDocumentSid': verification_document_sid, 'Extension': extension, 'CallDelay': call_delay}) payload = self._version.update('POST', self._uri, data=data) return HostedNumberOrderInstance(self._version, payload, sid=self._solution['sid'])
def scan_temperature(self, measure, temperature, rate, delay=1):
    """Performs a temperature scan.

    Measures until the target temperature is reached.

    :param measure: A callable called repeatedly until stability at target
        temperature is reached.
    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param delay: The time delay between each call to measure in seconds.

    """
    # Start the ramp from the present control temperature.
    setpoint = self.control_temperature
    self.target_temperature = setpoint
    # Signed sweep rate: positive when ramping up, negative when ramping down.
    signed_rate = abs(rate) if temperature > setpoint else -abs(rate)
    last_tick = time.time()
    time.sleep(delay)
    while True:
        measure()
        # Advance the setpoint by the temperature change accrued since the
        # previous iteration at the requested rate (K/min -> K/s).
        now = time.time()
        increment = (now - last_tick) * signed_rate / 60.
        last_tick = now
        if abs(temperature - setpoint) < abs(increment):
            # Close enough: snap to the exact target and stop ramping.
            self.target_temperature = temperature
            return
        setpoint += increment
        self.target_temperature = setpoint
        time.sleep(delay)
def function[scan_temperature, parameter[self, measure, temperature, rate, delay]]: constant[Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds. ] name[self].target_temperature assign[=] name[self].control_temperature variable[rate] assign[=] <ast.IfExp object at 0x7da1b0baad40> variable[t_last] assign[=] call[name[time].time, parameter[]] call[name[time].sleep, parameter[name[delay]]] while constant[True] begin[:] call[name[measure], parameter[]] variable[t_now] assign[=] call[name[time].time, parameter[]] variable[dt] assign[=] binary_operation[name[t_now] - name[t_last]] variable[dT] assign[=] binary_operation[binary_operation[name[dt] * name[rate]] / constant[60.0]] variable[t_last] assign[=] name[t_now] if compare[call[name[abs], parameter[binary_operation[name[temperature] - name[Tset]]]] less[<] call[name[abs], parameter[name[dT]]]] begin[:] name[self].target_temperature assign[=] name[temperature] break call[name[time].sleep, parameter[name[delay]]]
keyword[def] identifier[scan_temperature] ( identifier[self] , identifier[measure] , identifier[temperature] , identifier[rate] , identifier[delay] = literal[int] ): literal[string] identifier[self] . identifier[target_temperature] = identifier[Tset] = identifier[self] . identifier[control_temperature] identifier[rate] = identifier[abs] ( identifier[rate] ) keyword[if] identifier[temperature] - identifier[Tset] > literal[int] keyword[else] - identifier[abs] ( identifier[rate] ) identifier[t_last] = identifier[time] . identifier[time] () identifier[time] . identifier[sleep] ( identifier[delay] ) keyword[while] keyword[True] : identifier[measure] () identifier[t_now] = identifier[time] . identifier[time] () identifier[dt] = identifier[t_now] - identifier[t_last] identifier[dT] = identifier[dt] * identifier[rate] / literal[int] identifier[t_last] = identifier[t_now] keyword[if] identifier[abs] ( identifier[temperature] - identifier[Tset] )< identifier[abs] ( identifier[dT] ): identifier[self] . identifier[target_temperature] = identifier[temperature] keyword[break] keyword[else] : identifier[self] . identifier[target_temperature] = identifier[Tset] = identifier[Tset] + identifier[dT] identifier[time] . identifier[sleep] ( identifier[delay] )
def scan_temperature(self, measure, temperature, rate, delay=1): """Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds. """ # set target temperature to current control temperature self.target_temperature = Tset = self.control_temperature # we use a positive sign for the sweep rate if we sweep up and negative # if we sweep down. rate = abs(rate) if temperature - Tset > 0 else -abs(rate) t_last = time.time() time.sleep(delay) while True: measure() # Update setpoint t_now = time.time() dt = t_now - t_last dT = dt * rate / 60.0 t_last = t_now if abs(temperature - Tset) < abs(dT): self.target_temperature = temperature break # depends on [control=['if'], data=[]] else: self.target_temperature = Tset = Tset + dT time.sleep(delay) # depends on [control=['while'], data=[]]
def compound(clr, flip=False):
    """
    Roughly the complement and some far analogs.
    """

    def _wrap(value, shift, threshold, plus):
        # Shift the channel down by `shift`, unless that would cross below
        # `threshold`, in which case push it up by `plus` instead.
        if value - shift < threshold:
            return value + plus
        return value - shift

    # Rotation direction around the RYB wheel; flipped when requested.
    direction = -1 if flip else 1

    clr = color(clr)
    colors = colorlist(clr)

    # Near analog: small hue rotation with adjusted brightness.
    near = clr.rotate_ryb(30 * direction)
    near.brightness = _wrap(clr.brightness, 0.25, 0.6, 0.25)
    colors.append(near)

    # Second near analog: same rotation, different saturation/brightness.
    near_alt = clr.rotate_ryb(30 * direction)
    near_alt.saturation = _wrap(clr.saturation, 0.4, 0.1, 0.4)
    near_alt.brightness = _wrap(clr.brightness, 0.4, 0.2, 0.4)
    colors.append(near_alt)

    # Far analog close to the complement.
    far = clr.rotate_ryb(160 * direction)
    far.saturation = _wrap(clr.saturation, 0.25, 0.1, 0.25)
    far.brightness = max(0.2, clr.brightness)
    colors.append(far)

    far_alt = clr.rotate_ryb(150 * direction)
    far_alt.saturation = _wrap(clr.saturation, 0.1, 0.8, 0.1)
    far_alt.brightness = _wrap(clr.brightness, 0.3, 0.6, 0.3)
    colors.append(far_alt)

    # Computed but deliberately not appended in the original implementation;
    # kept as-is in case rotate_ryb has side effects callers rely on.
    extra = clr.rotate_ryb(150 * direction)
    extra.saturation = _wrap(clr.saturation, 0.1, 0.8, 0.1)
    extra.brightness = _wrap(clr.brightness, 0.4, 0.2, 0.4)
    # colors.append(extra)

    return colors
def function[compound, parameter[clr, flip]]: constant[ Roughly the complement and some far analogs. ] def function[_wrap, parameter[x, min, threshold, plus]]: if compare[binary_operation[name[x] - name[min]] less[<] name[threshold]] begin[:] return[binary_operation[name[x] + name[plus]]] variable[d] assign[=] constant[1] if name[flip] begin[:] variable[d] assign[=] <ast.UnaryOp object at 0x7da1aff568c0> variable[clr] assign[=] call[name[color], parameter[name[clr]]] variable[colors] assign[=] call[name[colorlist], parameter[name[clr]]] variable[c] assign[=] call[name[clr].rotate_ryb, parameter[binary_operation[constant[30] * name[d]]]] name[c].brightness assign[=] call[name[_wrap], parameter[name[clr].brightness, constant[0.25], constant[0.6], constant[0.25]]] call[name[colors].append, parameter[name[c]]] variable[c] assign[=] call[name[clr].rotate_ryb, parameter[binary_operation[constant[30] * name[d]]]] name[c].saturation assign[=] call[name[_wrap], parameter[name[clr].saturation, constant[0.4], constant[0.1], constant[0.4]]] name[c].brightness assign[=] call[name[_wrap], parameter[name[clr].brightness, constant[0.4], constant[0.2], constant[0.4]]] call[name[colors].append, parameter[name[c]]] variable[c] assign[=] call[name[clr].rotate_ryb, parameter[binary_operation[constant[160] * name[d]]]] name[c].saturation assign[=] call[name[_wrap], parameter[name[clr].saturation, constant[0.25], constant[0.1], constant[0.25]]] name[c].brightness assign[=] call[name[max], parameter[constant[0.2], name[clr].brightness]] call[name[colors].append, parameter[name[c]]] variable[c] assign[=] call[name[clr].rotate_ryb, parameter[binary_operation[constant[150] * name[d]]]] name[c].saturation assign[=] call[name[_wrap], parameter[name[clr].saturation, constant[0.1], constant[0.8], constant[0.1]]] name[c].brightness assign[=] call[name[_wrap], parameter[name[clr].brightness, constant[0.3], constant[0.6], constant[0.3]]] call[name[colors].append, parameter[name[c]]] variable[c] 
assign[=] call[name[clr].rotate_ryb, parameter[binary_operation[constant[150] * name[d]]]] name[c].saturation assign[=] call[name[_wrap], parameter[name[clr].saturation, constant[0.1], constant[0.8], constant[0.1]]] name[c].brightness assign[=] call[name[_wrap], parameter[name[clr].brightness, constant[0.4], constant[0.2], constant[0.4]]] return[name[colors]]
keyword[def] identifier[compound] ( identifier[clr] , identifier[flip] = keyword[False] ): literal[string] keyword[def] identifier[_wrap] ( identifier[x] , identifier[min] , identifier[threshold] , identifier[plus] ): keyword[if] identifier[x] - identifier[min] < identifier[threshold] : keyword[return] identifier[x] + identifier[plus] keyword[else] : keyword[return] identifier[x] - identifier[min] identifier[d] = literal[int] keyword[if] identifier[flip] : identifier[d] =- literal[int] identifier[clr] = identifier[color] ( identifier[clr] ) identifier[colors] = identifier[colorlist] ( identifier[clr] ) identifier[c] = identifier[clr] . identifier[rotate_ryb] ( literal[int] * identifier[d] ) identifier[c] . identifier[brightness] = identifier[_wrap] ( identifier[clr] . identifier[brightness] , literal[int] , literal[int] , literal[int] ) identifier[colors] . identifier[append] ( identifier[c] ) identifier[c] = identifier[clr] . identifier[rotate_ryb] ( literal[int] * identifier[d] ) identifier[c] . identifier[saturation] = identifier[_wrap] ( identifier[clr] . identifier[saturation] , literal[int] , literal[int] , literal[int] ) identifier[c] . identifier[brightness] = identifier[_wrap] ( identifier[clr] . identifier[brightness] , literal[int] , literal[int] , literal[int] ) identifier[colors] . identifier[append] ( identifier[c] ) identifier[c] = identifier[clr] . identifier[rotate_ryb] ( literal[int] * identifier[d] ) identifier[c] . identifier[saturation] = identifier[_wrap] ( identifier[clr] . identifier[saturation] , literal[int] , literal[int] , literal[int] ) identifier[c] . identifier[brightness] = identifier[max] ( literal[int] , identifier[clr] . identifier[brightness] ) identifier[colors] . identifier[append] ( identifier[c] ) identifier[c] = identifier[clr] . identifier[rotate_ryb] ( literal[int] * identifier[d] ) identifier[c] . identifier[saturation] = identifier[_wrap] ( identifier[clr] . 
identifier[saturation] , literal[int] , literal[int] , literal[int] ) identifier[c] . identifier[brightness] = identifier[_wrap] ( identifier[clr] . identifier[brightness] , literal[int] , literal[int] , literal[int] ) identifier[colors] . identifier[append] ( identifier[c] ) identifier[c] = identifier[clr] . identifier[rotate_ryb] ( literal[int] * identifier[d] ) identifier[c] . identifier[saturation] = identifier[_wrap] ( identifier[clr] . identifier[saturation] , literal[int] , literal[int] , literal[int] ) identifier[c] . identifier[brightness] = identifier[_wrap] ( identifier[clr] . identifier[brightness] , literal[int] , literal[int] , literal[int] ) keyword[return] identifier[colors]
def compound(clr, flip=False): """ Roughly the complement and some far analogs. """ def _wrap(x, min, threshold, plus): if x - min < threshold: return x + plus # depends on [control=['if'], data=[]] else: return x - min d = 1 if flip: d = -1 # depends on [control=['if'], data=[]] clr = color(clr) colors = colorlist(clr) c = clr.rotate_ryb(30 * d) c.brightness = _wrap(clr.brightness, 0.25, 0.6, 0.25) colors.append(c) c = clr.rotate_ryb(30 * d) c.saturation = _wrap(clr.saturation, 0.4, 0.1, 0.4) c.brightness = _wrap(clr.brightness, 0.4, 0.2, 0.4) colors.append(c) c = clr.rotate_ryb(160 * d) c.saturation = _wrap(clr.saturation, 0.25, 0.1, 0.25) c.brightness = max(0.2, clr.brightness) colors.append(c) c = clr.rotate_ryb(150 * d) c.saturation = _wrap(clr.saturation, 0.1, 0.8, 0.1) c.brightness = _wrap(clr.brightness, 0.3, 0.6, 0.3) colors.append(c) c = clr.rotate_ryb(150 * d) c.saturation = _wrap(clr.saturation, 0.1, 0.8, 0.1) c.brightness = _wrap(clr.brightness, 0.4, 0.2, 0.4) # colors.append(c) return colors
def get_config(self, section=None):
    """
    Return the merged end-user configuration for this command or a
    specific section if set in `section`.
    """
    config = self.session.config
    # Fall back to this command's own section when none is requested.
    if section is None:
        section = self.config_section()
    try:
        return config[section]
    except KeyError:
        # Section does not exist yet: create it empty, then return it.
        config.add_section(section)
        return config[section]
def function[get_config, parameter[self, section]]: constant[ Return the merged end-user configuration for this command or a specific section if set in `section`. ] variable[config] assign[=] name[self].session.config variable[section] assign[=] <ast.IfExp object at 0x7da18eb56f80> <ast.Try object at 0x7da20c6c5f90>
keyword[def] identifier[get_config] ( identifier[self] , identifier[section] = keyword[None] ): literal[string] identifier[config] = identifier[self] . identifier[session] . identifier[config] identifier[section] = identifier[self] . identifier[config_section] () keyword[if] identifier[section] keyword[is] keyword[None] keyword[else] identifier[section] keyword[try] : keyword[return] identifier[config] [ identifier[section] ] keyword[except] identifier[KeyError] : identifier[config] . identifier[add_section] ( identifier[section] ) keyword[return] identifier[config] [ identifier[section] ]
def get_config(self, section=None): """ Return the merged end-user configuration for this command or a specific section if set in `section`. """ config = self.session.config section = self.config_section() if section is None else section try: return config[section] # depends on [control=['try'], data=[]] except KeyError: config.add_section(section) return config[section] # depends on [control=['except'], data=[]]
def vehicle_type_string(self, hb):
    '''return vehicle type string from a heartbeat'''
    mav = mavutil.mavlink
    # Direct mapping from MAV_TYPE constant to a human-readable name.
    names = {
        mav.MAV_TYPE_FIXED_WING: 'Plane',
        mav.MAV_TYPE_GROUND_ROVER: 'Rover',
        mav.MAV_TYPE_SURFACE_BOAT: 'Boat',
        mav.MAV_TYPE_SUBMARINE: 'Sub',
        mav.MAV_TYPE_HELICOPTER: 'Heli',
        mav.MAV_TYPE_ANTENNA_TRACKER: 'Tracker',
    }
    # All multirotor frame types report as a generic "Copter".
    for copter_type in (mav.MAV_TYPE_QUADROTOR,
                        mav.MAV_TYPE_COAXIAL,
                        mav.MAV_TYPE_HEXAROTOR,
                        mav.MAV_TYPE_OCTOROTOR,
                        mav.MAV_TYPE_TRICOPTER,
                        mav.MAV_TYPE_DODECAROTOR):
        names[copter_type] = 'Copter'
    name = names.get(hb.type)
    if name is None:
        name = "UNKNOWN(%u)" % hb.type
    return name
def function[vehicle_type_string, parameter[self, hb]]: constant[return vehicle type string from a heartbeat] if compare[name[hb].type equal[==] name[mavutil].mavlink.MAV_TYPE_FIXED_WING] begin[:] return[constant[Plane]] if compare[name[hb].type equal[==] name[mavutil].mavlink.MAV_TYPE_GROUND_ROVER] begin[:] return[constant[Rover]] if compare[name[hb].type equal[==] name[mavutil].mavlink.MAV_TYPE_SURFACE_BOAT] begin[:] return[constant[Boat]] if compare[name[hb].type equal[==] name[mavutil].mavlink.MAV_TYPE_SUBMARINE] begin[:] return[constant[Sub]] if compare[name[hb].type in list[[<ast.Attribute object at 0x7da1b1608970>, <ast.Attribute object at 0x7da1b1608f70>, <ast.Attribute object at 0x7da1b16084f0>, <ast.Attribute object at 0x7da1b1609990>, <ast.Attribute object at 0x7da1b1608130>, <ast.Attribute object at 0x7da1b1608610>]]] begin[:] return[constant[Copter]] if compare[name[hb].type equal[==] name[mavutil].mavlink.MAV_TYPE_HELICOPTER] begin[:] return[constant[Heli]] if compare[name[hb].type equal[==] name[mavutil].mavlink.MAV_TYPE_ANTENNA_TRACKER] begin[:] return[constant[Tracker]] return[binary_operation[constant[UNKNOWN(%u)] <ast.Mod object at 0x7da2590d6920> name[hb].type]]
keyword[def] identifier[vehicle_type_string] ( identifier[self] , identifier[hb] ): literal[string] keyword[if] identifier[hb] . identifier[type] == identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_FIXED_WING] : keyword[return] literal[string] keyword[if] identifier[hb] . identifier[type] == identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_GROUND_ROVER] : keyword[return] literal[string] keyword[if] identifier[hb] . identifier[type] == identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_SURFACE_BOAT] : keyword[return] literal[string] keyword[if] identifier[hb] . identifier[type] == identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_SUBMARINE] : keyword[return] literal[string] keyword[if] identifier[hb] . identifier[type] keyword[in] [ identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_QUADROTOR] , identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_COAXIAL] , identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_HEXAROTOR] , identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_OCTOROTOR] , identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_TRICOPTER] , identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_DODECAROTOR] ]: keyword[return] literal[string] keyword[if] identifier[hb] . identifier[type] == identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_HELICOPTER] : keyword[return] literal[string] keyword[if] identifier[hb] . identifier[type] == identifier[mavutil] . identifier[mavlink] . identifier[MAV_TYPE_ANTENNA_TRACKER] : keyword[return] literal[string] keyword[return] literal[string] % identifier[hb] . identifier[type]
def vehicle_type_string(self, hb): """return vehicle type string from a heartbeat""" if hb.type == mavutil.mavlink.MAV_TYPE_FIXED_WING: return 'Plane' # depends on [control=['if'], data=[]] if hb.type == mavutil.mavlink.MAV_TYPE_GROUND_ROVER: return 'Rover' # depends on [control=['if'], data=[]] if hb.type == mavutil.mavlink.MAV_TYPE_SURFACE_BOAT: return 'Boat' # depends on [control=['if'], data=[]] if hb.type == mavutil.mavlink.MAV_TYPE_SUBMARINE: return 'Sub' # depends on [control=['if'], data=[]] if hb.type in [mavutil.mavlink.MAV_TYPE_QUADROTOR, mavutil.mavlink.MAV_TYPE_COAXIAL, mavutil.mavlink.MAV_TYPE_HEXAROTOR, mavutil.mavlink.MAV_TYPE_OCTOROTOR, mavutil.mavlink.MAV_TYPE_TRICOPTER, mavutil.mavlink.MAV_TYPE_DODECAROTOR]: return 'Copter' # depends on [control=['if'], data=[]] if hb.type == mavutil.mavlink.MAV_TYPE_HELICOPTER: return 'Heli' # depends on [control=['if'], data=[]] if hb.type == mavutil.mavlink.MAV_TYPE_ANTENNA_TRACKER: return 'Tracker' # depends on [control=['if'], data=[]] return 'UNKNOWN(%u)' % hb.type
def headers_present(self, headers):
    """
    Defines a list of headers that must be present in the outgoing
    request in order to satisfy the matcher, no matter what value the
    headers hosts. Header keys are case insensitive.

    Arguments:
        headers (list|tuple): header keys to match.

    Returns:
        self: current Mock instance.

    Example::

        (pook.get('server.com/api')
            .headers_present(['content-type', 'Authorization']))
    """
    # Any value is acceptable; only the header's presence is asserted.
    match_any = re.compile('(.*)')
    headers = {name: match_any for name in headers}
    self.add_matcher(matcher('HeadersMatcher', headers))
    # Fix: the docstring promises the current Mock instance, but the
    # original implementation returned None, breaking method chaining.
    return self
def function[headers_present, parameter[self, headers]]: constant[ Defines a list of headers that must be present in the outgoing request in order to satisfy the matcher, no matter what value the headers hosts. Header keys are case insensitive. Arguments: headers (list|tuple): header keys to match. Returns: self: current Mock instance. Example:: (pook.get('server.com/api') .headers_present(['content-type', 'Authorization'])) ] variable[headers] assign[=] <ast.DictComp object at 0x7da1b0240af0> call[name[self].add_matcher, parameter[call[name[matcher], parameter[constant[HeadersMatcher], name[headers]]]]]
keyword[def] identifier[headers_present] ( identifier[self] , identifier[headers] ): literal[string] identifier[headers] ={ identifier[name] : identifier[re] . identifier[compile] ( literal[string] ) keyword[for] identifier[name] keyword[in] identifier[headers] } identifier[self] . identifier[add_matcher] ( identifier[matcher] ( literal[string] , identifier[headers] ))
def headers_present(self, headers): """ Defines a list of headers that must be present in the outgoing request in order to satisfy the matcher, no matter what value the headers hosts. Header keys are case insensitive. Arguments: headers (list|tuple): header keys to match. Returns: self: current Mock instance. Example:: (pook.get('server.com/api') .headers_present(['content-type', 'Authorization'])) """ headers = {name: re.compile('(.*)') for name in headers} self.add_matcher(matcher('HeadersMatcher', headers))
def scan_band(self, band, **kwargs):
    """Run Kalibrate for a band.

    Supported keyword arguments:
        gain -- Gain in dB
        device -- Index of device to be used
        error -- Initial frequency error in ppm
    """
    # Assemble the kal command line, then run it capturing stderr as well.
    command_line = fn.build_kal_scan_band_string(self.kal_bin, band, kwargs)
    raw_output = subprocess.check_output(command_line.split(' '),
                                         stderr=subprocess.STDOUT)
    # Normalize kal's raw text output into structured results.
    return fn.parse_kal_scan(raw_output)
def function[scan_band, parameter[self, band]]: constant[Run Kalibrate for a band. Supported keyword arguments: gain -- Gain in dB device -- Index of device to be used error -- Initial frequency error in ppm ] variable[kal_run_line] assign[=] call[name[fn].build_kal_scan_band_string, parameter[name[self].kal_bin, name[band], name[kwargs]]] variable[raw_output] assign[=] call[name[subprocess].check_output, parameter[call[name[kal_run_line].split, parameter[constant[ ]]]]] variable[kal_normalized] assign[=] call[name[fn].parse_kal_scan, parameter[name[raw_output]]] return[name[kal_normalized]]
keyword[def] identifier[scan_band] ( identifier[self] , identifier[band] ,** identifier[kwargs] ): literal[string] identifier[kal_run_line] = identifier[fn] . identifier[build_kal_scan_band_string] ( identifier[self] . identifier[kal_bin] , identifier[band] , identifier[kwargs] ) identifier[raw_output] = identifier[subprocess] . identifier[check_output] ( identifier[kal_run_line] . identifier[split] ( literal[string] ), identifier[stderr] = identifier[subprocess] . identifier[STDOUT] ) identifier[kal_normalized] = identifier[fn] . identifier[parse_kal_scan] ( identifier[raw_output] ) keyword[return] identifier[kal_normalized]
def scan_band(self, band, **kwargs): """Run Kalibrate for a band. Supported keyword arguments: gain -- Gain in dB device -- Index of device to be used error -- Initial frequency error in ppm """ kal_run_line = fn.build_kal_scan_band_string(self.kal_bin, band, kwargs) raw_output = subprocess.check_output(kal_run_line.split(' '), stderr=subprocess.STDOUT) kal_normalized = fn.parse_kal_scan(raw_output) return kal_normalized
def shared_prefix(args):
    """
    Find the shared prefix between the strings.

    For instance:

        sharedPrefix(['blahblah', 'blahwhat'])

    returns 'blah'.
    """
    # Hoisted out of the loop: the shortest string bounds the prefix length.
    # (The original recomputed min(map(len, args)) on every iteration.)
    limit = min(map(len, args))
    i = 0
    while i < limit:
        # All strings must agree on character i for it to be in the prefix.
        if len({s[i] for s in args}) != 1:
            break
        i += 1
    return args[0][:i]
def function[shared_prefix, parameter[args]]: constant[ Find the shared prefix between the strings. For instance: sharedPrefix(['blahblah', 'blahwhat']) returns 'blah'. ] variable[i] assign[=] constant[0] while compare[name[i] less[<] call[name[min], parameter[call[name[map], parameter[name[len], name[args]]]]]] begin[:] if compare[call[name[len], parameter[call[name[set], parameter[call[name[map], parameter[call[name[operator].itemgetter, parameter[name[i]]], name[args]]]]]]] not_equal[!=] constant[1]] begin[:] break <ast.AugAssign object at 0x7da20e74b610> return[call[call[name[args]][constant[0]]][<ast.Slice object at 0x7da18bc708b0>]]
keyword[def] identifier[shared_prefix] ( identifier[args] ): literal[string] identifier[i] = literal[int] keyword[while] identifier[i] < identifier[min] ( identifier[map] ( identifier[len] , identifier[args] )): keyword[if] identifier[len] ( identifier[set] ( identifier[map] ( identifier[operator] . identifier[itemgetter] ( identifier[i] ), identifier[args] )))!= literal[int] : keyword[break] identifier[i] += literal[int] keyword[return] identifier[args] [ literal[int] ][: identifier[i] ]
def shared_prefix(args): """ Find the shared prefix between the strings. For instance: sharedPrefix(['blahblah', 'blahwhat']) returns 'blah'. """ i = 0 while i < min(map(len, args)): if len(set(map(operator.itemgetter(i), args))) != 1: break # depends on [control=['if'], data=[]] i += 1 # depends on [control=['while'], data=['i']] return args[0][:i]
def write(self, more):
    """Append the uppercased string form of `more` to our output.

    Falsy values contribute no text; a trailing newline is always added.
    """
    text = str(more).upper() if more else ''
    self.output += text + '\n'
def function[write, parameter[self, more]]: constant[Append the Unicode representation of `s` to our output.] if name[more] begin[:] <ast.AugAssign object at 0x7da1b1a943d0> <ast.AugAssign object at 0x7da1b1a94910>
keyword[def] identifier[write] ( identifier[self] , identifier[more] ): literal[string] keyword[if] identifier[more] : identifier[self] . identifier[output] += identifier[str] ( identifier[more] ). identifier[upper] () identifier[self] . identifier[output] += literal[string]
def write(self, more): """Append the Unicode representation of `s` to our output.""" if more: self.output += str(more).upper() self.output += '\n' # depends on [control=['if'], data=[]]
def layout_circle(self):
    '''Position vertices evenly around a circle.'''
    count = self.num_vertices()
    # count angles spaced 2*pi/count apart, starting at 0 (endpoint excluded,
    # so the first and last vertices do not coincide).
    angles = np.linspace(0, 2 * np.pi, count, endpoint=False)
    # One (cos, sin) row per vertex -> (count, 2) coordinate array.
    return np.column_stack((np.cos(angles), np.sin(angles)))
def function[layout_circle, parameter[self]]: constant[Position vertices evenly around a circle.] variable[n] assign[=] call[name[self].num_vertices, parameter[]] variable[t] assign[=] call[call[name[np].linspace, parameter[constant[0], binary_operation[constant[2] * name[np].pi], binary_operation[name[n] + constant[1]]]]][<ast.Slice object at 0x7da18bcc86d0>] return[call[name[np].column_stack, parameter[tuple[[<ast.Call object at 0x7da18bcc8a90>, <ast.Call object at 0x7da18bccbcd0>]]]]]
keyword[def] identifier[layout_circle] ( identifier[self] ): literal[string] identifier[n] = identifier[self] . identifier[num_vertices] () identifier[t] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] * identifier[np] . identifier[pi] , identifier[n] + literal[int] )[: identifier[n] ] keyword[return] identifier[np] . identifier[column_stack] (( identifier[np] . identifier[cos] ( identifier[t] ), identifier[np] . identifier[sin] ( identifier[t] )))
def layout_circle(self): """Position vertices evenly around a circle.""" n = self.num_vertices() t = np.linspace(0, 2 * np.pi, n + 1)[:n] return np.column_stack((np.cos(t), np.sin(t)))
def merge(self):
    """
    Merge contained schemas into one.
    @return: The merged schema, or None when there are no children.
    @rtype: L{Schema}
    """
    if not self.children:
        return None
    # Fold every sibling schema into the first child, which becomes the result.
    base, *others = self.children
    for other in others:
        base.merge(other)
    return base
def function[merge, parameter[self]]: constant[ Merge contained schemas into one. @return: The merged schema. @rtype: L{Schema} ] if name[self].children begin[:] variable[schema] assign[=] call[name[self].children][constant[0]] for taget[name[s]] in starred[call[name[self].children][<ast.Slice object at 0x7da18f09e860>]] begin[:] call[name[schema].merge, parameter[name[s]]] return[name[schema]]
keyword[def] identifier[merge] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[children] : identifier[schema] = identifier[self] . identifier[children] [ literal[int] ] keyword[for] identifier[s] keyword[in] identifier[self] . identifier[children] [ literal[int] :]: identifier[schema] . identifier[merge] ( identifier[s] ) keyword[return] identifier[schema]
def merge(self): """ Merge contained schemas into one. @return: The merged schema. @rtype: L{Schema} """ if self.children: schema = self.children[0] for s in self.children[1:]: schema.merge(s) # depends on [control=['for'], data=['s']] return schema # depends on [control=['if'], data=[]]
def get_cached_parent_for_taxon(self, child_taxon):
    """Return a TaxonWrapper for the parent lineage of `child_taxon`.

    If the taxa are being cached, this call will create the lineage "spike"
    for taxon child_taxon: a chain of ancestral TaxonWrapper objects linked
    through their parent references.

    Expecting child_taxon to have a non-empty _taxonomic_lineage with
    response dicts that can create an ancestral TaxonWrapper.
    """
    if self._ott_id2taxon is None:
        # No cache in use: wrap only the immediate parent (first lineage
        # entry), handing it the remaining lineage dicts so it can build
        # its own ancestors the same way.
        resp = child_taxon._taxonomic_lineage[0]
        tl = child_taxon._taxonomic_lineage[1:]
        assert 'taxonomic_lineage' not in resp
        resp['taxonomic_lineage'] = tl
        return TaxonWrapper(taxonomy=child_taxon.taxonomy, taxomachine_wrapper=self._wr, prop_dict=resp)  # TODO recursive (indirectly)
    else:
        # Cache in use: walk the lineage root-first (the stored order is
        # reversed), linking each taxon to the one processed before it.
        anc = []
        prev = None
        for resp in reversed(child_taxon._taxonomic_lineage):
            ott_id = resp['ot:ottId']
            curr = self._ott_id2taxon.get(ott_id)
            if curr is None:
                # Not cached yet: seed the response dict with the parent
                # built so far.  NOTE(review): `anc` is the same list object
                # stored here and mutated by insert() below, so every wrapper
                # created in this loop shares the growing lineage list —
                # confirm this aliasing is intended.
                assert 'taxonomic_lineage' not in resp
                assert 'parent' not in resp
                resp['parent'] = prev
                resp['taxonomic_lineage'] = anc
                curr = TaxonWrapper(taxonomy=child_taxon.taxonomy, taxomachine_wrapper=self._wr, prop_dict=resp)
            elif curr._parent is None and prev is not None:
                # Cached but unlinked: attach it to the ancestor chain.
                curr._parent = prev
            prev = curr
            anc.insert(0, curr)
        # prev is now the deepest ancestor processed, i.e. the direct parent.
        return prev
def function[get_cached_parent_for_taxon, parameter[self, child_taxon]]: constant[If the taxa are being cached, this call will create a the lineage "spike" for taxon child_taxon Expecting child_taxon to have a non-empty _taxonomic_lineage with response dicts that can create an ancestral TaxonWrapper. ] if compare[name[self]._ott_id2taxon is constant[None]] begin[:] variable[resp] assign[=] call[name[child_taxon]._taxonomic_lineage][constant[0]] variable[tl] assign[=] call[name[child_taxon]._taxonomic_lineage][<ast.Slice object at 0x7da18fe929e0>] assert[compare[constant[taxonomic_lineage] <ast.NotIn object at 0x7da2590d7190> name[resp]]] call[name[resp]][constant[taxonomic_lineage]] assign[=] name[tl] return[call[name[TaxonWrapper], parameter[]]]
keyword[def] identifier[get_cached_parent_for_taxon] ( identifier[self] , identifier[child_taxon] ): literal[string] keyword[if] identifier[self] . identifier[_ott_id2taxon] keyword[is] keyword[None] : identifier[resp] = identifier[child_taxon] . identifier[_taxonomic_lineage] [ literal[int] ] identifier[tl] = identifier[child_taxon] . identifier[_taxonomic_lineage] [ literal[int] :] keyword[assert] literal[string] keyword[not] keyword[in] identifier[resp] identifier[resp] [ literal[string] ]= identifier[tl] keyword[return] identifier[TaxonWrapper] ( identifier[taxonomy] = identifier[child_taxon] . identifier[taxonomy] , identifier[taxomachine_wrapper] = identifier[self] . identifier[_wr] , identifier[prop_dict] = identifier[resp] ) keyword[else] : identifier[anc] =[] identifier[prev] = keyword[None] keyword[for] identifier[resp] keyword[in] identifier[reversed] ( identifier[child_taxon] . identifier[_taxonomic_lineage] ): identifier[ott_id] = identifier[resp] [ literal[string] ] identifier[curr] = identifier[self] . identifier[_ott_id2taxon] . identifier[get] ( identifier[ott_id] ) keyword[if] identifier[curr] keyword[is] keyword[None] : keyword[assert] literal[string] keyword[not] keyword[in] identifier[resp] keyword[assert] literal[string] keyword[not] keyword[in] identifier[resp] identifier[resp] [ literal[string] ]= identifier[prev] identifier[resp] [ literal[string] ]= identifier[anc] identifier[curr] = identifier[TaxonWrapper] ( identifier[taxonomy] = identifier[child_taxon] . identifier[taxonomy] , identifier[taxomachine_wrapper] = identifier[self] . identifier[_wr] , identifier[prop_dict] = identifier[resp] ) keyword[elif] identifier[curr] . identifier[_parent] keyword[is] keyword[None] keyword[and] identifier[prev] keyword[is] keyword[not] keyword[None] : identifier[curr] . identifier[_parent] = identifier[prev] identifier[prev] = identifier[curr] identifier[anc] . identifier[insert] ( literal[int] , identifier[curr] ) keyword[return] identifier[prev]
def get_cached_parent_for_taxon(self, child_taxon): """If the taxa are being cached, this call will create a the lineage "spike" for taxon child_taxon Expecting child_taxon to have a non-empty _taxonomic_lineage with response dicts that can create an ancestral TaxonWrapper. """ if self._ott_id2taxon is None: resp = child_taxon._taxonomic_lineage[0] tl = child_taxon._taxonomic_lineage[1:] assert 'taxonomic_lineage' not in resp resp['taxonomic_lineage'] = tl return TaxonWrapper(taxonomy=child_taxon.taxonomy, taxomachine_wrapper=self._wr, prop_dict=resp) # TODO recursive (indirectly) # depends on [control=['if'], data=[]] else: anc = [] prev = None for resp in reversed(child_taxon._taxonomic_lineage): ott_id = resp['ot:ottId'] curr = self._ott_id2taxon.get(ott_id) if curr is None: assert 'taxonomic_lineage' not in resp assert 'parent' not in resp resp['parent'] = prev resp['taxonomic_lineage'] = anc curr = TaxonWrapper(taxonomy=child_taxon.taxonomy, taxomachine_wrapper=self._wr, prop_dict=resp) # depends on [control=['if'], data=['curr']] elif curr._parent is None and prev is not None: curr._parent = prev # depends on [control=['if'], data=[]] prev = curr anc.insert(0, curr) # depends on [control=['for'], data=['resp']] return prev
def _unwrap(variable_parts: VariablePartsType): """ Yield URL parts. The given parts are usually in reverse order. """ curr_parts = variable_parts var_any = [] while curr_parts: curr_parts, (var_type, part) = curr_parts if var_type == Routes._VAR_ANY_NODE: var_any.append(part) continue if var_type == Routes._VAR_ANY_BREAK: if var_any: yield tuple(reversed(var_any)) var_any.clear() var_any.append(part) continue if var_any: yield tuple(reversed(var_any)) var_any.clear() yield part continue yield part if var_any: yield tuple(reversed(var_any))
def function[_unwrap, parameter[variable_parts]]: constant[ Yield URL parts. The given parts are usually in reverse order. ] variable[curr_parts] assign[=] name[variable_parts] variable[var_any] assign[=] list[[]] while name[curr_parts] begin[:] <ast.Tuple object at 0x7da20c6c6ef0> assign[=] name[curr_parts] if compare[name[var_type] equal[==] name[Routes]._VAR_ANY_NODE] begin[:] call[name[var_any].append, parameter[name[part]]] continue if compare[name[var_type] equal[==] name[Routes]._VAR_ANY_BREAK] begin[:] if name[var_any] begin[:] <ast.Yield object at 0x7da20c6c5030> call[name[var_any].clear, parameter[]] call[name[var_any].append, parameter[name[part]]] continue if name[var_any] begin[:] <ast.Yield object at 0x7da20c6c6710> call[name[var_any].clear, parameter[]] <ast.Yield object at 0x7da20c6c7cd0> continue <ast.Yield object at 0x7da20c6c5a20> if name[var_any] begin[:] <ast.Yield object at 0x7da20c6c57e0>
keyword[def] identifier[_unwrap] ( identifier[variable_parts] : identifier[VariablePartsType] ): literal[string] identifier[curr_parts] = identifier[variable_parts] identifier[var_any] =[] keyword[while] identifier[curr_parts] : identifier[curr_parts] ,( identifier[var_type] , identifier[part] )= identifier[curr_parts] keyword[if] identifier[var_type] == identifier[Routes] . identifier[_VAR_ANY_NODE] : identifier[var_any] . identifier[append] ( identifier[part] ) keyword[continue] keyword[if] identifier[var_type] == identifier[Routes] . identifier[_VAR_ANY_BREAK] : keyword[if] identifier[var_any] : keyword[yield] identifier[tuple] ( identifier[reversed] ( identifier[var_any] )) identifier[var_any] . identifier[clear] () identifier[var_any] . identifier[append] ( identifier[part] ) keyword[continue] keyword[if] identifier[var_any] : keyword[yield] identifier[tuple] ( identifier[reversed] ( identifier[var_any] )) identifier[var_any] . identifier[clear] () keyword[yield] identifier[part] keyword[continue] keyword[yield] identifier[part] keyword[if] identifier[var_any] : keyword[yield] identifier[tuple] ( identifier[reversed] ( identifier[var_any] ))
def _unwrap(variable_parts: VariablePartsType): """ Yield URL parts. The given parts are usually in reverse order. """ curr_parts = variable_parts var_any = [] while curr_parts: (curr_parts, (var_type, part)) = curr_parts if var_type == Routes._VAR_ANY_NODE: var_any.append(part) continue # depends on [control=['if'], data=[]] if var_type == Routes._VAR_ANY_BREAK: if var_any: yield tuple(reversed(var_any)) var_any.clear() # depends on [control=['if'], data=[]] var_any.append(part) continue # depends on [control=['if'], data=[]] if var_any: yield tuple(reversed(var_any)) var_any.clear() yield part continue # depends on [control=['if'], data=[]] yield part # depends on [control=['while'], data=[]] if var_any: yield tuple(reversed(var_any)) # depends on [control=['if'], data=[]]
def precess_coordinates(ra, dec, epoch_one, epoch_two, jd=None, mu_ra=0.0, mu_dec=0.0, outscalar=False): '''Precesses target coordinates `ra`, `dec` from `epoch_one` to `epoch_two`. This takes into account the jd of the observations, as well as the proper motion of the target mu_ra, mu_dec. Adapted from J. D. Hartman's VARTOOLS/converttime.c [coordprecess]. Parameters ---------- ra,dec : float The equatorial coordinates of the object at `epoch_one` to precess in decimal degrees. epoch_one : float Origin epoch to precess from to target epoch. This is a float, like: 1985.0, 2000.0, etc. epoch_two : float Target epoch to precess from origin epoch. This is a float, like: 2000.0, 2018.0, etc. jd : float The full Julian date to use along with the propermotions in `mu_ra`, and `mu_dec` to handle proper motion along with the coordinate frame precession. If one of `jd`, `mu_ra`, or `mu_dec` is missing, the proper motion will not be used to calculate the final precessed coordinates. mu_ra,mu_dec : float The proper motion in mas/yr in right ascension and declination. If these are provided along with `jd`, the total proper motion of the object will be taken into account to calculate the final precessed coordinates. outscalar : bool If True, converts the output coordinates from one-element np.arrays to scalars. Returns ------- precessed_ra, precessed_dec : float A tuple of precessed equatorial coordinates in decimal degrees at `epoch_two` taking into account proper motion if `jd`, `mu_ra`, and `mu_dec` are provided. 
''' raproc, decproc = np.radians(ra), np.radians(dec) if ((mu_ra != 0.0) and (mu_dec != 0.0) and jd): jd_epoch_one = JD2000 + (epoch_one - epoch_two)*365.25 raproc = ( raproc + (jd - jd_epoch_one)*mu_ra*MAS_P_YR_TO_RAD_P_DAY/np.cos(decproc) ) decproc = decproc + (jd - jd_epoch_one)*mu_dec*MAS_P_YR_TO_RAD_P_DAY ca = np.cos(raproc) cd = np.cos(decproc) sa = np.sin(raproc) sd = np.sin(decproc) if epoch_one != epoch_two: t1 = 1.0e-3 * (epoch_two - epoch_one) t2 = 1.0e-3 * (epoch_one - 2000.0) a = ( t1*ARCSEC_TO_RADIANS * (23062.181 + t2*(139.656 + 0.0139*t2) + t1*(30.188 - 0.344*t2+17.998*t1)) ) b = t1*t1*ARCSEC_TO_RADIANS*(79.280 + 0.410*t2 + 0.205*t1) + a c = ( ARCSEC_TO_RADIANS*t1*(20043.109 - t2*(85.33 + 0.217*t2) + t1*(-42.665 - 0.217*t2 - 41.833*t2)) ) sina, sinb, sinc = np.sin(a), np.sin(b), np.sin(c) cosa, cosb, cosc = np.cos(a), np.cos(b), np.cos(c) precmatrix = np.matrix([[cosa*cosb*cosc - sina*sinb, sina*cosb + cosa*sinb*cosc, cosa*sinc], [-cosa*sinb - sina*cosb*cosc, cosa*cosb - sina*sinb*cosc, -sina*sinc], [-cosb*sinc, -sinb*sinc, cosc]]) precmatrix = precmatrix.transpose() x = (np.matrix([cd*ca, cd*sa, sd])).transpose() x2 = precmatrix * x outra = np.arctan2(x2[1],x2[0]) outdec = np.arcsin(x2[2]) outradeg = np.rad2deg(outra) outdecdeg = np.rad2deg(outdec) if outradeg < 0.0: outradeg = outradeg + 360.0 if outscalar: return float(outradeg), float(outdecdeg) else: return outradeg, outdecdeg else: # if the epochs are the same and no proper motion, this will be the same # as the input values. if the epochs are the same, but there IS proper # motion (and a given JD), then these will be perturbed from the input # values of ra, dec by the appropriate amount of motion return np.degrees(raproc), np.degrees(decproc)
def function[precess_coordinates, parameter[ra, dec, epoch_one, epoch_two, jd, mu_ra, mu_dec, outscalar]]: constant[Precesses target coordinates `ra`, `dec` from `epoch_one` to `epoch_two`. This takes into account the jd of the observations, as well as the proper motion of the target mu_ra, mu_dec. Adapted from J. D. Hartman's VARTOOLS/converttime.c [coordprecess]. Parameters ---------- ra,dec : float The equatorial coordinates of the object at `epoch_one` to precess in decimal degrees. epoch_one : float Origin epoch to precess from to target epoch. This is a float, like: 1985.0, 2000.0, etc. epoch_two : float Target epoch to precess from origin epoch. This is a float, like: 2000.0, 2018.0, etc. jd : float The full Julian date to use along with the propermotions in `mu_ra`, and `mu_dec` to handle proper motion along with the coordinate frame precession. If one of `jd`, `mu_ra`, or `mu_dec` is missing, the proper motion will not be used to calculate the final precessed coordinates. mu_ra,mu_dec : float The proper motion in mas/yr in right ascension and declination. If these are provided along with `jd`, the total proper motion of the object will be taken into account to calculate the final precessed coordinates. outscalar : bool If True, converts the output coordinates from one-element np.arrays to scalars. Returns ------- precessed_ra, precessed_dec : float A tuple of precessed equatorial coordinates in decimal degrees at `epoch_two` taking into account proper motion if `jd`, `mu_ra`, and `mu_dec` are provided. 
] <ast.Tuple object at 0x7da1afff7c40> assign[=] tuple[[<ast.Call object at 0x7da1afff7b80>, <ast.Call object at 0x7da1afff7ac0>]] if <ast.BoolOp object at 0x7da1afff79d0> begin[:] variable[jd_epoch_one] assign[=] binary_operation[name[JD2000] + binary_operation[binary_operation[name[epoch_one] - name[epoch_two]] * constant[365.25]]] variable[raproc] assign[=] binary_operation[name[raproc] + binary_operation[binary_operation[binary_operation[binary_operation[name[jd] - name[jd_epoch_one]] * name[mu_ra]] * name[MAS_P_YR_TO_RAD_P_DAY]] / call[name[np].cos, parameter[name[decproc]]]]] variable[decproc] assign[=] binary_operation[name[decproc] + binary_operation[binary_operation[binary_operation[name[jd] - name[jd_epoch_one]] * name[mu_dec]] * name[MAS_P_YR_TO_RAD_P_DAY]]] variable[ca] assign[=] call[name[np].cos, parameter[name[raproc]]] variable[cd] assign[=] call[name[np].cos, parameter[name[decproc]]] variable[sa] assign[=] call[name[np].sin, parameter[name[raproc]]] variable[sd] assign[=] call[name[np].sin, parameter[name[decproc]]] if compare[name[epoch_one] not_equal[!=] name[epoch_two]] begin[:] variable[t1] assign[=] binary_operation[constant[0.001] * binary_operation[name[epoch_two] - name[epoch_one]]] variable[t2] assign[=] binary_operation[constant[0.001] * binary_operation[name[epoch_one] - constant[2000.0]]] variable[a] assign[=] binary_operation[binary_operation[name[t1] * name[ARCSEC_TO_RADIANS]] * binary_operation[binary_operation[constant[23062.181] + binary_operation[name[t2] * binary_operation[constant[139.656] + binary_operation[constant[0.0139] * name[t2]]]]] + binary_operation[name[t1] * binary_operation[binary_operation[constant[30.188] - binary_operation[constant[0.344] * name[t2]]] + binary_operation[constant[17.998] * name[t1]]]]]] variable[b] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[t1] * name[t1]] * name[ARCSEC_TO_RADIANS]] * binary_operation[binary_operation[constant[79.28] + 
binary_operation[constant[0.41] * name[t2]]] + binary_operation[constant[0.205] * name[t1]]]] + name[a]] variable[c] assign[=] binary_operation[binary_operation[name[ARCSEC_TO_RADIANS] * name[t1]] * binary_operation[binary_operation[constant[20043.109] - binary_operation[name[t2] * binary_operation[constant[85.33] + binary_operation[constant[0.217] * name[t2]]]]] + binary_operation[name[t1] * binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1afff6da0> - binary_operation[constant[0.217] * name[t2]]] - binary_operation[constant[41.833] * name[t2]]]]]] <ast.Tuple object at 0x7da1afff6f50> assign[=] tuple[[<ast.Call object at 0x7da1afff7040>, <ast.Call object at 0x7da1afff7100>, <ast.Call object at 0x7da1afff71c0>]] <ast.Tuple object at 0x7da1afff72b0> assign[=] tuple[[<ast.Call object at 0x7da1afff7850>, <ast.Call object at 0x7da1afff7790>, <ast.Call object at 0x7da1afff76d0>]] variable[precmatrix] assign[=] call[name[np].matrix, parameter[list[[<ast.List object at 0x7da1afff74f0>, <ast.List object at 0x7da1aff1f670>, <ast.List object at 0x7da1aff1fd30>]]]] variable[precmatrix] assign[=] call[name[precmatrix].transpose, parameter[]] variable[x] assign[=] call[call[name[np].matrix, parameter[list[[<ast.BinOp object at 0x7da1aff1f7f0>, <ast.BinOp object at 0x7da1aff1f760>, <ast.Name object at 0x7da1aff1f490>]]]].transpose, parameter[]] variable[x2] assign[=] binary_operation[name[precmatrix] * name[x]] variable[outra] assign[=] call[name[np].arctan2, parameter[call[name[x2]][constant[1]], call[name[x2]][constant[0]]]] variable[outdec] assign[=] call[name[np].arcsin, parameter[call[name[x2]][constant[2]]]] variable[outradeg] assign[=] call[name[np].rad2deg, parameter[name[outra]]] variable[outdecdeg] assign[=] call[name[np].rad2deg, parameter[name[outdec]]] if compare[name[outradeg] less[<] constant[0.0]] begin[:] variable[outradeg] assign[=] binary_operation[name[outradeg] + constant[360.0]] if name[outscalar] begin[:] return[tuple[[<ast.Call object at 
0x7da1aff368f0>, <ast.Call object at 0x7da1aff36800>]]]
keyword[def] identifier[precess_coordinates] ( identifier[ra] , identifier[dec] , identifier[epoch_one] , identifier[epoch_two] , identifier[jd] = keyword[None] , identifier[mu_ra] = literal[int] , identifier[mu_dec] = literal[int] , identifier[outscalar] = keyword[False] ): literal[string] identifier[raproc] , identifier[decproc] = identifier[np] . identifier[radians] ( identifier[ra] ), identifier[np] . identifier[radians] ( identifier[dec] ) keyword[if] (( identifier[mu_ra] != literal[int] ) keyword[and] ( identifier[mu_dec] != literal[int] ) keyword[and] identifier[jd] ): identifier[jd_epoch_one] = identifier[JD2000] +( identifier[epoch_one] - identifier[epoch_two] )* literal[int] identifier[raproc] =( identifier[raproc] + ( identifier[jd] - identifier[jd_epoch_one] )* identifier[mu_ra] * identifier[MAS_P_YR_TO_RAD_P_DAY] / identifier[np] . identifier[cos] ( identifier[decproc] ) ) identifier[decproc] = identifier[decproc] +( identifier[jd] - identifier[jd_epoch_one] )* identifier[mu_dec] * identifier[MAS_P_YR_TO_RAD_P_DAY] identifier[ca] = identifier[np] . identifier[cos] ( identifier[raproc] ) identifier[cd] = identifier[np] . identifier[cos] ( identifier[decproc] ) identifier[sa] = identifier[np] . identifier[sin] ( identifier[raproc] ) identifier[sd] = identifier[np] . 
identifier[sin] ( identifier[decproc] ) keyword[if] identifier[epoch_one] != identifier[epoch_two] : identifier[t1] = literal[int] *( identifier[epoch_two] - identifier[epoch_one] ) identifier[t2] = literal[int] *( identifier[epoch_one] - literal[int] ) identifier[a] =( identifier[t1] * identifier[ARCSEC_TO_RADIANS] *( literal[int] + identifier[t2] *( literal[int] + literal[int] * identifier[t2] )+ identifier[t1] *( literal[int] - literal[int] * identifier[t2] + literal[int] * identifier[t1] ))) identifier[b] = identifier[t1] * identifier[t1] * identifier[ARCSEC_TO_RADIANS] *( literal[int] + literal[int] * identifier[t2] + literal[int] * identifier[t1] )+ identifier[a] identifier[c] =( identifier[ARCSEC_TO_RADIANS] * identifier[t1] *( literal[int] - identifier[t2] *( literal[int] + literal[int] * identifier[t2] )+ identifier[t1] *(- literal[int] - literal[int] * identifier[t2] - literal[int] * identifier[t2] )) ) identifier[sina] , identifier[sinb] , identifier[sinc] = identifier[np] . identifier[sin] ( identifier[a] ), identifier[np] . identifier[sin] ( identifier[b] ), identifier[np] . identifier[sin] ( identifier[c] ) identifier[cosa] , identifier[cosb] , identifier[cosc] = identifier[np] . identifier[cos] ( identifier[a] ), identifier[np] . identifier[cos] ( identifier[b] ), identifier[np] . identifier[cos] ( identifier[c] ) identifier[precmatrix] = identifier[np] . 
identifier[matrix] ([[ identifier[cosa] * identifier[cosb] * identifier[cosc] - identifier[sina] * identifier[sinb] , identifier[sina] * identifier[cosb] + identifier[cosa] * identifier[sinb] * identifier[cosc] , identifier[cosa] * identifier[sinc] ], [- identifier[cosa] * identifier[sinb] - identifier[sina] * identifier[cosb] * identifier[cosc] , identifier[cosa] * identifier[cosb] - identifier[sina] * identifier[sinb] * identifier[cosc] , - identifier[sina] * identifier[sinc] ], [- identifier[cosb] * identifier[sinc] , - identifier[sinb] * identifier[sinc] , identifier[cosc] ]]) identifier[precmatrix] = identifier[precmatrix] . identifier[transpose] () identifier[x] =( identifier[np] . identifier[matrix] ([ identifier[cd] * identifier[ca] , identifier[cd] * identifier[sa] , identifier[sd] ])). identifier[transpose] () identifier[x2] = identifier[precmatrix] * identifier[x] identifier[outra] = identifier[np] . identifier[arctan2] ( identifier[x2] [ literal[int] ], identifier[x2] [ literal[int] ]) identifier[outdec] = identifier[np] . identifier[arcsin] ( identifier[x2] [ literal[int] ]) identifier[outradeg] = identifier[np] . identifier[rad2deg] ( identifier[outra] ) identifier[outdecdeg] = identifier[np] . identifier[rad2deg] ( identifier[outdec] ) keyword[if] identifier[outradeg] < literal[int] : identifier[outradeg] = identifier[outradeg] + literal[int] keyword[if] identifier[outscalar] : keyword[return] identifier[float] ( identifier[outradeg] ), identifier[float] ( identifier[outdecdeg] ) keyword[else] : keyword[return] identifier[outradeg] , identifier[outdecdeg] keyword[else] : keyword[return] identifier[np] . identifier[degrees] ( identifier[raproc] ), identifier[np] . identifier[degrees] ( identifier[decproc] )
def precess_coordinates(ra, dec, epoch_one, epoch_two, jd=None, mu_ra=0.0, mu_dec=0.0, outscalar=False): """Precesses target coordinates `ra`, `dec` from `epoch_one` to `epoch_two`. This takes into account the jd of the observations, as well as the proper motion of the target mu_ra, mu_dec. Adapted from J. D. Hartman's VARTOOLS/converttime.c [coordprecess]. Parameters ---------- ra,dec : float The equatorial coordinates of the object at `epoch_one` to precess in decimal degrees. epoch_one : float Origin epoch to precess from to target epoch. This is a float, like: 1985.0, 2000.0, etc. epoch_two : float Target epoch to precess from origin epoch. This is a float, like: 2000.0, 2018.0, etc. jd : float The full Julian date to use along with the propermotions in `mu_ra`, and `mu_dec` to handle proper motion along with the coordinate frame precession. If one of `jd`, `mu_ra`, or `mu_dec` is missing, the proper motion will not be used to calculate the final precessed coordinates. mu_ra,mu_dec : float The proper motion in mas/yr in right ascension and declination. If these are provided along with `jd`, the total proper motion of the object will be taken into account to calculate the final precessed coordinates. outscalar : bool If True, converts the output coordinates from one-element np.arrays to scalars. Returns ------- precessed_ra, precessed_dec : float A tuple of precessed equatorial coordinates in decimal degrees at `epoch_two` taking into account proper motion if `jd`, `mu_ra`, and `mu_dec` are provided. 
""" (raproc, decproc) = (np.radians(ra), np.radians(dec)) if mu_ra != 0.0 and mu_dec != 0.0 and jd: jd_epoch_one = JD2000 + (epoch_one - epoch_two) * 365.25 raproc = raproc + (jd - jd_epoch_one) * mu_ra * MAS_P_YR_TO_RAD_P_DAY / np.cos(decproc) decproc = decproc + (jd - jd_epoch_one) * mu_dec * MAS_P_YR_TO_RAD_P_DAY # depends on [control=['if'], data=[]] ca = np.cos(raproc) cd = np.cos(decproc) sa = np.sin(raproc) sd = np.sin(decproc) if epoch_one != epoch_two: t1 = 0.001 * (epoch_two - epoch_one) t2 = 0.001 * (epoch_one - 2000.0) a = t1 * ARCSEC_TO_RADIANS * (23062.181 + t2 * (139.656 + 0.0139 * t2) + t1 * (30.188 - 0.344 * t2 + 17.998 * t1)) b = t1 * t1 * ARCSEC_TO_RADIANS * (79.28 + 0.41 * t2 + 0.205 * t1) + a c = ARCSEC_TO_RADIANS * t1 * (20043.109 - t2 * (85.33 + 0.217 * t2) + t1 * (-42.665 - 0.217 * t2 - 41.833 * t2)) (sina, sinb, sinc) = (np.sin(a), np.sin(b), np.sin(c)) (cosa, cosb, cosc) = (np.cos(a), np.cos(b), np.cos(c)) precmatrix = np.matrix([[cosa * cosb * cosc - sina * sinb, sina * cosb + cosa * sinb * cosc, cosa * sinc], [-cosa * sinb - sina * cosb * cosc, cosa * cosb - sina * sinb * cosc, -sina * sinc], [-cosb * sinc, -sinb * sinc, cosc]]) precmatrix = precmatrix.transpose() x = np.matrix([cd * ca, cd * sa, sd]).transpose() x2 = precmatrix * x outra = np.arctan2(x2[1], x2[0]) outdec = np.arcsin(x2[2]) outradeg = np.rad2deg(outra) outdecdeg = np.rad2deg(outdec) if outradeg < 0.0: outradeg = outradeg + 360.0 # depends on [control=['if'], data=['outradeg']] if outscalar: return (float(outradeg), float(outdecdeg)) # depends on [control=['if'], data=[]] else: return (outradeg, outdecdeg) # depends on [control=['if'], data=['epoch_one', 'epoch_two']] else: # if the epochs are the same and no proper motion, this will be the same # as the input values. 
if the epochs are the same, but there IS proper # motion (and a given JD), then these will be perturbed from the input # values of ra, dec by the appropriate amount of motion return (np.degrees(raproc), np.degrees(decproc))
def make_unpublished(self, request, queryset): """ Marks selected news items as unpublished """ rows_updated = queryset.update(is_published=False) self.message_user(request, ungettext('%(count)d newsitem was unpublished', '%(count)d newsitems were unpublished', rows_updated) % {'count': rows_updated})
def function[make_unpublished, parameter[self, request, queryset]]: constant[ Marks selected news items as unpublished ] variable[rows_updated] assign[=] call[name[queryset].update, parameter[]] call[name[self].message_user, parameter[name[request], binary_operation[call[name[ungettext], parameter[constant[%(count)d newsitem was unpublished], constant[%(count)d newsitems were unpublished], name[rows_updated]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b04185b0>], [<ast.Name object at 0x7da1b041bee0>]]]]]
keyword[def] identifier[make_unpublished] ( identifier[self] , identifier[request] , identifier[queryset] ): literal[string] identifier[rows_updated] = identifier[queryset] . identifier[update] ( identifier[is_published] = keyword[False] ) identifier[self] . identifier[message_user] ( identifier[request] , identifier[ungettext] ( literal[string] , literal[string] , identifier[rows_updated] )%{ literal[string] : identifier[rows_updated] })
def make_unpublished(self, request, queryset): """ Marks selected news items as unpublished """ rows_updated = queryset.update(is_published=False) self.message_user(request, ungettext('%(count)d newsitem was unpublished', '%(count)d newsitems were unpublished', rows_updated) % {'count': rows_updated})
def translate(self, desired_locale=None): """Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode """ translated_message = Message._translate_msgid(self.msgid, self.domain, desired_locale) if self.params is None: # No need for more translation return translated_message # This Message object may have been formatted with one or more # Message objects as substitution arguments, given either as a single # argument, part of a tuple, or as one or more values in a dictionary. # When translating this Message we need to translate those Messages too translated_params = _translate_args(self.params, desired_locale) translated_message = translated_message % translated_params return translated_message
def function[translate, parameter[self, desired_locale]]: constant[Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode ] variable[translated_message] assign[=] call[name[Message]._translate_msgid, parameter[name[self].msgid, name[self].domain, name[desired_locale]]] if compare[name[self].params is constant[None]] begin[:] return[name[translated_message]] variable[translated_params] assign[=] call[name[_translate_args], parameter[name[self].params, name[desired_locale]]] variable[translated_message] assign[=] binary_operation[name[translated_message] <ast.Mod object at 0x7da2590d6920> name[translated_params]] return[name[translated_message]]
keyword[def] identifier[translate] ( identifier[self] , identifier[desired_locale] = keyword[None] ): literal[string] identifier[translated_message] = identifier[Message] . identifier[_translate_msgid] ( identifier[self] . identifier[msgid] , identifier[self] . identifier[domain] , identifier[desired_locale] ) keyword[if] identifier[self] . identifier[params] keyword[is] keyword[None] : keyword[return] identifier[translated_message] identifier[translated_params] = identifier[_translate_args] ( identifier[self] . identifier[params] , identifier[desired_locale] ) identifier[translated_message] = identifier[translated_message] % identifier[translated_params] keyword[return] identifier[translated_message]
def translate(self, desired_locale=None): """Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode """ translated_message = Message._translate_msgid(self.msgid, self.domain, desired_locale) if self.params is None: # No need for more translation return translated_message # depends on [control=['if'], data=[]] # This Message object may have been formatted with one or more # Message objects as substitution arguments, given either as a single # argument, part of a tuple, or as one or more values in a dictionary. # When translating this Message we need to translate those Messages too translated_params = _translate_args(self.params, desired_locale) translated_message = translated_message % translated_params return translated_message
def filesFromHere_explore(self, astr_startPath = '/'): """ Return a list of path/files from "here" in the stree, using the child explore access. :param astr_startPath: path from which to start :return: """ self.l_fwd = [] self.treeExplore(startPath = astr_startPath, f=self.fwd) self.l_allFiles = [f.split('/') for f in self.l_fwd] for i in range(0, len(self.l_allFiles)): self.l_allFiles[i][0] = '/' return self.l_fwd
def function[filesFromHere_explore, parameter[self, astr_startPath]]: constant[ Return a list of path/files from "here" in the stree, using the child explore access. :param astr_startPath: path from which to start :return: ] name[self].l_fwd assign[=] list[[]] call[name[self].treeExplore, parameter[]] name[self].l_allFiles assign[=] <ast.ListComp object at 0x7da1b09df4f0> for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[self].l_allFiles]]]]] begin[:] call[call[name[self].l_allFiles][name[i]]][constant[0]] assign[=] constant[/] return[name[self].l_fwd]
keyword[def] identifier[filesFromHere_explore] ( identifier[self] , identifier[astr_startPath] = literal[string] ): literal[string] identifier[self] . identifier[l_fwd] =[] identifier[self] . identifier[treeExplore] ( identifier[startPath] = identifier[astr_startPath] , identifier[f] = identifier[self] . identifier[fwd] ) identifier[self] . identifier[l_allFiles] =[ identifier[f] . identifier[split] ( literal[string] ) keyword[for] identifier[f] keyword[in] identifier[self] . identifier[l_fwd] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[self] . identifier[l_allFiles] )): identifier[self] . identifier[l_allFiles] [ identifier[i] ][ literal[int] ]= literal[string] keyword[return] identifier[self] . identifier[l_fwd]
def filesFromHere_explore(self, astr_startPath='/'): """ Return a list of path/files from "here" in the stree, using the child explore access. :param astr_startPath: path from which to start :return: """ self.l_fwd = [] self.treeExplore(startPath=astr_startPath, f=self.fwd) self.l_allFiles = [f.split('/') for f in self.l_fwd] for i in range(0, len(self.l_allFiles)): self.l_allFiles[i][0] = '/' # depends on [control=['for'], data=['i']] return self.l_fwd
def _parse_value(self): # type: () -> Item """ Attempts to parse a value at the current position. """ self.mark() c = self._current trivia = Trivia() if c == StringType.SLB.value: return self._parse_basic_string() elif c == StringType.SLL.value: return self._parse_literal_string() elif c == BoolType.TRUE.value[0]: return self._parse_true() elif c == BoolType.FALSE.value[0]: return self._parse_false() elif c == "[": return self._parse_array() elif c == "{": return self._parse_inline_table() elif c in "+-" or self._peek(4) in { "+inf", "-inf", "inf", "+nan", "-nan", "nan", }: # Number while self._current not in " \t\n\r#,]}" and self.inc(): pass raw = self.extract() item = self._parse_number(raw, trivia) if item is not None: return item raise self.parse_error(InvalidNumberError) elif c in string.digits: # Integer, Float, Date, Time or DateTime while self._current not in " \t\n\r#,]}" and self.inc(): pass raw = self.extract() m = RFC_3339_LOOSE.match(raw) if m: if m.group(1) and m.group(5): # datetime try: return DateTime(parse_rfc3339(raw), trivia, raw) except ValueError: raise self.parse_error(InvalidDateTimeError) if m.group(1): try: return Date(parse_rfc3339(raw), trivia, raw) except ValueError: raise self.parse_error(InvalidDateError) if m.group(5): try: return Time(parse_rfc3339(raw), trivia, raw) except ValueError: raise self.parse_error(InvalidTimeError) item = self._parse_number(raw, trivia) if item is not None: return item raise self.parse_error(InvalidNumberError) else: raise self.parse_error(UnexpectedCharError, c)
def function[_parse_value, parameter[self]]: constant[ Attempts to parse a value at the current position. ] call[name[self].mark, parameter[]] variable[c] assign[=] name[self]._current variable[trivia] assign[=] call[name[Trivia], parameter[]] if compare[name[c] equal[==] name[StringType].SLB.value] begin[:] return[call[name[self]._parse_basic_string, parameter[]]]
keyword[def] identifier[_parse_value] ( identifier[self] ): literal[string] identifier[self] . identifier[mark] () identifier[c] = identifier[self] . identifier[_current] identifier[trivia] = identifier[Trivia] () keyword[if] identifier[c] == identifier[StringType] . identifier[SLB] . identifier[value] : keyword[return] identifier[self] . identifier[_parse_basic_string] () keyword[elif] identifier[c] == identifier[StringType] . identifier[SLL] . identifier[value] : keyword[return] identifier[self] . identifier[_parse_literal_string] () keyword[elif] identifier[c] == identifier[BoolType] . identifier[TRUE] . identifier[value] [ literal[int] ]: keyword[return] identifier[self] . identifier[_parse_true] () keyword[elif] identifier[c] == identifier[BoolType] . identifier[FALSE] . identifier[value] [ literal[int] ]: keyword[return] identifier[self] . identifier[_parse_false] () keyword[elif] identifier[c] == literal[string] : keyword[return] identifier[self] . identifier[_parse_array] () keyword[elif] identifier[c] == literal[string] : keyword[return] identifier[self] . identifier[_parse_inline_table] () keyword[elif] identifier[c] keyword[in] literal[string] keyword[or] identifier[self] . identifier[_peek] ( literal[int] ) keyword[in] { literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , }: keyword[while] identifier[self] . identifier[_current] keyword[not] keyword[in] literal[string] keyword[and] identifier[self] . identifier[inc] (): keyword[pass] identifier[raw] = identifier[self] . identifier[extract] () identifier[item] = identifier[self] . identifier[_parse_number] ( identifier[raw] , identifier[trivia] ) keyword[if] identifier[item] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[item] keyword[raise] identifier[self] . identifier[parse_error] ( identifier[InvalidNumberError] ) keyword[elif] identifier[c] keyword[in] identifier[string] . 
identifier[digits] : keyword[while] identifier[self] . identifier[_current] keyword[not] keyword[in] literal[string] keyword[and] identifier[self] . identifier[inc] (): keyword[pass] identifier[raw] = identifier[self] . identifier[extract] () identifier[m] = identifier[RFC_3339_LOOSE] . identifier[match] ( identifier[raw] ) keyword[if] identifier[m] : keyword[if] identifier[m] . identifier[group] ( literal[int] ) keyword[and] identifier[m] . identifier[group] ( literal[int] ): keyword[try] : keyword[return] identifier[DateTime] ( identifier[parse_rfc3339] ( identifier[raw] ), identifier[trivia] , identifier[raw] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[self] . identifier[parse_error] ( identifier[InvalidDateTimeError] ) keyword[if] identifier[m] . identifier[group] ( literal[int] ): keyword[try] : keyword[return] identifier[Date] ( identifier[parse_rfc3339] ( identifier[raw] ), identifier[trivia] , identifier[raw] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[self] . identifier[parse_error] ( identifier[InvalidDateError] ) keyword[if] identifier[m] . identifier[group] ( literal[int] ): keyword[try] : keyword[return] identifier[Time] ( identifier[parse_rfc3339] ( identifier[raw] ), identifier[trivia] , identifier[raw] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[self] . identifier[parse_error] ( identifier[InvalidTimeError] ) identifier[item] = identifier[self] . identifier[_parse_number] ( identifier[raw] , identifier[trivia] ) keyword[if] identifier[item] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[item] keyword[raise] identifier[self] . identifier[parse_error] ( identifier[InvalidNumberError] ) keyword[else] : keyword[raise] identifier[self] . identifier[parse_error] ( identifier[UnexpectedCharError] , identifier[c] )
def _parse_value(self): # type: () -> Item '\n Attempts to parse a value at the current position.\n ' self.mark() c = self._current trivia = Trivia() if c == StringType.SLB.value: return self._parse_basic_string() # depends on [control=['if'], data=[]] elif c == StringType.SLL.value: return self._parse_literal_string() # depends on [control=['if'], data=[]] elif c == BoolType.TRUE.value[0]: return self._parse_true() # depends on [control=['if'], data=[]] elif c == BoolType.FALSE.value[0]: return self._parse_false() # depends on [control=['if'], data=[]] elif c == '[': return self._parse_array() # depends on [control=['if'], data=[]] elif c == '{': return self._parse_inline_table() # depends on [control=['if'], data=[]] elif c in '+-' or self._peek(4) in {'+inf', '-inf', 'inf', '+nan', '-nan', 'nan'}: # Number while self._current not in ' \t\n\r#,]}' and self.inc(): pass # depends on [control=['while'], data=[]] raw = self.extract() item = self._parse_number(raw, trivia) if item is not None: return item # depends on [control=['if'], data=['item']] raise self.parse_error(InvalidNumberError) # depends on [control=['if'], data=[]] elif c in string.digits: # Integer, Float, Date, Time or DateTime while self._current not in ' \t\n\r#,]}' and self.inc(): pass # depends on [control=['while'], data=[]] raw = self.extract() m = RFC_3339_LOOSE.match(raw) if m: if m.group(1) and m.group(5): # datetime try: return DateTime(parse_rfc3339(raw), trivia, raw) # depends on [control=['try'], data=[]] except ValueError: raise self.parse_error(InvalidDateTimeError) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] if m.group(1): try: return Date(parse_rfc3339(raw), trivia, raw) # depends on [control=['try'], data=[]] except ValueError: raise self.parse_error(InvalidDateError) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] if m.group(5): try: return Time(parse_rfc3339(raw), trivia, raw) # depends on [control=['try'], 
data=[]] except ValueError: raise self.parse_error(InvalidTimeError) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] item = self._parse_number(raw, trivia) if item is not None: return item # depends on [control=['if'], data=['item']] raise self.parse_error(InvalidNumberError) # depends on [control=['if'], data=[]] else: raise self.parse_error(UnexpectedCharError, c)
def get_vcs_details_output_vcs_details_node_vcs_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vcs_details = ET.Element("get_vcs_details") config = get_vcs_details output = ET.SubElement(get_vcs_details, "output") vcs_details = ET.SubElement(output, "vcs-details") node_vcs_type = ET.SubElement(vcs_details, "node-vcs-type") node_vcs_type.text = kwargs.pop('node_vcs_type') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[get_vcs_details_output_vcs_details_node_vcs_type, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[get_vcs_details] assign[=] call[name[ET].Element, parameter[constant[get_vcs_details]]] variable[config] assign[=] name[get_vcs_details] variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_vcs_details], constant[output]]] variable[vcs_details] assign[=] call[name[ET].SubElement, parameter[name[output], constant[vcs-details]]] variable[node_vcs_type] assign[=] call[name[ET].SubElement, parameter[name[vcs_details], constant[node-vcs-type]]] name[node_vcs_type].text assign[=] call[name[kwargs].pop, parameter[constant[node_vcs_type]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[get_vcs_details_output_vcs_details_node_vcs_type] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[get_vcs_details] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[get_vcs_details] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_vcs_details] , literal[string] ) identifier[vcs_details] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[node_vcs_type] = identifier[ET] . identifier[SubElement] ( identifier[vcs_details] , literal[string] ) identifier[node_vcs_type] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def get_vcs_details_output_vcs_details_node_vcs_type(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') get_vcs_details = ET.Element('get_vcs_details') config = get_vcs_details output = ET.SubElement(get_vcs_details, 'output') vcs_details = ET.SubElement(output, 'vcs-details') node_vcs_type = ET.SubElement(vcs_details, 'node-vcs-type') node_vcs_type.text = kwargs.pop('node_vcs_type') callback = kwargs.pop('callback', self._callback) return callback(config)
def push_build_set(id, tag_prefix): """ Push build set to Brew """ req = swagger_client.BuildConfigSetRecordPushRequestRest() req.tag_prefix = tag_prefix req.build_config_set_record_id = id response = utils.checked_api_call(pnc_api.build_push, 'push_record_set', body=req) if response: return utils.format_json_list(response)
def function[push_build_set, parameter[id, tag_prefix]]: constant[ Push build set to Brew ] variable[req] assign[=] call[name[swagger_client].BuildConfigSetRecordPushRequestRest, parameter[]] name[req].tag_prefix assign[=] name[tag_prefix] name[req].build_config_set_record_id assign[=] name[id] variable[response] assign[=] call[name[utils].checked_api_call, parameter[name[pnc_api].build_push, constant[push_record_set]]] if name[response] begin[:] return[call[name[utils].format_json_list, parameter[name[response]]]]
keyword[def] identifier[push_build_set] ( identifier[id] , identifier[tag_prefix] ): literal[string] identifier[req] = identifier[swagger_client] . identifier[BuildConfigSetRecordPushRequestRest] () identifier[req] . identifier[tag_prefix] = identifier[tag_prefix] identifier[req] . identifier[build_config_set_record_id] = identifier[id] identifier[response] = identifier[utils] . identifier[checked_api_call] ( identifier[pnc_api] . identifier[build_push] , literal[string] , identifier[body] = identifier[req] ) keyword[if] identifier[response] : keyword[return] identifier[utils] . identifier[format_json_list] ( identifier[response] )
def push_build_set(id, tag_prefix): """ Push build set to Brew """ req = swagger_client.BuildConfigSetRecordPushRequestRest() req.tag_prefix = tag_prefix req.build_config_set_record_id = id response = utils.checked_api_call(pnc_api.build_push, 'push_record_set', body=req) if response: return utils.format_json_list(response) # depends on [control=['if'], data=[]]
def check_signature(self, signature, timestamp, nonce): """ 验证微信消息真实性 :param signature: 微信加密签名 :param timestamp: 时间戳 :param nonce: 随机数 :return: 通过验证返回 True, 未通过验证返回 False """ if not signature or not timestamp or not nonce: return False tmp_list = [self.conf.token, timestamp, nonce] tmp_list.sort() tmp_str = ''.join(tmp_list) if signature != hashlib.sha1(tmp_str.encode('utf-8')).hexdigest(): return False return True
def function[check_signature, parameter[self, signature, timestamp, nonce]]: constant[ 验证微信消息真实性 :param signature: 微信加密签名 :param timestamp: 时间戳 :param nonce: 随机数 :return: 通过验证返回 True, 未通过验证返回 False ] if <ast.BoolOp object at 0x7da2054a4ee0> begin[:] return[constant[False]] variable[tmp_list] assign[=] list[[<ast.Attribute object at 0x7da2054a4430>, <ast.Name object at 0x7da2054a7460>, <ast.Name object at 0x7da2054a6b00>]] call[name[tmp_list].sort, parameter[]] variable[tmp_str] assign[=] call[constant[].join, parameter[name[tmp_list]]] if compare[name[signature] not_equal[!=] call[call[name[hashlib].sha1, parameter[call[name[tmp_str].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]] begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[check_signature] ( identifier[self] , identifier[signature] , identifier[timestamp] , identifier[nonce] ): literal[string] keyword[if] keyword[not] identifier[signature] keyword[or] keyword[not] identifier[timestamp] keyword[or] keyword[not] identifier[nonce] : keyword[return] keyword[False] identifier[tmp_list] =[ identifier[self] . identifier[conf] . identifier[token] , identifier[timestamp] , identifier[nonce] ] identifier[tmp_list] . identifier[sort] () identifier[tmp_str] = literal[string] . identifier[join] ( identifier[tmp_list] ) keyword[if] identifier[signature] != identifier[hashlib] . identifier[sha1] ( identifier[tmp_str] . identifier[encode] ( literal[string] )). identifier[hexdigest] (): keyword[return] keyword[False] keyword[return] keyword[True]
def check_signature(self, signature, timestamp, nonce): """ 验证微信消息真实性 :param signature: 微信加密签名 :param timestamp: 时间戳 :param nonce: 随机数 :return: 通过验证返回 True, 未通过验证返回 False """ if not signature or not timestamp or (not nonce): return False # depends on [control=['if'], data=[]] tmp_list = [self.conf.token, timestamp, nonce] tmp_list.sort() tmp_str = ''.join(tmp_list) if signature != hashlib.sha1(tmp_str.encode('utf-8')).hexdigest(): return False # depends on [control=['if'], data=[]] return True
def _process_stream_delta(self, delta_stream): """Bookkeeping on internal data structures while iterating a stream.""" for pchange in delta_stream: if pchange.kind == ChangeType.ADD: self.policy_files.setdefault( pchange.file_path, PolicyCollection()).add(pchange.policy) elif pchange.kind == ChangeType.REMOVE: self.policy_files[pchange.file_path].remove(pchange.policy) elif pchange.kind in (ChangeType.MOVED, ChangeType.MODIFIED): if pchange.policy.file_path != pchange.previous.file_path: self.policy_files[pchange.previous.file_path].remove(pchange.previous) if (pchange.policy.file_path in self.policy_files and pchange.policy.name in self.policy_files[pchange.file_path]): self.policy_files[pchange.file_path][pchange.policy.name] = pchange.policy else: self.policy_files.setdefault( pchange.file_path, PolicyCollection()).add(pchange.policy) else: self.policy_files[pchange.file_path][pchange.policy.name] = pchange.policy yield pchange
def function[_process_stream_delta, parameter[self, delta_stream]]: constant[Bookkeeping on internal data structures while iterating a stream.] for taget[name[pchange]] in starred[name[delta_stream]] begin[:] if compare[name[pchange].kind equal[==] name[ChangeType].ADD] begin[:] call[call[name[self].policy_files.setdefault, parameter[name[pchange].file_path, call[name[PolicyCollection], parameter[]]]].add, parameter[name[pchange].policy]] <ast.Yield object at 0x7da1b1fddf90>
keyword[def] identifier[_process_stream_delta] ( identifier[self] , identifier[delta_stream] ): literal[string] keyword[for] identifier[pchange] keyword[in] identifier[delta_stream] : keyword[if] identifier[pchange] . identifier[kind] == identifier[ChangeType] . identifier[ADD] : identifier[self] . identifier[policy_files] . identifier[setdefault] ( identifier[pchange] . identifier[file_path] , identifier[PolicyCollection] ()). identifier[add] ( identifier[pchange] . identifier[policy] ) keyword[elif] identifier[pchange] . identifier[kind] == identifier[ChangeType] . identifier[REMOVE] : identifier[self] . identifier[policy_files] [ identifier[pchange] . identifier[file_path] ]. identifier[remove] ( identifier[pchange] . identifier[policy] ) keyword[elif] identifier[pchange] . identifier[kind] keyword[in] ( identifier[ChangeType] . identifier[MOVED] , identifier[ChangeType] . identifier[MODIFIED] ): keyword[if] identifier[pchange] . identifier[policy] . identifier[file_path] != identifier[pchange] . identifier[previous] . identifier[file_path] : identifier[self] . identifier[policy_files] [ identifier[pchange] . identifier[previous] . identifier[file_path] ]. identifier[remove] ( identifier[pchange] . identifier[previous] ) keyword[if] ( identifier[pchange] . identifier[policy] . identifier[file_path] keyword[in] identifier[self] . identifier[policy_files] keyword[and] identifier[pchange] . identifier[policy] . identifier[name] keyword[in] identifier[self] . identifier[policy_files] [ identifier[pchange] . identifier[file_path] ]): identifier[self] . identifier[policy_files] [ identifier[pchange] . identifier[file_path] ][ identifier[pchange] . identifier[policy] . identifier[name] ]= identifier[pchange] . identifier[policy] keyword[else] : identifier[self] . identifier[policy_files] . identifier[setdefault] ( identifier[pchange] . identifier[file_path] , identifier[PolicyCollection] ()). identifier[add] ( identifier[pchange] . 
identifier[policy] ) keyword[else] : identifier[self] . identifier[policy_files] [ identifier[pchange] . identifier[file_path] ][ identifier[pchange] . identifier[policy] . identifier[name] ]= identifier[pchange] . identifier[policy] keyword[yield] identifier[pchange]
def _process_stream_delta(self, delta_stream): """Bookkeeping on internal data structures while iterating a stream.""" for pchange in delta_stream: if pchange.kind == ChangeType.ADD: self.policy_files.setdefault(pchange.file_path, PolicyCollection()).add(pchange.policy) # depends on [control=['if'], data=[]] elif pchange.kind == ChangeType.REMOVE: self.policy_files[pchange.file_path].remove(pchange.policy) # depends on [control=['if'], data=[]] elif pchange.kind in (ChangeType.MOVED, ChangeType.MODIFIED): if pchange.policy.file_path != pchange.previous.file_path: self.policy_files[pchange.previous.file_path].remove(pchange.previous) if pchange.policy.file_path in self.policy_files and pchange.policy.name in self.policy_files[pchange.file_path]: self.policy_files[pchange.file_path][pchange.policy.name] = pchange.policy # depends on [control=['if'], data=[]] else: self.policy_files.setdefault(pchange.file_path, PolicyCollection()).add(pchange.policy) # depends on [control=['if'], data=[]] else: self.policy_files[pchange.file_path][pchange.policy.name] = pchange.policy # depends on [control=['if'], data=[]] yield pchange # depends on [control=['for'], data=['pchange']]
def get_entries(self, criteria, inc_structure=False, optional_data=None): """ Get ComputedEntries satisfying a particular criteria. .. note:: The get_entries_in_system and get_entries methods should be used with care. In essence, all entries, GGA, GGA+U or otherwise, are returned. The dataset is very heterogeneous and not directly comparable. It is highly recommended that you perform post-processing using pymatgen.entries.compatibility. Args: criteria: Criteria obeying the same syntax as query. inc_structure: Optional parameter as to whether to include a structure with the ComputedEntry. Defaults to False. Use with care - including structures with a large number of entries can potentially slow down your code to a crawl. optional_data: Optional data to include with the entry. This allows the data to be access via entry.data[key]. Returns: List of pymatgen.entries.ComputedEntries satisfying criteria. """ all_entries = list() optional_data = [] if not optional_data else list(optional_data) optional_data.append("oxide_type") fields = [k for k in optional_data] fields.extend(["task_id", "unit_cell_formula", "energy", "is_hubbard", "hubbards", "pseudo_potential.labels", "pseudo_potential.functional", "run_type", "input.is_lasph", "input.xc_override", "input.potcar_spec"]) if inc_structure: fields.append("output.crystal") for c in self.query(fields, criteria): func = c["pseudo_potential.functional"] labels = c["pseudo_potential.labels"] symbols = ["{} {}".format(func, label) for label in labels] parameters = {"run_type": c["run_type"], "is_hubbard": c["is_hubbard"], "hubbards": c["hubbards"], "potcar_symbols": symbols, "is_lasph": c.get("input.is_lasph") or False, "potcar_spec": c.get("input.potcar_spec"), "xc_override": c.get("input.xc_override")} optional_data = {k: c[k] for k in optional_data} if inc_structure: struct = Structure.from_dict(c["output.crystal"]) entry = ComputedStructureEntry(struct, c["energy"], 0.0, parameters=parameters, data=optional_data, 
entry_id=c["task_id"]) else: entry = ComputedEntry(Composition(c["unit_cell_formula"]), c["energy"], 0.0, parameters=parameters, data=optional_data, entry_id=c["task_id"]) all_entries.append(entry) return all_entries
def function[get_entries, parameter[self, criteria, inc_structure, optional_data]]: constant[ Get ComputedEntries satisfying a particular criteria. .. note:: The get_entries_in_system and get_entries methods should be used with care. In essence, all entries, GGA, GGA+U or otherwise, are returned. The dataset is very heterogeneous and not directly comparable. It is highly recommended that you perform post-processing using pymatgen.entries.compatibility. Args: criteria: Criteria obeying the same syntax as query. inc_structure: Optional parameter as to whether to include a structure with the ComputedEntry. Defaults to False. Use with care - including structures with a large number of entries can potentially slow down your code to a crawl. optional_data: Optional data to include with the entry. This allows the data to be access via entry.data[key]. Returns: List of pymatgen.entries.ComputedEntries satisfying criteria. ] variable[all_entries] assign[=] call[name[list], parameter[]] variable[optional_data] assign[=] <ast.IfExp object at 0x7da18f58e200> call[name[optional_data].append, parameter[constant[oxide_type]]] variable[fields] assign[=] <ast.ListComp object at 0x7da1b03827d0> call[name[fields].extend, parameter[list[[<ast.Constant object at 0x7da1b03801f0>, <ast.Constant object at 0x7da1b0381000>, <ast.Constant object at 0x7da1b0380100>, <ast.Constant object at 0x7da1b0380220>, <ast.Constant object at 0x7da1b0381360>, <ast.Constant object at 0x7da1b0383e50>, <ast.Constant object at 0x7da1b0383a30>, <ast.Constant object at 0x7da1b03812a0>, <ast.Constant object at 0x7da1b03820b0>, <ast.Constant object at 0x7da1b0382ad0>, <ast.Constant object at 0x7da1b03804c0>]]]] if name[inc_structure] begin[:] call[name[fields].append, parameter[constant[output.crystal]]] for taget[name[c]] in starred[call[name[self].query, parameter[name[fields], name[criteria]]]] begin[:] variable[func] assign[=] call[name[c]][constant[pseudo_potential.functional]] variable[labels] assign[=] 
call[name[c]][constant[pseudo_potential.labels]] variable[symbols] assign[=] <ast.ListComp object at 0x7da18ede4c10> variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da18ede6860>, <ast.Constant object at 0x7da18ede75b0>, <ast.Constant object at 0x7da18ede7e80>, <ast.Constant object at 0x7da18ede43d0>, <ast.Constant object at 0x7da18ede7d30>, <ast.Constant object at 0x7da18ede42b0>, <ast.Constant object at 0x7da18ede6f80>], [<ast.Subscript object at 0x7da18ede6ef0>, <ast.Subscript object at 0x7da18ede5810>, <ast.Subscript object at 0x7da18ede55d0>, <ast.Name object at 0x7da18ede56c0>, <ast.BoolOp object at 0x7da18ede5930>, <ast.Call object at 0x7da18ede7070>, <ast.Call object at 0x7da18ede7700>]] variable[optional_data] assign[=] <ast.DictComp object at 0x7da18ede6830> if name[inc_structure] begin[:] variable[struct] assign[=] call[name[Structure].from_dict, parameter[call[name[c]][constant[output.crystal]]]] variable[entry] assign[=] call[name[ComputedStructureEntry], parameter[name[struct], call[name[c]][constant[energy]], constant[0.0]]] call[name[all_entries].append, parameter[name[entry]]] return[name[all_entries]]
keyword[def] identifier[get_entries] ( identifier[self] , identifier[criteria] , identifier[inc_structure] = keyword[False] , identifier[optional_data] = keyword[None] ): literal[string] identifier[all_entries] = identifier[list] () identifier[optional_data] =[] keyword[if] keyword[not] identifier[optional_data] keyword[else] identifier[list] ( identifier[optional_data] ) identifier[optional_data] . identifier[append] ( literal[string] ) identifier[fields] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[optional_data] ] identifier[fields] . identifier[extend] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[if] identifier[inc_structure] : identifier[fields] . identifier[append] ( literal[string] ) keyword[for] identifier[c] keyword[in] identifier[self] . identifier[query] ( identifier[fields] , identifier[criteria] ): identifier[func] = identifier[c] [ literal[string] ] identifier[labels] = identifier[c] [ literal[string] ] identifier[symbols] =[ literal[string] . identifier[format] ( identifier[func] , identifier[label] ) keyword[for] identifier[label] keyword[in] identifier[labels] ] identifier[parameters] ={ literal[string] : identifier[c] [ literal[string] ], literal[string] : identifier[c] [ literal[string] ], literal[string] : identifier[c] [ literal[string] ], literal[string] : identifier[symbols] , literal[string] : identifier[c] . identifier[get] ( literal[string] ) keyword[or] keyword[False] , literal[string] : identifier[c] . identifier[get] ( literal[string] ), literal[string] : identifier[c] . identifier[get] ( literal[string] )} identifier[optional_data] ={ identifier[k] : identifier[c] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[optional_data] } keyword[if] identifier[inc_structure] : identifier[struct] = identifier[Structure] . 
identifier[from_dict] ( identifier[c] [ literal[string] ]) identifier[entry] = identifier[ComputedStructureEntry] ( identifier[struct] , identifier[c] [ literal[string] ], literal[int] , identifier[parameters] = identifier[parameters] , identifier[data] = identifier[optional_data] , identifier[entry_id] = identifier[c] [ literal[string] ]) keyword[else] : identifier[entry] = identifier[ComputedEntry] ( identifier[Composition] ( identifier[c] [ literal[string] ]), identifier[c] [ literal[string] ], literal[int] , identifier[parameters] = identifier[parameters] , identifier[data] = identifier[optional_data] , identifier[entry_id] = identifier[c] [ literal[string] ]) identifier[all_entries] . identifier[append] ( identifier[entry] ) keyword[return] identifier[all_entries]
def get_entries(self, criteria, inc_structure=False, optional_data=None): """ Get ComputedEntries satisfying a particular criteria. .. note:: The get_entries_in_system and get_entries methods should be used with care. In essence, all entries, GGA, GGA+U or otherwise, are returned. The dataset is very heterogeneous and not directly comparable. It is highly recommended that you perform post-processing using pymatgen.entries.compatibility. Args: criteria: Criteria obeying the same syntax as query. inc_structure: Optional parameter as to whether to include a structure with the ComputedEntry. Defaults to False. Use with care - including structures with a large number of entries can potentially slow down your code to a crawl. optional_data: Optional data to include with the entry. This allows the data to be access via entry.data[key]. Returns: List of pymatgen.entries.ComputedEntries satisfying criteria. """ all_entries = list() optional_data = [] if not optional_data else list(optional_data) optional_data.append('oxide_type') fields = [k for k in optional_data] fields.extend(['task_id', 'unit_cell_formula', 'energy', 'is_hubbard', 'hubbards', 'pseudo_potential.labels', 'pseudo_potential.functional', 'run_type', 'input.is_lasph', 'input.xc_override', 'input.potcar_spec']) if inc_structure: fields.append('output.crystal') # depends on [control=['if'], data=[]] for c in self.query(fields, criteria): func = c['pseudo_potential.functional'] labels = c['pseudo_potential.labels'] symbols = ['{} {}'.format(func, label) for label in labels] parameters = {'run_type': c['run_type'], 'is_hubbard': c['is_hubbard'], 'hubbards': c['hubbards'], 'potcar_symbols': symbols, 'is_lasph': c.get('input.is_lasph') or False, 'potcar_spec': c.get('input.potcar_spec'), 'xc_override': c.get('input.xc_override')} optional_data = {k: c[k] for k in optional_data} if inc_structure: struct = Structure.from_dict(c['output.crystal']) entry = ComputedStructureEntry(struct, c['energy'], 0.0, 
parameters=parameters, data=optional_data, entry_id=c['task_id']) # depends on [control=['if'], data=[]] else: entry = ComputedEntry(Composition(c['unit_cell_formula']), c['energy'], 0.0, parameters=parameters, data=optional_data, entry_id=c['task_id']) all_entries.append(entry) # depends on [control=['for'], data=['c']] return all_entries
def _checkDocstringFormat(self, node_type, node, linenoDocstring): """ Check opening/closing of docstring. @param node_type: type of node @param node: current node of pylint @param linenoDocstring: linenumber of docstring """ # Check the opening/closing of docstring. docstringStrippedSpaces = node.doc.strip(" ") if (not docstringStrippedSpaces.startswith("\n") or not docstringStrippedSpaces.endswith("\n")): # If the docstring is in one line, then do not check indentations. self.add_message('W9201', line=linenoDocstring, node=node) else: # If the docstring's opening and closing quotes are on separate # lines, then we check its indentation. # Generating warnings about indentation when the quotes aren't # done right only clutters the output. self._checkIndentationIssue(node, node_type, linenoDocstring)
def function[_checkDocstringFormat, parameter[self, node_type, node, linenoDocstring]]: constant[ Check opening/closing of docstring. @param node_type: type of node @param node: current node of pylint @param linenoDocstring: linenumber of docstring ] variable[docstringStrippedSpaces] assign[=] call[name[node].doc.strip, parameter[constant[ ]]] if <ast.BoolOp object at 0x7da18bccb6d0> begin[:] call[name[self].add_message, parameter[constant[W9201]]]
keyword[def] identifier[_checkDocstringFormat] ( identifier[self] , identifier[node_type] , identifier[node] , identifier[linenoDocstring] ): literal[string] identifier[docstringStrippedSpaces] = identifier[node] . identifier[doc] . identifier[strip] ( literal[string] ) keyword[if] ( keyword[not] identifier[docstringStrippedSpaces] . identifier[startswith] ( literal[string] ) keyword[or] keyword[not] identifier[docstringStrippedSpaces] . identifier[endswith] ( literal[string] )): identifier[self] . identifier[add_message] ( literal[string] , identifier[line] = identifier[linenoDocstring] , identifier[node] = identifier[node] ) keyword[else] : identifier[self] . identifier[_checkIndentationIssue] ( identifier[node] , identifier[node_type] , identifier[linenoDocstring] )
def _checkDocstringFormat(self, node_type, node, linenoDocstring): """ Check opening/closing of docstring. @param node_type: type of node @param node: current node of pylint @param linenoDocstring: linenumber of docstring """ # Check the opening/closing of docstring. docstringStrippedSpaces = node.doc.strip(' ') if not docstringStrippedSpaces.startswith('\n') or not docstringStrippedSpaces.endswith('\n'): # If the docstring is in one line, then do not check indentations. self.add_message('W9201', line=linenoDocstring, node=node) # depends on [control=['if'], data=[]] else: # If the docstring's opening and closing quotes are on separate # lines, then we check its indentation. # Generating warnings about indentation when the quotes aren't # done right only clutters the output. self._checkIndentationIssue(node, node_type, linenoDocstring)
def lock(self): """This method locks the database.""" self.password = None self.keyfile = None self.groups[:] = [] self.entries[:] = [] self._group_order[:] = [] self._entry_order[:] = [] self.root_group = v1Group() self._num_groups = 1 self._num_entries = 0 return True
def function[lock, parameter[self]]: constant[This method locks the database.] name[self].password assign[=] constant[None] name[self].keyfile assign[=] constant[None] call[name[self].groups][<ast.Slice object at 0x7da1b26c4790>] assign[=] list[[]] call[name[self].entries][<ast.Slice object at 0x7da1b26c5300>] assign[=] list[[]] call[name[self]._group_order][<ast.Slice object at 0x7da1b26c5720>] assign[=] list[[]] call[name[self]._entry_order][<ast.Slice object at 0x7da1b26c52a0>] assign[=] list[[]] name[self].root_group assign[=] call[name[v1Group], parameter[]] name[self]._num_groups assign[=] constant[1] name[self]._num_entries assign[=] constant[0] return[constant[True]]
keyword[def] identifier[lock] ( identifier[self] ): literal[string] identifier[self] . identifier[password] = keyword[None] identifier[self] . identifier[keyfile] = keyword[None] identifier[self] . identifier[groups] [:]=[] identifier[self] . identifier[entries] [:]=[] identifier[self] . identifier[_group_order] [:]=[] identifier[self] . identifier[_entry_order] [:]=[] identifier[self] . identifier[root_group] = identifier[v1Group] () identifier[self] . identifier[_num_groups] = literal[int] identifier[self] . identifier[_num_entries] = literal[int] keyword[return] keyword[True]
def lock(self): """This method locks the database.""" self.password = None self.keyfile = None self.groups[:] = [] self.entries[:] = [] self._group_order[:] = [] self._entry_order[:] = [] self.root_group = v1Group() self._num_groups = 1 self._num_entries = 0 return True
def get_event_question(self, id, question_id, **data): """ GET /events/:id/questions/:question_id/ This endpoint will return :format:`question` for a specific question id. """ return self.get("/events/{0}/questions/{0}/".format(id,question_id), data=data)
def function[get_event_question, parameter[self, id, question_id]]: constant[ GET /events/:id/questions/:question_id/ This endpoint will return :format:`question` for a specific question id. ] return[call[name[self].get, parameter[call[constant[/events/{0}/questions/{0}/].format, parameter[name[id], name[question_id]]]]]]
keyword[def] identifier[get_event_question] ( identifier[self] , identifier[id] , identifier[question_id] ,** identifier[data] ): literal[string] keyword[return] identifier[self] . identifier[get] ( literal[string] . identifier[format] ( identifier[id] , identifier[question_id] ), identifier[data] = identifier[data] )
def get_event_question(self, id, question_id, **data): """ GET /events/:id/questions/:question_id/ This endpoint will return :format:`question` for a specific question id. """ return self.get('/events/{0}/questions/{0}/'.format(id, question_id), data=data)
def scheduled(self, offset=0, count=25): '''Return all the currently-scheduled jobs''' return self.client('jobs', 'scheduled', self.name, offset, count)
def function[scheduled, parameter[self, offset, count]]: constant[Return all the currently-scheduled jobs] return[call[name[self].client, parameter[constant[jobs], constant[scheduled], name[self].name, name[offset], name[count]]]]
keyword[def] identifier[scheduled] ( identifier[self] , identifier[offset] = literal[int] , identifier[count] = literal[int] ): literal[string] keyword[return] identifier[self] . identifier[client] ( literal[string] , literal[string] , identifier[self] . identifier[name] , identifier[offset] , identifier[count] )
def scheduled(self, offset=0, count=25): """Return all the currently-scheduled jobs""" return self.client('jobs', 'scheduled', self.name, offset, count)
def _cleanUpdatesList(self, col, cellIdx, seg): """ Removes any update that would be for the given col, cellIdx, segIdx. NOTE: logically, we need to do this when we delete segments, so that if an update refers to a segment that was just deleted, we also remove that update from the update list. However, I haven't seen it trigger in any of the unit tests yet, so it might mean that it's not needed and that situation doesn't occur, by construction. """ # TODO: check if the situation described in the docstring above actually # occurs. for key, updateList in self.segmentUpdates.iteritems(): c, i = key[0], key[1] if c == col and i == cellIdx: for update in updateList: if update[1].segment == seg: self._removeSegmentUpdate(update)
def function[_cleanUpdatesList, parameter[self, col, cellIdx, seg]]: constant[ Removes any update that would be for the given col, cellIdx, segIdx. NOTE: logically, we need to do this when we delete segments, so that if an update refers to a segment that was just deleted, we also remove that update from the update list. However, I haven't seen it trigger in any of the unit tests yet, so it might mean that it's not needed and that situation doesn't occur, by construction. ] for taget[tuple[[<ast.Name object at 0x7da18bccb640>, <ast.Name object at 0x7da18bcc94e0>]]] in starred[call[name[self].segmentUpdates.iteritems, parameter[]]] begin[:] <ast.Tuple object at 0x7da18bcca590> assign[=] tuple[[<ast.Subscript object at 0x7da18bcca7d0>, <ast.Subscript object at 0x7da18bccb880>]] if <ast.BoolOp object at 0x7da18bcc9e10> begin[:] for taget[name[update]] in starred[name[updateList]] begin[:] if compare[call[name[update]][constant[1]].segment equal[==] name[seg]] begin[:] call[name[self]._removeSegmentUpdate, parameter[name[update]]]
keyword[def] identifier[_cleanUpdatesList] ( identifier[self] , identifier[col] , identifier[cellIdx] , identifier[seg] ): literal[string] keyword[for] identifier[key] , identifier[updateList] keyword[in] identifier[self] . identifier[segmentUpdates] . identifier[iteritems] (): identifier[c] , identifier[i] = identifier[key] [ literal[int] ], identifier[key] [ literal[int] ] keyword[if] identifier[c] == identifier[col] keyword[and] identifier[i] == identifier[cellIdx] : keyword[for] identifier[update] keyword[in] identifier[updateList] : keyword[if] identifier[update] [ literal[int] ]. identifier[segment] == identifier[seg] : identifier[self] . identifier[_removeSegmentUpdate] ( identifier[update] )
def _cleanUpdatesList(self, col, cellIdx, seg): """ Removes any update that would be for the given col, cellIdx, segIdx. NOTE: logically, we need to do this when we delete segments, so that if an update refers to a segment that was just deleted, we also remove that update from the update list. However, I haven't seen it trigger in any of the unit tests yet, so it might mean that it's not needed and that situation doesn't occur, by construction. """ # TODO: check if the situation described in the docstring above actually # occurs. for (key, updateList) in self.segmentUpdates.iteritems(): (c, i) = (key[0], key[1]) if c == col and i == cellIdx: for update in updateList: if update[1].segment == seg: self._removeSegmentUpdate(update) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['update']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def check_sla(self, sla, diff_metric): """ Check whether the SLA has passed or failed """ try: if sla.display is '%': diff_val = float(diff_metric['percent_diff']) else: diff_val = float(diff_metric['absolute_diff']) except ValueError: return False if not (sla.check_sla_passed(diff_val)): self.sla_failures += 1 self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric)) return True
def function[check_sla, parameter[self, sla, diff_metric]]: constant[ Check whether the SLA has passed or failed ] <ast.Try object at 0x7da1b00b7bb0> if <ast.UnaryOp object at 0x7da18f00c250> begin[:] <ast.AugAssign object at 0x7da18f00d600> call[name[self].sla_failure_list.append, parameter[call[name[DiffSLAFailure], parameter[name[sla], name[diff_metric]]]]] return[constant[True]]
keyword[def] identifier[check_sla] ( identifier[self] , identifier[sla] , identifier[diff_metric] ): literal[string] keyword[try] : keyword[if] identifier[sla] . identifier[display] keyword[is] literal[string] : identifier[diff_val] = identifier[float] ( identifier[diff_metric] [ literal[string] ]) keyword[else] : identifier[diff_val] = identifier[float] ( identifier[diff_metric] [ literal[string] ]) keyword[except] identifier[ValueError] : keyword[return] keyword[False] keyword[if] keyword[not] ( identifier[sla] . identifier[check_sla_passed] ( identifier[diff_val] )): identifier[self] . identifier[sla_failures] += literal[int] identifier[self] . identifier[sla_failure_list] . identifier[append] ( identifier[DiffSLAFailure] ( identifier[sla] , identifier[diff_metric] )) keyword[return] keyword[True]
def check_sla(self, sla, diff_metric): """ Check whether the SLA has passed or failed """ try: if sla.display is '%': diff_val = float(diff_metric['percent_diff']) # depends on [control=['if'], data=[]] else: diff_val = float(diff_metric['absolute_diff']) # depends on [control=['try'], data=[]] except ValueError: return False # depends on [control=['except'], data=[]] if not sla.check_sla_passed(diff_val): self.sla_failures += 1 self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric)) # depends on [control=['if'], data=[]] return True
def get_x_inds(self, *dynac_type): """ Return the indices into the lattice list attribute of elements whose Dynac type matches the input string. Multiple input strings can be given, either as a comma-separated list or as a genuine Python list. """ return [i for i, x in enumerate(self.lattice) for y in dynac_type if dynac_from_ele(x) == y]
def function[get_x_inds, parameter[self]]: constant[ Return the indices into the lattice list attribute of elements whose Dynac type matches the input string. Multiple input strings can be given, either as a comma-separated list or as a genuine Python list. ] return[<ast.ListComp object at 0x7da204960ac0>]
keyword[def] identifier[get_x_inds] ( identifier[self] ,* identifier[dynac_type] ): literal[string] keyword[return] [ identifier[i] keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[self] . identifier[lattice] ) keyword[for] identifier[y] keyword[in] identifier[dynac_type] keyword[if] identifier[dynac_from_ele] ( identifier[x] )== identifier[y] ]
def get_x_inds(self, *dynac_type): """ Return the indices into the lattice list attribute of elements whose Dynac type matches the input string. Multiple input strings can be given, either as a comma-separated list or as a genuine Python list. """ return [i for (i, x) in enumerate(self.lattice) for y in dynac_type if dynac_from_ele(x) == y]
def count(self, strg, case_sensitive=False, *args, **kwargs): """Get the count of a word or phrase `s` within this WordList. :param strg: The string to count. :param case_sensitive: A boolean, whether or not the search is case-sensitive. """ if not case_sensitive: return [word.lower() for word in self].count(strg.lower(), *args, **kwargs) return self._collection.count(strg, *args, **kwargs)
def function[count, parameter[self, strg, case_sensitive]]: constant[Get the count of a word or phrase `s` within this WordList. :param strg: The string to count. :param case_sensitive: A boolean, whether or not the search is case-sensitive. ] if <ast.UnaryOp object at 0x7da18f09c4f0> begin[:] return[call[<ast.ListComp object at 0x7da18f09f130>.count, parameter[call[name[strg].lower, parameter[]], <ast.Starred object at 0x7da18f09dd80>]]] return[call[name[self]._collection.count, parameter[name[strg], <ast.Starred object at 0x7da18f09df00>]]]
keyword[def] identifier[count] ( identifier[self] , identifier[strg] , identifier[case_sensitive] = keyword[False] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[case_sensitive] : keyword[return] [ identifier[word] . identifier[lower] () keyword[for] identifier[word] keyword[in] identifier[self] ]. identifier[count] ( identifier[strg] . identifier[lower] (),* identifier[args] , ** identifier[kwargs] ) keyword[return] identifier[self] . identifier[_collection] . identifier[count] ( identifier[strg] ,* identifier[args] ,** identifier[kwargs] )
def count(self, strg, case_sensitive=False, *args, **kwargs): """Get the count of a word or phrase `s` within this WordList. :param strg: The string to count. :param case_sensitive: A boolean, whether or not the search is case-sensitive. """ if not case_sensitive: return [word.lower() for word in self].count(strg.lower(), *args, **kwargs) # depends on [control=['if'], data=[]] return self._collection.count(strg, *args, **kwargs)
def _compute_childtab(self, lcptab): """Computes the child 'up' and 'down' arrays in O(n) based on the LCP table. Abouelhoda et al. (2004). """ last_index = -1 stack = [0] n = len(lcptab) childtab_up = np.zeros(n, dtype=np.int) # Zeros / -1 ? childtab_down = np.zeros(n, dtype=np.int) for i in xrange(n): while lcptab[i] < lcptab[stack[-1]]: last_index = stack.pop() if lcptab[i] <= lcptab[stack[-1]] and lcptab[stack[-1]] != lcptab[last_index]: childtab_down[stack[-1]] = last_index if last_index != -1: childtab_up[i] = last_index last_index = -1 stack.append(i) return childtab_up, childtab_down
def function[_compute_childtab, parameter[self, lcptab]]: constant[Computes the child 'up' and 'down' arrays in O(n) based on the LCP table. Abouelhoda et al. (2004). ] variable[last_index] assign[=] <ast.UnaryOp object at 0x7da18dc986d0> variable[stack] assign[=] list[[<ast.Constant object at 0x7da18bcca050>]] variable[n] assign[=] call[name[len], parameter[name[lcptab]]] variable[childtab_up] assign[=] call[name[np].zeros, parameter[name[n]]] variable[childtab_down] assign[=] call[name[np].zeros, parameter[name[n]]] for taget[name[i]] in starred[call[name[xrange], parameter[name[n]]]] begin[:] while compare[call[name[lcptab]][name[i]] less[<] call[name[lcptab]][call[name[stack]][<ast.UnaryOp object at 0x7da18bcc9240>]]] begin[:] variable[last_index] assign[=] call[name[stack].pop, parameter[]] if <ast.BoolOp object at 0x7da18bcca290> begin[:] call[name[childtab_down]][call[name[stack]][<ast.UnaryOp object at 0x7da18bcca8c0>]] assign[=] name[last_index] if compare[name[last_index] not_equal[!=] <ast.UnaryOp object at 0x7da18f58cb20>] begin[:] call[name[childtab_up]][name[i]] assign[=] name[last_index] variable[last_index] assign[=] <ast.UnaryOp object at 0x7da18f58caf0> call[name[stack].append, parameter[name[i]]] return[tuple[[<ast.Name object at 0x7da18dc9a830>, <ast.Name object at 0x7da18dc99c60>]]]
keyword[def] identifier[_compute_childtab] ( identifier[self] , identifier[lcptab] ): literal[string] identifier[last_index] =- literal[int] identifier[stack] =[ literal[int] ] identifier[n] = identifier[len] ( identifier[lcptab] ) identifier[childtab_up] = identifier[np] . identifier[zeros] ( identifier[n] , identifier[dtype] = identifier[np] . identifier[int] ) identifier[childtab_down] = identifier[np] . identifier[zeros] ( identifier[n] , identifier[dtype] = identifier[np] . identifier[int] ) keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[n] ): keyword[while] identifier[lcptab] [ identifier[i] ]< identifier[lcptab] [ identifier[stack] [- literal[int] ]]: identifier[last_index] = identifier[stack] . identifier[pop] () keyword[if] identifier[lcptab] [ identifier[i] ]<= identifier[lcptab] [ identifier[stack] [- literal[int] ]] keyword[and] identifier[lcptab] [ identifier[stack] [- literal[int] ]]!= identifier[lcptab] [ identifier[last_index] ]: identifier[childtab_down] [ identifier[stack] [- literal[int] ]]= identifier[last_index] keyword[if] identifier[last_index] !=- literal[int] : identifier[childtab_up] [ identifier[i] ]= identifier[last_index] identifier[last_index] =- literal[int] identifier[stack] . identifier[append] ( identifier[i] ) keyword[return] identifier[childtab_up] , identifier[childtab_down]
def _compute_childtab(self, lcptab): """Computes the child 'up' and 'down' arrays in O(n) based on the LCP table. Abouelhoda et al. (2004). """ last_index = -1 stack = [0] n = len(lcptab) childtab_up = np.zeros(n, dtype=np.int) # Zeros / -1 ? childtab_down = np.zeros(n, dtype=np.int) for i in xrange(n): while lcptab[i] < lcptab[stack[-1]]: last_index = stack.pop() if lcptab[i] <= lcptab[stack[-1]] and lcptab[stack[-1]] != lcptab[last_index]: childtab_down[stack[-1]] = last_index # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] if last_index != -1: childtab_up[i] = last_index last_index = -1 # depends on [control=['if'], data=['last_index']] stack.append(i) # depends on [control=['for'], data=['i']] return (childtab_up, childtab_down)
def machine_info(): """Retrieve core and memory information for the current machine. """ import psutil BYTES_IN_GIG = 1073741824.0 free_bytes = psutil.virtual_memory().total return [{"memory": float("%.1f" % (free_bytes / BYTES_IN_GIG)), "cores": multiprocessing.cpu_count(), "name": socket.gethostname()}]
def function[machine_info, parameter[]]: constant[Retrieve core and memory information for the current machine. ] import module[psutil] variable[BYTES_IN_GIG] assign[=] constant[1073741824.0] variable[free_bytes] assign[=] call[name[psutil].virtual_memory, parameter[]].total return[list[[<ast.Dict object at 0x7da1b18ff310>]]]
keyword[def] identifier[machine_info] (): literal[string] keyword[import] identifier[psutil] identifier[BYTES_IN_GIG] = literal[int] identifier[free_bytes] = identifier[psutil] . identifier[virtual_memory] (). identifier[total] keyword[return] [{ literal[string] : identifier[float] ( literal[string] %( identifier[free_bytes] / identifier[BYTES_IN_GIG] )), literal[string] : identifier[multiprocessing] . identifier[cpu_count] (), literal[string] : identifier[socket] . identifier[gethostname] ()}]
def machine_info(): """Retrieve core and memory information for the current machine. """ import psutil BYTES_IN_GIG = 1073741824.0 free_bytes = psutil.virtual_memory().total return [{'memory': float('%.1f' % (free_bytes / BYTES_IN_GIG)), 'cores': multiprocessing.cpu_count(), 'name': socket.gethostname()}]
def subintf_real_ip_check_gw_port(self, gw_port, ip_addr, netmask): """ checks running-cfg derived ip_addr and netmask against neutron-db gw_port """ if gw_port is not None: found = False for i in range(len(gw_port['fixed_ips'])): target_ip = gw_port['fixed_ips'][i]['ip_address'] if ip_addr == target_ip: found = True break if found is False: LOG.info("Subintf real IP is incorrect, deleting") return False subnet_id = gw_port['fixed_ips'][i]['subnet_id'] subnet = next( sn for sn in gw_port['subnets'] if sn['id'] == subnet_id) target_net = netaddr.IPNetwork(subnet['cidr']) if netmask != str(target_net.netmask): LOG.info("Subintf has incorrect netmask, deleting") return False return True return False
def function[subintf_real_ip_check_gw_port, parameter[self, gw_port, ip_addr, netmask]]: constant[ checks running-cfg derived ip_addr and netmask against neutron-db gw_port ] if compare[name[gw_port] is_not constant[None]] begin[:] variable[found] assign[=] constant[False] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[gw_port]][constant[fixed_ips]]]]]]] begin[:] variable[target_ip] assign[=] call[call[call[name[gw_port]][constant[fixed_ips]]][name[i]]][constant[ip_address]] if compare[name[ip_addr] equal[==] name[target_ip]] begin[:] variable[found] assign[=] constant[True] break if compare[name[found] is constant[False]] begin[:] call[name[LOG].info, parameter[constant[Subintf real IP is incorrect, deleting]]] return[constant[False]] variable[subnet_id] assign[=] call[call[call[name[gw_port]][constant[fixed_ips]]][name[i]]][constant[subnet_id]] variable[subnet] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da1b1b1b3d0>]] variable[target_net] assign[=] call[name[netaddr].IPNetwork, parameter[call[name[subnet]][constant[cidr]]]] if compare[name[netmask] not_equal[!=] call[name[str], parameter[name[target_net].netmask]]] begin[:] call[name[LOG].info, parameter[constant[Subintf has incorrect netmask, deleting]]] return[constant[False]] return[constant[True]] return[constant[False]]
keyword[def] identifier[subintf_real_ip_check_gw_port] ( identifier[self] , identifier[gw_port] , identifier[ip_addr] , identifier[netmask] ): literal[string] keyword[if] identifier[gw_port] keyword[is] keyword[not] keyword[None] : identifier[found] = keyword[False] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[gw_port] [ literal[string] ])): identifier[target_ip] = identifier[gw_port] [ literal[string] ][ identifier[i] ][ literal[string] ] keyword[if] identifier[ip_addr] == identifier[target_ip] : identifier[found] = keyword[True] keyword[break] keyword[if] identifier[found] keyword[is] keyword[False] : identifier[LOG] . identifier[info] ( literal[string] ) keyword[return] keyword[False] identifier[subnet_id] = identifier[gw_port] [ literal[string] ][ identifier[i] ][ literal[string] ] identifier[subnet] = identifier[next] ( identifier[sn] keyword[for] identifier[sn] keyword[in] identifier[gw_port] [ literal[string] ] keyword[if] identifier[sn] [ literal[string] ]== identifier[subnet_id] ) identifier[target_net] = identifier[netaddr] . identifier[IPNetwork] ( identifier[subnet] [ literal[string] ]) keyword[if] identifier[netmask] != identifier[str] ( identifier[target_net] . identifier[netmask] ): identifier[LOG] . identifier[info] ( literal[string] ) keyword[return] keyword[False] keyword[return] keyword[True] keyword[return] keyword[False]
def subintf_real_ip_check_gw_port(self, gw_port, ip_addr, netmask): """ checks running-cfg derived ip_addr and netmask against neutron-db gw_port """ if gw_port is not None: found = False for i in range(len(gw_port['fixed_ips'])): target_ip = gw_port['fixed_ips'][i]['ip_address'] if ip_addr == target_ip: found = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] if found is False: LOG.info('Subintf real IP is incorrect, deleting') return False # depends on [control=['if'], data=[]] subnet_id = gw_port['fixed_ips'][i]['subnet_id'] subnet = next((sn for sn in gw_port['subnets'] if sn['id'] == subnet_id)) target_net = netaddr.IPNetwork(subnet['cidr']) if netmask != str(target_net.netmask): LOG.info('Subintf has incorrect netmask, deleting') return False # depends on [control=['if'], data=[]] return True # depends on [control=['if'], data=['gw_port']] return False
def room(request, slug, template="room.html"): """ Show a room. """ context = {"room": get_object_or_404(ChatRoom, slug=slug)} return render(request, template, context)
def function[room, parameter[request, slug, template]]: constant[ Show a room. ] variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da18fe92f80>], [<ast.Call object at 0x7da18fe92bf0>]] return[call[name[render], parameter[name[request], name[template], name[context]]]]
keyword[def] identifier[room] ( identifier[request] , identifier[slug] , identifier[template] = literal[string] ): literal[string] identifier[context] ={ literal[string] : identifier[get_object_or_404] ( identifier[ChatRoom] , identifier[slug] = identifier[slug] )} keyword[return] identifier[render] ( identifier[request] , identifier[template] , identifier[context] )
def room(request, slug, template='room.html'): """ Show a room. """ context = {'room': get_object_or_404(ChatRoom, slug=slug)} return render(request, template, context)
def me(self): """Similar to :attr:`Client.user` except an instance of :class:`Member`. This is essentially used to get the member version of yourself. """ self_id = self._state.user.id return self.get_member(self_id)
def function[me, parameter[self]]: constant[Similar to :attr:`Client.user` except an instance of :class:`Member`. This is essentially used to get the member version of yourself. ] variable[self_id] assign[=] name[self]._state.user.id return[call[name[self].get_member, parameter[name[self_id]]]]
keyword[def] identifier[me] ( identifier[self] ): literal[string] identifier[self_id] = identifier[self] . identifier[_state] . identifier[user] . identifier[id] keyword[return] identifier[self] . identifier[get_member] ( identifier[self_id] )
def me(self): """Similar to :attr:`Client.user` except an instance of :class:`Member`. This is essentially used to get the member version of yourself. """ self_id = self._state.user.id return self.get_member(self_id)
def is_list(str): """ Determines if an item in a paragraph is a list. If all of the lines in the markup start with a "*" or "1." this indicates a list as parsed by parse_paragraphs(). It can be drawn with draw_list(). """ for chunk in str.split("\n"): chunk = chunk.replace("\t", "") if not chunk.lstrip().startswith("*") \ and not re.search(r"^([0-9]{1,3}\. )", chunk.lstrip()): return False return True
def function[is_list, parameter[str]]: constant[ Determines if an item in a paragraph is a list. If all of the lines in the markup start with a "*" or "1." this indicates a list as parsed by parse_paragraphs(). It can be drawn with draw_list(). ] for taget[name[chunk]] in starred[call[name[str].split, parameter[constant[ ]]]] begin[:] variable[chunk] assign[=] call[name[chunk].replace, parameter[constant[ ], constant[]]] if <ast.BoolOp object at 0x7da1b2346200> begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[is_list] ( identifier[str] ): literal[string] keyword[for] identifier[chunk] keyword[in] identifier[str] . identifier[split] ( literal[string] ): identifier[chunk] = identifier[chunk] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[chunk] . identifier[lstrip] (). identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[chunk] . identifier[lstrip] ()): keyword[return] keyword[False] keyword[return] keyword[True]
def is_list(str): """ Determines if an item in a paragraph is a list. If all of the lines in the markup start with a "*" or "1." this indicates a list as parsed by parse_paragraphs(). It can be drawn with draw_list(). """ for chunk in str.split('\n'): chunk = chunk.replace('\t', '') if not chunk.lstrip().startswith('*') and (not re.search('^([0-9]{1,3}\\. )', chunk.lstrip())): return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] return True
def get_py_dtypes(data_frame): ''' Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame`. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (pandas.DataFrame) : Data frame indexed by the column names from `data_frame`, with the columns `'i'` and `'dtype'` indicating the index and Python type of the corresponding `data_frame` column, respectively. ''' df_py_dtypes = data_frame.dtypes.map(get_py_dtype).to_frame('dtype').copy() df_py_dtypes.loc[df_py_dtypes.dtype == object, 'dtype'] = \ (df_py_dtypes.loc[df_py_dtypes.dtype == object].index .map(lambda c: str if data_frame[c] .map(lambda v: isinstance(v, str)).all() else object)) df_py_dtypes.insert(0, 'i', range(df_py_dtypes.shape[0])) df_py_dtypes.index.name = 'column' return df_py_dtypes
def function[get_py_dtypes, parameter[data_frame]]: constant[ Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame`. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (pandas.DataFrame) : Data frame indexed by the column names from `data_frame`, with the columns `'i'` and `'dtype'` indicating the index and Python type of the corresponding `data_frame` column, respectively. ] variable[df_py_dtypes] assign[=] call[call[call[name[data_frame].dtypes.map, parameter[name[get_py_dtype]]].to_frame, parameter[constant[dtype]]].copy, parameter[]] call[name[df_py_dtypes].loc][tuple[[<ast.Compare object at 0x7da1b25813f0>, <ast.Constant object at 0x7da1b2582dd0>]]] assign[=] call[call[name[df_py_dtypes].loc][compare[name[df_py_dtypes].dtype equal[==] name[object]]].index.map, parameter[<ast.Lambda object at 0x7da1b2583cd0>]] call[name[df_py_dtypes].insert, parameter[constant[0], constant[i], call[name[range], parameter[call[name[df_py_dtypes].shape][constant[0]]]]]] name[df_py_dtypes].index.name assign[=] constant[column] return[name[df_py_dtypes]]
keyword[def] identifier[get_py_dtypes] ( identifier[data_frame] ): literal[string] identifier[df_py_dtypes] = identifier[data_frame] . identifier[dtypes] . identifier[map] ( identifier[get_py_dtype] ). identifier[to_frame] ( literal[string] ). identifier[copy] () identifier[df_py_dtypes] . identifier[loc] [ identifier[df_py_dtypes] . identifier[dtype] == identifier[object] , literal[string] ]=( identifier[df_py_dtypes] . identifier[loc] [ identifier[df_py_dtypes] . identifier[dtype] == identifier[object] ]. identifier[index] . identifier[map] ( keyword[lambda] identifier[c] : identifier[str] keyword[if] identifier[data_frame] [ identifier[c] ] . identifier[map] ( keyword[lambda] identifier[v] : identifier[isinstance] ( identifier[v] , identifier[str] )). identifier[all] () keyword[else] identifier[object] )) identifier[df_py_dtypes] . identifier[insert] ( literal[int] , literal[string] , identifier[range] ( identifier[df_py_dtypes] . identifier[shape] [ literal[int] ])) identifier[df_py_dtypes] . identifier[index] . identifier[name] = literal[string] keyword[return] identifier[df_py_dtypes]
def get_py_dtypes(data_frame): """ Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame`. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (pandas.DataFrame) : Data frame indexed by the column names from `data_frame`, with the columns `'i'` and `'dtype'` indicating the index and Python type of the corresponding `data_frame` column, respectively. """ df_py_dtypes = data_frame.dtypes.map(get_py_dtype).to_frame('dtype').copy() df_py_dtypes.loc[df_py_dtypes.dtype == object, 'dtype'] = df_py_dtypes.loc[df_py_dtypes.dtype == object].index.map(lambda c: str if data_frame[c].map(lambda v: isinstance(v, str)).all() else object) df_py_dtypes.insert(0, 'i', range(df_py_dtypes.shape[0])) df_py_dtypes.index.name = 'column' return df_py_dtypes
def _print_general_vs_table(self, idset1, idset2): """ :param idset1: :param idset2: """ ref1name = '' set1_hasref = isinstance(idset1, idset_with_reference) if set1_hasref: ref1arr = np.array(idset1.reflst) ref1name = idset1.refname ref2name = ref1name set2_hasref = isinstance(idset2, idset_with_reference) if set2_hasref: ref2arr = np.array(idset2.reflst) ref2name = idset2.refname else: ref2name = ref1name #First show a general table hdr11 = '{0} > {1}'.format(idset1.name, idset2.name) hdr12 = '{0} > {1} {2}'.format(idset1.name, idset2.name, ref2name) hdr13 = '{0} < {1}'.format(idset1.name, idset2.name) hdr14 = '{0} < {1} {2}'.format(idset1.name, idset2.name, ref1name) table = [[hdr11, hdr12, hdr13, hdr14]] set1 = set(idset1) set2 = set(idset2) row11 = list(set1 - set2) if set1_hasref: row12 = [ref1arr[np.where(idset1 == nom)][0] for nom in row11] else: row12 = ['Not found' for _ in row11] row13 = list(set2 - set1) if set2_hasref: row14 = [ref2arr[np.where(idset2 == nom)][0] for nom in row13] else: row14 = ['Not found' for _ in row13] tablst = self._tabulate_4_lists(row11, row12, row13, row14) table.extend(tablst) if len(table) > 1: print(tabulate(table, headers='firstrow')) print('\n')
def function[_print_general_vs_table, parameter[self, idset1, idset2]]: constant[ :param idset1: :param idset2: ] variable[ref1name] assign[=] constant[] variable[set1_hasref] assign[=] call[name[isinstance], parameter[name[idset1], name[idset_with_reference]]] if name[set1_hasref] begin[:] variable[ref1arr] assign[=] call[name[np].array, parameter[name[idset1].reflst]] variable[ref1name] assign[=] name[idset1].refname variable[ref2name] assign[=] name[ref1name] variable[set2_hasref] assign[=] call[name[isinstance], parameter[name[idset2], name[idset_with_reference]]] if name[set2_hasref] begin[:] variable[ref2arr] assign[=] call[name[np].array, parameter[name[idset2].reflst]] variable[ref2name] assign[=] name[idset2].refname variable[hdr11] assign[=] call[constant[{0} > {1}].format, parameter[name[idset1].name, name[idset2].name]] variable[hdr12] assign[=] call[constant[{0} > {1} {2}].format, parameter[name[idset1].name, name[idset2].name, name[ref2name]]] variable[hdr13] assign[=] call[constant[{0} < {1}].format, parameter[name[idset1].name, name[idset2].name]] variable[hdr14] assign[=] call[constant[{0} < {1} {2}].format, parameter[name[idset1].name, name[idset2].name, name[ref1name]]] variable[table] assign[=] list[[<ast.List object at 0x7da1b008cd60>]] variable[set1] assign[=] call[name[set], parameter[name[idset1]]] variable[set2] assign[=] call[name[set], parameter[name[idset2]]] variable[row11] assign[=] call[name[list], parameter[binary_operation[name[set1] - name[set2]]]] if name[set1_hasref] begin[:] variable[row12] assign[=] <ast.ListComp object at 0x7da1b008d270> variable[row13] assign[=] call[name[list], parameter[binary_operation[name[set2] - name[set1]]]] if name[set2_hasref] begin[:] variable[row14] assign[=] <ast.ListComp object at 0x7da1b004f160> variable[tablst] assign[=] call[name[self]._tabulate_4_lists, parameter[name[row11], name[row12], name[row13], name[row14]]] call[name[table].extend, parameter[name[tablst]]] if compare[call[name[len], 
parameter[name[table]]] greater[>] constant[1]] begin[:] call[name[print], parameter[call[name[tabulate], parameter[name[table]]]]] call[name[print], parameter[constant[ ]]]
keyword[def] identifier[_print_general_vs_table] ( identifier[self] , identifier[idset1] , identifier[idset2] ): literal[string] identifier[ref1name] = literal[string] identifier[set1_hasref] = identifier[isinstance] ( identifier[idset1] , identifier[idset_with_reference] ) keyword[if] identifier[set1_hasref] : identifier[ref1arr] = identifier[np] . identifier[array] ( identifier[idset1] . identifier[reflst] ) identifier[ref1name] = identifier[idset1] . identifier[refname] identifier[ref2name] = identifier[ref1name] identifier[set2_hasref] = identifier[isinstance] ( identifier[idset2] , identifier[idset_with_reference] ) keyword[if] identifier[set2_hasref] : identifier[ref2arr] = identifier[np] . identifier[array] ( identifier[idset2] . identifier[reflst] ) identifier[ref2name] = identifier[idset2] . identifier[refname] keyword[else] : identifier[ref2name] = identifier[ref1name] identifier[hdr11] = literal[string] . identifier[format] ( identifier[idset1] . identifier[name] , identifier[idset2] . identifier[name] ) identifier[hdr12] = literal[string] . identifier[format] ( identifier[idset1] . identifier[name] , identifier[idset2] . identifier[name] , identifier[ref2name] ) identifier[hdr13] = literal[string] . identifier[format] ( identifier[idset1] . identifier[name] , identifier[idset2] . identifier[name] ) identifier[hdr14] = literal[string] . identifier[format] ( identifier[idset1] . identifier[name] , identifier[idset2] . identifier[name] , identifier[ref1name] ) identifier[table] =[[ identifier[hdr11] , identifier[hdr12] , identifier[hdr13] , identifier[hdr14] ]] identifier[set1] = identifier[set] ( identifier[idset1] ) identifier[set2] = identifier[set] ( identifier[idset2] ) identifier[row11] = identifier[list] ( identifier[set1] - identifier[set2] ) keyword[if] identifier[set1_hasref] : identifier[row12] =[ identifier[ref1arr] [ identifier[np] . 
identifier[where] ( identifier[idset1] == identifier[nom] )][ literal[int] ] keyword[for] identifier[nom] keyword[in] identifier[row11] ] keyword[else] : identifier[row12] =[ literal[string] keyword[for] identifier[_] keyword[in] identifier[row11] ] identifier[row13] = identifier[list] ( identifier[set2] - identifier[set1] ) keyword[if] identifier[set2_hasref] : identifier[row14] =[ identifier[ref2arr] [ identifier[np] . identifier[where] ( identifier[idset2] == identifier[nom] )][ literal[int] ] keyword[for] identifier[nom] keyword[in] identifier[row13] ] keyword[else] : identifier[row14] =[ literal[string] keyword[for] identifier[_] keyword[in] identifier[row13] ] identifier[tablst] = identifier[self] . identifier[_tabulate_4_lists] ( identifier[row11] , identifier[row12] , identifier[row13] , identifier[row14] ) identifier[table] . identifier[extend] ( identifier[tablst] ) keyword[if] identifier[len] ( identifier[table] )> literal[int] : identifier[print] ( identifier[tabulate] ( identifier[table] , identifier[headers] = literal[string] )) identifier[print] ( literal[string] )
def _print_general_vs_table(self, idset1, idset2): """ :param idset1: :param idset2: """ ref1name = '' set1_hasref = isinstance(idset1, idset_with_reference) if set1_hasref: ref1arr = np.array(idset1.reflst) ref1name = idset1.refname # depends on [control=['if'], data=[]] ref2name = ref1name set2_hasref = isinstance(idset2, idset_with_reference) if set2_hasref: ref2arr = np.array(idset2.reflst) ref2name = idset2.refname # depends on [control=['if'], data=[]] else: ref2name = ref1name #First show a general table hdr11 = '{0} > {1}'.format(idset1.name, idset2.name) hdr12 = '{0} > {1} {2}'.format(idset1.name, idset2.name, ref2name) hdr13 = '{0} < {1}'.format(idset1.name, idset2.name) hdr14 = '{0} < {1} {2}'.format(idset1.name, idset2.name, ref1name) table = [[hdr11, hdr12, hdr13, hdr14]] set1 = set(idset1) set2 = set(idset2) row11 = list(set1 - set2) if set1_hasref: row12 = [ref1arr[np.where(idset1 == nom)][0] for nom in row11] # depends on [control=['if'], data=[]] else: row12 = ['Not found' for _ in row11] row13 = list(set2 - set1) if set2_hasref: row14 = [ref2arr[np.where(idset2 == nom)][0] for nom in row13] # depends on [control=['if'], data=[]] else: row14 = ['Not found' for _ in row13] tablst = self._tabulate_4_lists(row11, row12, row13, row14) table.extend(tablst) if len(table) > 1: print(tabulate(table, headers='firstrow')) print('\n') # depends on [control=['if'], data=[]]
def get_by_type(self, _type): """ Return all of the instances of :class:`ComponentType` ``_type``. """ r = {} for k, v in self.items(): if get_component_type(k) is _type: r[k] = v return r
def function[get_by_type, parameter[self, _type]]: constant[ Return all of the instances of :class:`ComponentType` ``_type``. ] variable[r] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da18dc99cc0>, <ast.Name object at 0x7da18dc9b310>]]] in starred[call[name[self].items, parameter[]]] begin[:] if compare[call[name[get_component_type], parameter[name[k]]] is name[_type]] begin[:] call[name[r]][name[k]] assign[=] name[v] return[name[r]]
keyword[def] identifier[get_by_type] ( identifier[self] , identifier[_type] ): literal[string] identifier[r] ={} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[items] (): keyword[if] identifier[get_component_type] ( identifier[k] ) keyword[is] identifier[_type] : identifier[r] [ identifier[k] ]= identifier[v] keyword[return] identifier[r]
def get_by_type(self, _type): """ Return all of the instances of :class:`ComponentType` ``_type``. """ r = {} for (k, v) in self.items(): if get_component_type(k) is _type: r[k] = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return r
def _eval_summary(self, context: MonitorContext, feed_dict: Optional[Dict]=None) -> None: """ Evaluates the summary tensor and writes the result to the event file. :param context: Monitor context :param feed_dict: Input values dictionary to be provided to the `session.run` when evaluating the summary tensor. """ if self._summary is None: raise RuntimeError('TensorBoard monitor task should set the Tensorflow.Summary object') if context.session is None: raise RuntimeError('To run a TensorBoard monitor task the TF session object' ' must be provided when creating an instance of the Monitor') summary = context.session.run(self._summary, feed_dict=feed_dict) self._file_writer.add_summary(summary, context.global_step) if self._flush_immediately: self.flush()
def function[_eval_summary, parameter[self, context, feed_dict]]: constant[ Evaluates the summary tensor and writes the result to the event file. :param context: Monitor context :param feed_dict: Input values dictionary to be provided to the `session.run` when evaluating the summary tensor. ] if compare[name[self]._summary is constant[None]] begin[:] <ast.Raise object at 0x7da1b1cecc10> if compare[name[context].session is constant[None]] begin[:] <ast.Raise object at 0x7da1b1cef1f0> variable[summary] assign[=] call[name[context].session.run, parameter[name[self]._summary]] call[name[self]._file_writer.add_summary, parameter[name[summary], name[context].global_step]] if name[self]._flush_immediately begin[:] call[name[self].flush, parameter[]]
keyword[def] identifier[_eval_summary] ( identifier[self] , identifier[context] : identifier[MonitorContext] , identifier[feed_dict] : identifier[Optional] [ identifier[Dict] ]= keyword[None] )-> keyword[None] : literal[string] keyword[if] identifier[self] . identifier[_summary] keyword[is] keyword[None] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[if] identifier[context] . identifier[session] keyword[is] keyword[None] : keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] ) identifier[summary] = identifier[context] . identifier[session] . identifier[run] ( identifier[self] . identifier[_summary] , identifier[feed_dict] = identifier[feed_dict] ) identifier[self] . identifier[_file_writer] . identifier[add_summary] ( identifier[summary] , identifier[context] . identifier[global_step] ) keyword[if] identifier[self] . identifier[_flush_immediately] : identifier[self] . identifier[flush] ()
def _eval_summary(self, context: MonitorContext, feed_dict: Optional[Dict]=None) -> None: """ Evaluates the summary tensor and writes the result to the event file. :param context: Monitor context :param feed_dict: Input values dictionary to be provided to the `session.run` when evaluating the summary tensor. """ if self._summary is None: raise RuntimeError('TensorBoard monitor task should set the Tensorflow.Summary object') # depends on [control=['if'], data=[]] if context.session is None: raise RuntimeError('To run a TensorBoard monitor task the TF session object must be provided when creating an instance of the Monitor') # depends on [control=['if'], data=[]] summary = context.session.run(self._summary, feed_dict=feed_dict) self._file_writer.add_summary(summary, context.global_step) if self._flush_immediately: self.flush() # depends on [control=['if'], data=[]]
def trim(hdu, datasec='DATASEC'): """TRIM a CFHT MEGAPRIME frame using the DATASEC keyword""" datasec = re.findall(r'(\d+)', hdu.header.get(datasec)) l = int(datasec[0]) - 1 r = int(datasec[1]) b = int(datasec[2]) - 1 t = int(datasec[3]) logger.info("Trimming [%d:%d,%d:%d]" % ( l, r, b, t)) hdu.data = hdu.data[b:t, l:r] hdu.header.update('DATASEC', "[%d:%d,%d:%d]" % (1, r - l + 1, 1, t - b + 1), comment="Image was trimmed") hdu.header.update('ODATASEC', "[%d:%d,%d:%d]" % (l + 1, r, b + 1, t), comment="previous DATASEC") return
def function[trim, parameter[hdu, datasec]]: constant[TRIM a CFHT MEGAPRIME frame using the DATASEC keyword] variable[datasec] assign[=] call[name[re].findall, parameter[constant[(\d+)], call[name[hdu].header.get, parameter[name[datasec]]]]] variable[l] assign[=] binary_operation[call[name[int], parameter[call[name[datasec]][constant[0]]]] - constant[1]] variable[r] assign[=] call[name[int], parameter[call[name[datasec]][constant[1]]]] variable[b] assign[=] binary_operation[call[name[int], parameter[call[name[datasec]][constant[2]]]] - constant[1]] variable[t] assign[=] call[name[int], parameter[call[name[datasec]][constant[3]]]] call[name[logger].info, parameter[binary_operation[constant[Trimming [%d:%d,%d:%d]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1a4ad70>, <ast.Name object at 0x7da1b1a48970>, <ast.Name object at 0x7da1b1a49ea0>, <ast.Name object at 0x7da1b1a49e10>]]]]] name[hdu].data assign[=] call[name[hdu].data][tuple[[<ast.Slice object at 0x7da1b1a49630>, <ast.Slice object at 0x7da1b1a48370>]]] call[name[hdu].header.update, parameter[constant[DATASEC], binary_operation[constant[[%d:%d,%d:%d]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Constant object at 0x7da1b1a4ac80>, <ast.BinOp object at 0x7da1b1a49360>, <ast.Constant object at 0x7da1b1a48c70>, <ast.BinOp object at 0x7da1b1a49840>]]]]] call[name[hdu].header.update, parameter[constant[ODATASEC], binary_operation[constant[[%d:%d,%d:%d]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b19d8820>, <ast.Name object at 0x7da1b19d8400>, <ast.BinOp object at 0x7da1b19d87f0>, <ast.Name object at 0x7da1b19d8cd0>]]]]] return[None]
keyword[def] identifier[trim] ( identifier[hdu] , identifier[datasec] = literal[string] ): literal[string] identifier[datasec] = identifier[re] . identifier[findall] ( literal[string] , identifier[hdu] . identifier[header] . identifier[get] ( identifier[datasec] )) identifier[l] = identifier[int] ( identifier[datasec] [ literal[int] ])- literal[int] identifier[r] = identifier[int] ( identifier[datasec] [ literal[int] ]) identifier[b] = identifier[int] ( identifier[datasec] [ literal[int] ])- literal[int] identifier[t] = identifier[int] ( identifier[datasec] [ literal[int] ]) identifier[logger] . identifier[info] ( literal[string] %( identifier[l] , identifier[r] , identifier[b] , identifier[t] )) identifier[hdu] . identifier[data] = identifier[hdu] . identifier[data] [ identifier[b] : identifier[t] , identifier[l] : identifier[r] ] identifier[hdu] . identifier[header] . identifier[update] ( literal[string] , literal[string] %( literal[int] , identifier[r] - identifier[l] + literal[int] , literal[int] , identifier[t] - identifier[b] + literal[int] ), identifier[comment] = literal[string] ) identifier[hdu] . identifier[header] . identifier[update] ( literal[string] , literal[string] %( identifier[l] + literal[int] , identifier[r] , identifier[b] + literal[int] , identifier[t] ), identifier[comment] = literal[string] ) keyword[return]
def trim(hdu, datasec='DATASEC'): """TRIM a CFHT MEGAPRIME frame using the DATASEC keyword""" datasec = re.findall('(\\d+)', hdu.header.get(datasec)) l = int(datasec[0]) - 1 r = int(datasec[1]) b = int(datasec[2]) - 1 t = int(datasec[3]) logger.info('Trimming [%d:%d,%d:%d]' % (l, r, b, t)) hdu.data = hdu.data[b:t, l:r] hdu.header.update('DATASEC', '[%d:%d,%d:%d]' % (1, r - l + 1, 1, t - b + 1), comment='Image was trimmed') hdu.header.update('ODATASEC', '[%d:%d,%d:%d]' % (l + 1, r, b + 1, t), comment='previous DATASEC') return
async def set_tz(self): """ set the environment timezone to the timezone set in your twitter settings """ settings = await self.api.account.settings.get() tz = settings.time_zone.tzinfo_name os.environ['TZ'] = tz time.tzset()
<ast.AsyncFunctionDef object at 0x7da1b01e57e0>
keyword[async] keyword[def] identifier[set_tz] ( identifier[self] ): literal[string] identifier[settings] = keyword[await] identifier[self] . identifier[api] . identifier[account] . identifier[settings] . identifier[get] () identifier[tz] = identifier[settings] . identifier[time_zone] . identifier[tzinfo_name] identifier[os] . identifier[environ] [ literal[string] ]= identifier[tz] identifier[time] . identifier[tzset] ()
async def set_tz(self): """ set the environment timezone to the timezone set in your twitter settings """ settings = await self.api.account.settings.get() tz = settings.time_zone.tzinfo_name os.environ['TZ'] = tz time.tzset()
def get_submodule_list(package_path: str) -> Tuple[ModuleDescription, ...]: """Get list of submodules for some package by its path. E.g ``pkg.subpackage``""" pkg = importlib.import_module(package_path) subs = ( ModuleDescription( name=modname, path="{}.{}".format(package_path, modname), is_package=ispkg ) for importer, modname, ispkg in pkgutil.iter_modules(pkg.__path__) ) result = tuple(subs) return result
def function[get_submodule_list, parameter[package_path]]: constant[Get list of submodules for some package by its path. E.g ``pkg.subpackage``] variable[pkg] assign[=] call[name[importlib].import_module, parameter[name[package_path]]] variable[subs] assign[=] <ast.GeneratorExp object at 0x7da2054a79d0> variable[result] assign[=] call[name[tuple], parameter[name[subs]]] return[name[result]]
keyword[def] identifier[get_submodule_list] ( identifier[package_path] : identifier[str] )-> identifier[Tuple] [ identifier[ModuleDescription] ,...]: literal[string] identifier[pkg] = identifier[importlib] . identifier[import_module] ( identifier[package_path] ) identifier[subs] =( identifier[ModuleDescription] ( identifier[name] = identifier[modname] , identifier[path] = literal[string] . identifier[format] ( identifier[package_path] , identifier[modname] ), identifier[is_package] = identifier[ispkg] ) keyword[for] identifier[importer] , identifier[modname] , identifier[ispkg] keyword[in] identifier[pkgutil] . identifier[iter_modules] ( identifier[pkg] . identifier[__path__] ) ) identifier[result] = identifier[tuple] ( identifier[subs] ) keyword[return] identifier[result]
def get_submodule_list(package_path: str) -> Tuple[ModuleDescription, ...]: """Get list of submodules for some package by its path. E.g ``pkg.subpackage``""" pkg = importlib.import_module(package_path) subs = (ModuleDescription(name=modname, path='{}.{}'.format(package_path, modname), is_package=ispkg) for (importer, modname, ispkg) in pkgutil.iter_modules(pkg.__path__)) result = tuple(subs) return result
def _handle_function_exists(self, node, scope, ctxt, stream): """Handle the function_exists unary operator :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ res = fields.Int() try: func = self._handle_node(node.expr, scope, ctxt, stream) if isinstance(func, functions.BaseFunction): res._pfp__set_value(1) else: res._pfp__set_value(0) except errors.UnresolvedID: res._pfp__set_value(0) return res
def function[_handle_function_exists, parameter[self, node, scope, ctxt, stream]]: constant[Handle the function_exists unary operator :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO ] variable[res] assign[=] call[name[fields].Int, parameter[]] <ast.Try object at 0x7da2041d8220> return[name[res]]
keyword[def] identifier[_handle_function_exists] ( identifier[self] , identifier[node] , identifier[scope] , identifier[ctxt] , identifier[stream] ): literal[string] identifier[res] = identifier[fields] . identifier[Int] () keyword[try] : identifier[func] = identifier[self] . identifier[_handle_node] ( identifier[node] . identifier[expr] , identifier[scope] , identifier[ctxt] , identifier[stream] ) keyword[if] identifier[isinstance] ( identifier[func] , identifier[functions] . identifier[BaseFunction] ): identifier[res] . identifier[_pfp__set_value] ( literal[int] ) keyword[else] : identifier[res] . identifier[_pfp__set_value] ( literal[int] ) keyword[except] identifier[errors] . identifier[UnresolvedID] : identifier[res] . identifier[_pfp__set_value] ( literal[int] ) keyword[return] identifier[res]
def _handle_function_exists(self, node, scope, ctxt, stream): """Handle the function_exists unary operator :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ res = fields.Int() try: func = self._handle_node(node.expr, scope, ctxt, stream) if isinstance(func, functions.BaseFunction): res._pfp__set_value(1) # depends on [control=['if'], data=[]] else: res._pfp__set_value(0) # depends on [control=['try'], data=[]] except errors.UnresolvedID: res._pfp__set_value(0) # depends on [control=['except'], data=[]] return res
def setup(self, bottom, top): """ Setup data layer according to parameters: - voc_dir: path to PASCAL VOC year dir - split: train / val / test - mean: tuple of mean values to subtract - randomize: load in random order (default: True) - seed: seed for randomization (default: None / current time) for PASCAL VOC semantic segmentation. example params = dict(voc_dir="/path/to/PASCAL/VOC2011", mean=(104.00698793, 116.66876762, 122.67891434), split="val") """ # config params = eval(self.param_str) self.voc_dir = params['voc_dir'] self.split = params['split'] self.mean = np.array(params['mean']) self.random = params.get('randomize', True) self.seed = params.get('seed', None) # two tops: data and label if len(top) != 2: raise Exception("Need to define two tops: data and label.") # data layers have no bottoms if len(bottom) != 0: raise Exception("Do not define a bottom.") # load indices for images and labels split_f = '{}/ImageSets/Segmentation/{}.txt'.format(self.voc_dir, self.split) self.indices = open(split_f, 'r').read().splitlines() self.idx = 0 # make eval deterministic if 'train' not in self.split: self.random = False # randomization: seed and pick if self.random: random.seed(self.seed) self.idx = random.randint(0, len(self.indices)-1)
def function[setup, parameter[self, bottom, top]]: constant[ Setup data layer according to parameters: - voc_dir: path to PASCAL VOC year dir - split: train / val / test - mean: tuple of mean values to subtract - randomize: load in random order (default: True) - seed: seed for randomization (default: None / current time) for PASCAL VOC semantic segmentation. example params = dict(voc_dir="/path/to/PASCAL/VOC2011", mean=(104.00698793, 116.66876762, 122.67891434), split="val") ] variable[params] assign[=] call[name[eval], parameter[name[self].param_str]] name[self].voc_dir assign[=] call[name[params]][constant[voc_dir]] name[self].split assign[=] call[name[params]][constant[split]] name[self].mean assign[=] call[name[np].array, parameter[call[name[params]][constant[mean]]]] name[self].random assign[=] call[name[params].get, parameter[constant[randomize], constant[True]]] name[self].seed assign[=] call[name[params].get, parameter[constant[seed], constant[None]]] if compare[call[name[len], parameter[name[top]]] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da1b1a679d0> if compare[call[name[len], parameter[name[bottom]]] not_equal[!=] constant[0]] begin[:] <ast.Raise object at 0x7da1b1a65d80> variable[split_f] assign[=] call[constant[{}/ImageSets/Segmentation/{}.txt].format, parameter[name[self].voc_dir, name[self].split]] name[self].indices assign[=] call[call[call[name[open], parameter[name[split_f], constant[r]]].read, parameter[]].splitlines, parameter[]] name[self].idx assign[=] constant[0] if compare[constant[train] <ast.NotIn object at 0x7da2590d7190> name[self].split] begin[:] name[self].random assign[=] constant[False] if name[self].random begin[:] call[name[random].seed, parameter[name[self].seed]] name[self].idx assign[=] call[name[random].randint, parameter[constant[0], binary_operation[call[name[len], parameter[name[self].indices]] - constant[1]]]]
keyword[def] identifier[setup] ( identifier[self] , identifier[bottom] , identifier[top] ): literal[string] identifier[params] = identifier[eval] ( identifier[self] . identifier[param_str] ) identifier[self] . identifier[voc_dir] = identifier[params] [ literal[string] ] identifier[self] . identifier[split] = identifier[params] [ literal[string] ] identifier[self] . identifier[mean] = identifier[np] . identifier[array] ( identifier[params] [ literal[string] ]) identifier[self] . identifier[random] = identifier[params] . identifier[get] ( literal[string] , keyword[True] ) identifier[self] . identifier[seed] = identifier[params] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[len] ( identifier[top] )!= literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[len] ( identifier[bottom] )!= literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[split_f] = literal[string] . identifier[format] ( identifier[self] . identifier[voc_dir] , identifier[self] . identifier[split] ) identifier[self] . identifier[indices] = identifier[open] ( identifier[split_f] , literal[string] ). identifier[read] (). identifier[splitlines] () identifier[self] . identifier[idx] = literal[int] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[split] : identifier[self] . identifier[random] = keyword[False] keyword[if] identifier[self] . identifier[random] : identifier[random] . identifier[seed] ( identifier[self] . identifier[seed] ) identifier[self] . identifier[idx] = identifier[random] . identifier[randint] ( literal[int] , identifier[len] ( identifier[self] . identifier[indices] )- literal[int] )
def setup(self, bottom, top): """ Setup data layer according to parameters: - voc_dir: path to PASCAL VOC year dir - split: train / val / test - mean: tuple of mean values to subtract - randomize: load in random order (default: True) - seed: seed for randomization (default: None / current time) for PASCAL VOC semantic segmentation. example params = dict(voc_dir="/path/to/PASCAL/VOC2011", mean=(104.00698793, 116.66876762, 122.67891434), split="val") """ # config params = eval(self.param_str) self.voc_dir = params['voc_dir'] self.split = params['split'] self.mean = np.array(params['mean']) self.random = params.get('randomize', True) self.seed = params.get('seed', None) # two tops: data and label if len(top) != 2: raise Exception('Need to define two tops: data and label.') # depends on [control=['if'], data=[]] # data layers have no bottoms if len(bottom) != 0: raise Exception('Do not define a bottom.') # depends on [control=['if'], data=[]] # load indices for images and labels split_f = '{}/ImageSets/Segmentation/{}.txt'.format(self.voc_dir, self.split) self.indices = open(split_f, 'r').read().splitlines() self.idx = 0 # make eval deterministic if 'train' not in self.split: self.random = False # depends on [control=['if'], data=[]] # randomization: seed and pick if self.random: random.seed(self.seed) self.idx = random.randint(0, len(self.indices) - 1) # depends on [control=['if'], data=[]]
def export_model(self, export_formats, export_dir=None): """Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models. """ export_dir = export_dir or self.logdir return self._export_model(export_formats, export_dir)
def function[export_model, parameter[self, export_formats, export_dir]]: constant[Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models. ] variable[export_dir] assign[=] <ast.BoolOp object at 0x7da18f00fa30> return[call[name[self]._export_model, parameter[name[export_formats], name[export_dir]]]]
keyword[def] identifier[export_model] ( identifier[self] , identifier[export_formats] , identifier[export_dir] = keyword[None] ): literal[string] identifier[export_dir] = identifier[export_dir] keyword[or] identifier[self] . identifier[logdir] keyword[return] identifier[self] . identifier[_export_model] ( identifier[export_formats] , identifier[export_dir] )
def export_model(self, export_formats, export_dir=None): """Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models. """ export_dir = export_dir or self.logdir return self._export_model(export_formats, export_dir)
def make_idd_index(extract_func, fname, debug): """generate the iddindex""" astr = _readfname(fname) # fname is exhausted by the above read # reconstitute fname as a StringIO fname = StringIO(astr) # glist = iddgroups.iddtxt2grouplist(astr.decode('ISO-8859-2')) blocklst, commlst, commdct = extract_func(fname) name2refs = iddindex.makename2refdct(commdct) ref2namesdct = iddindex.makeref2namesdct(name2refs) idd_index = dict(name2refs=name2refs, ref2names=ref2namesdct) commdct = iddindex.ref2names2commdct(ref2namesdct, commdct) return blocklst, commlst, commdct, idd_index
def function[make_idd_index, parameter[extract_func, fname, debug]]: constant[generate the iddindex] variable[astr] assign[=] call[name[_readfname], parameter[name[fname]]] variable[fname] assign[=] call[name[StringIO], parameter[name[astr]]] <ast.Tuple object at 0x7da20e9b3eb0> assign[=] call[name[extract_func], parameter[name[fname]]] variable[name2refs] assign[=] call[name[iddindex].makename2refdct, parameter[name[commdct]]] variable[ref2namesdct] assign[=] call[name[iddindex].makeref2namesdct, parameter[name[name2refs]]] variable[idd_index] assign[=] call[name[dict], parameter[]] variable[commdct] assign[=] call[name[iddindex].ref2names2commdct, parameter[name[ref2namesdct], name[commdct]]] return[tuple[[<ast.Name object at 0x7da18ede5a20>, <ast.Name object at 0x7da18ede5d20>, <ast.Name object at 0x7da18ede7220>, <ast.Name object at 0x7da18ede7c10>]]]
keyword[def] identifier[make_idd_index] ( identifier[extract_func] , identifier[fname] , identifier[debug] ): literal[string] identifier[astr] = identifier[_readfname] ( identifier[fname] ) identifier[fname] = identifier[StringIO] ( identifier[astr] ) identifier[blocklst] , identifier[commlst] , identifier[commdct] = identifier[extract_func] ( identifier[fname] ) identifier[name2refs] = identifier[iddindex] . identifier[makename2refdct] ( identifier[commdct] ) identifier[ref2namesdct] = identifier[iddindex] . identifier[makeref2namesdct] ( identifier[name2refs] ) identifier[idd_index] = identifier[dict] ( identifier[name2refs] = identifier[name2refs] , identifier[ref2names] = identifier[ref2namesdct] ) identifier[commdct] = identifier[iddindex] . identifier[ref2names2commdct] ( identifier[ref2namesdct] , identifier[commdct] ) keyword[return] identifier[blocklst] , identifier[commlst] , identifier[commdct] , identifier[idd_index]
def make_idd_index(extract_func, fname, debug): """generate the iddindex""" astr = _readfname(fname) # fname is exhausted by the above read # reconstitute fname as a StringIO fname = StringIO(astr) # glist = iddgroups.iddtxt2grouplist(astr.decode('ISO-8859-2')) (blocklst, commlst, commdct) = extract_func(fname) name2refs = iddindex.makename2refdct(commdct) ref2namesdct = iddindex.makeref2namesdct(name2refs) idd_index = dict(name2refs=name2refs, ref2names=ref2namesdct) commdct = iddindex.ref2names2commdct(ref2namesdct, commdct) return (blocklst, commlst, commdct, idd_index)