code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def start(self):
    """
    This method must be called immediately after the class is instantiated.
    It instantiates the serial interface and then performs auto pin discovery.
    It is intended for use by pymata3 applications that do not use asyncio
    coroutines directly.

    :returns: No return value.
    """
    # check if user specified a socket transport
    if self.ip_address:
        self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)
        self.loop.run_until_complete((self.socket.start()))
        # set the read and write handles
        self.read = self.socket.read
        self.write = self.socket.write
        # consume the handshake items sent by the remote end,
        # one read per expected item
        for i in range(0, len(self.ip_handshake)):
            self.loop.run_until_complete((self.read()))
    else:
        try:
            # 57600 baud is the rate the Firmata sketch is expected to use
            self.serial_port = PymataSerial(self.com_port, 57600,
                                            self.sleep_tune,
                                            self.log_output)
            # set the read and write handles
            self.read = self.serial_port.read
            self.write = self.serial_port.write
        except serial.SerialException:
            if self.log_output:
                log_string = 'Cannot instantiate serial interface: ' \
                             + self.com_port
                logging.exception(log_string)
            else:
                print(
                    'Cannot instantiate serial interface: ' + self.com_port)
                print('To see a list of serial ports, type: "list_serial_ports" in your console.')
            sys.exit(0)

    # wait for arduino to go through a reset cycle if need be
    time.sleep(self.arduino_wait)

    # register the command dispatcher with the event loop
    # self.loop = asyncio.get_event_loop()
    self.the_task = self.loop.create_task(self._command_dispatcher())

    # get arduino firmware version and print it
    try:
        firmware_version = self.loop.run_until_complete(
            self.get_firmware_version())
        if self.log_output:
            # NOTE(review): logging.exception emits a traceback and is
            # intended for except blocks; logging.info looks intended
            # here (and in the timeout branch below) -- confirm before
            # changing
            log_string = "\nArduino Firmware ID: " + firmware_version
            logging.exception(log_string)
        else:
            print("\nArduino Firmware ID: " + firmware_version)
    except TypeError:
        # no usable version string came back from the board
        print('\nIs your serial cable plugged in and do you have the correct Firmata sketch loaded?')
        print('Is the COM port correct?')
        print('To see a list of serial ports, type: "list_serial_ports" in your console.')
        sys.exit(0)

    # try to get an analog pin map. if it comes back as none - shutdown
    report = self.loop.run_until_complete(self.get_analog_map())
    if not report:
        if self.log_output:
            log_string = '*** Analog map retrieval timed out. ***'
            logging.exception(log_string)
            log_string = '\nDo you have Arduino connectivity and do you ' \
                         'have a Firmata sketch uploaded to the board?'
            logging.exception(log_string)
        else:
            print('*** Analog map retrieval timed out. ***')
            print('\nDo you have Arduino connectivity and do you have a '
                  'Firmata sketch uploaded to the board?')
        # orderly shutdown: cancel every pending task, give them a moment
        # to unwind, then tear the loop down and exit.
        # NOTE(review): asyncio.Task.all_tasks was deprecated in 3.7 and
        # removed in 3.10 (module-level asyncio.all_tasks replaces it);
        # also close() is called before stop() -- confirm supported
        # Python versions before touching this sequence
        try:
            loop = self.loop
            for t in asyncio.Task.all_tasks(loop):
                t.cancel()
            loop.run_until_complete(asyncio.sleep(.1))
            loop.close()
            loop.stop()
            sys.exit(0)
        except RuntimeError:
            # this suppresses the Event Loop Is Running message, which may
            # be a bug in python 3
            sys.exit(0)
        except TypeError:
            sys.exit(0)

    # custom assemble the pin lists: one PinData per reported pin, plus
    # an analog PinData for every entry not marked IGNORE
    for pin in report:
        digital_data = PinData()
        self.digital_pins.append(digital_data)
        if pin != Constants.IGNORE:
            analog_data = PinData()
            self.analog_pins.append(analog_data)

    if self.log_output:
        log_string = 'Auto-discovery complete. Found ' + \
                     str(len(self.digital_pins)) + ' Digital Pins and ' + \
                     str(len(self.analog_pins)) + ' Analog Pins'
        logging.info(log_string)
    else:
        print('{} {} {} {} {}'.format('Auto-discovery complete. Found',
                                      len(self.digital_pins),
                                      'Digital Pins and',
                                      len(self.analog_pins),
                                      'Analog Pins\n\n'))

    # analog pin numbering starts right after the digital pins
    self.first_analog_pin = len(self.digital_pins) - len(self.analog_pins)
def function[start, parameter[self]]: constant[ This method must be called immediately after the class is instantiated. It instantiates the serial interface and then performs auto pin discovery. It is intended for use by pymata3 applications that do not use asyncio coroutines directly. :returns: No return value. ] if name[self].ip_address begin[:] name[self].socket assign[=] call[name[PymataSocket], parameter[name[self].ip_address, name[self].ip_port, name[self].loop]] call[name[self].loop.run_until_complete, parameter[call[name[self].socket.start, parameter[]]]] name[self].read assign[=] name[self].socket.read name[self].write assign[=] name[self].socket.write for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[self].ip_handshake]]]]] begin[:] call[name[self].loop.run_until_complete, parameter[call[name[self].read, parameter[]]]] call[name[time].sleep, parameter[name[self].arduino_wait]] name[self].the_task assign[=] call[name[self].loop.create_task, parameter[call[name[self]._command_dispatcher, parameter[]]]] <ast.Try object at 0x7da207f01d20> variable[report] assign[=] call[name[self].loop.run_until_complete, parameter[call[name[self].get_analog_map, parameter[]]]] if <ast.UnaryOp object at 0x7da207f00af0> begin[:] if name[self].log_output begin[:] variable[log_string] assign[=] constant[*** Analog map retrieval timed out. ***] call[name[logging].exception, parameter[name[log_string]]] variable[log_string] assign[=] constant[ Do you have Arduino connectivity and do you have a Firmata sketch uploaded to the board?] 
call[name[logging].exception, parameter[name[log_string]]] <ast.Try object at 0x7da207f007f0> for taget[name[pin]] in starred[name[report]] begin[:] variable[digital_data] assign[=] call[name[PinData], parameter[]] call[name[self].digital_pins.append, parameter[name[digital_data]]] if compare[name[pin] not_equal[!=] name[Constants].IGNORE] begin[:] variable[analog_data] assign[=] call[name[PinData], parameter[]] call[name[self].analog_pins.append, parameter[name[analog_data]]] if name[self].log_output begin[:] variable[log_string] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[Auto-discovery complete. Found ] + call[name[str], parameter[call[name[len], parameter[name[self].digital_pins]]]]] + constant[ Digital Pins and ]] + call[name[str], parameter[call[name[len], parameter[name[self].analog_pins]]]]] + constant[ Analog Pins]] call[name[logging].info, parameter[name[log_string]]] name[self].first_analog_pin assign[=] binary_operation[call[name[len], parameter[name[self].digital_pins]] - call[name[len], parameter[name[self].analog_pins]]]
keyword[def] identifier[start] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[ip_address] : identifier[self] . identifier[socket] = identifier[PymataSocket] ( identifier[self] . identifier[ip_address] , identifier[self] . identifier[ip_port] , identifier[self] . identifier[loop] ) identifier[self] . identifier[loop] . identifier[run_until_complete] (( identifier[self] . identifier[socket] . identifier[start] ())) identifier[self] . identifier[read] = identifier[self] . identifier[socket] . identifier[read] identifier[self] . identifier[write] = identifier[self] . identifier[socket] . identifier[write] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[self] . identifier[ip_handshake] )): identifier[self] . identifier[loop] . identifier[run_until_complete] (( identifier[self] . identifier[read] ())) keyword[else] : keyword[try] : identifier[self] . identifier[serial_port] = identifier[PymataSerial] ( identifier[self] . identifier[com_port] , literal[int] , identifier[self] . identifier[sleep_tune] , identifier[self] . identifier[log_output] ) identifier[self] . identifier[read] = identifier[self] . identifier[serial_port] . identifier[read] identifier[self] . identifier[write] = identifier[self] . identifier[serial_port] . identifier[write] keyword[except] identifier[serial] . identifier[SerialException] : keyword[if] identifier[self] . identifier[log_output] : identifier[log_string] = literal[string] + identifier[self] . identifier[com_port] identifier[logging] . identifier[exception] ( identifier[log_string] ) keyword[else] : identifier[print] ( literal[string] + identifier[self] . identifier[com_port] ) identifier[print] ( literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[time] . identifier[sleep] ( identifier[self] . identifier[arduino_wait] ) identifier[self] . identifier[the_task] = identifier[self] . identifier[loop] . 
identifier[create_task] ( identifier[self] . identifier[_command_dispatcher] ()) keyword[try] : identifier[firmware_version] = identifier[self] . identifier[loop] . identifier[run_until_complete] ( identifier[self] . identifier[get_firmware_version] ()) keyword[if] identifier[self] . identifier[log_output] : identifier[log_string] = literal[string] + identifier[firmware_version] identifier[logging] . identifier[exception] ( identifier[log_string] ) keyword[else] : identifier[print] ( literal[string] + identifier[firmware_version] ) keyword[except] identifier[TypeError] : identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[report] = identifier[self] . identifier[loop] . identifier[run_until_complete] ( identifier[self] . identifier[get_analog_map] ()) keyword[if] keyword[not] identifier[report] : keyword[if] identifier[self] . identifier[log_output] : identifier[log_string] = literal[string] identifier[logging] . identifier[exception] ( identifier[log_string] ) identifier[log_string] = literal[string] literal[string] identifier[logging] . identifier[exception] ( identifier[log_string] ) keyword[else] : identifier[print] ( literal[string] ) identifier[print] ( literal[string] literal[string] ) keyword[try] : identifier[loop] = identifier[self] . identifier[loop] keyword[for] identifier[t] keyword[in] identifier[asyncio] . identifier[Task] . identifier[all_tasks] ( identifier[loop] ): identifier[t] . identifier[cancel] () identifier[loop] . identifier[run_until_complete] ( identifier[asyncio] . identifier[sleep] ( literal[int] )) identifier[loop] . identifier[close] () identifier[loop] . identifier[stop] () identifier[sys] . identifier[exit] ( literal[int] ) keyword[except] identifier[RuntimeError] : identifier[sys] . identifier[exit] ( literal[int] ) keyword[except] identifier[TypeError] : identifier[sys] . 
identifier[exit] ( literal[int] ) keyword[for] identifier[pin] keyword[in] identifier[report] : identifier[digital_data] = identifier[PinData] () identifier[self] . identifier[digital_pins] . identifier[append] ( identifier[digital_data] ) keyword[if] identifier[pin] != identifier[Constants] . identifier[IGNORE] : identifier[analog_data] = identifier[PinData] () identifier[self] . identifier[analog_pins] . identifier[append] ( identifier[analog_data] ) keyword[if] identifier[self] . identifier[log_output] : identifier[log_string] = literal[string] + identifier[str] ( identifier[len] ( identifier[self] . identifier[digital_pins] ))+ literal[string] + identifier[str] ( identifier[len] ( identifier[self] . identifier[analog_pins] ))+ literal[string] identifier[logging] . identifier[info] ( identifier[log_string] ) keyword[else] : identifier[print] ( literal[string] . identifier[format] ( literal[string] , identifier[len] ( identifier[self] . identifier[digital_pins] ), literal[string] , identifier[len] ( identifier[self] . identifier[analog_pins] ), literal[string] )) identifier[self] . identifier[first_analog_pin] = identifier[len] ( identifier[self] . identifier[digital_pins] )- identifier[len] ( identifier[self] . identifier[analog_pins] )
def start(self): """ This method must be called immediately after the class is instantiated. It instantiates the serial interface and then performs auto pin discovery. It is intended for use by pymata3 applications that do not use asyncio coroutines directly. :returns: No return value. """ # check if user specified a socket transport if self.ip_address: self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop) self.loop.run_until_complete(self.socket.start()) # set the read and write handles self.read = self.socket.read self.write = self.socket.write for i in range(0, len(self.ip_handshake)): self.loop.run_until_complete(self.read()) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: try: self.serial_port = PymataSerial(self.com_port, 57600, self.sleep_tune, self.log_output) # set the read and write handles self.read = self.serial_port.read self.write = self.serial_port.write # depends on [control=['try'], data=[]] except serial.SerialException: if self.log_output: log_string = 'Cannot instantiate serial interface: ' + self.com_port logging.exception(log_string) # depends on [control=['if'], data=[]] else: print('Cannot instantiate serial interface: ' + self.com_port) print('To see a list of serial ports, type: "list_serial_ports" in your console.') sys.exit(0) # depends on [control=['except'], data=[]] # wait for arduino to go through a reset cycle if need be time.sleep(self.arduino_wait) # register the get_command method with the event loop # self.loop = asyncio.get_event_loop() self.the_task = self.loop.create_task(self._command_dispatcher()) # get arduino firmware version and print it try: firmware_version = self.loop.run_until_complete(self.get_firmware_version()) if self.log_output: log_string = '\nArduino Firmware ID: ' + firmware_version logging.exception(log_string) # depends on [control=['if'], data=[]] else: print('\nArduino Firmware ID: ' + firmware_version) # depends on [control=['try'], data=[]] except 
TypeError: print('\nIs your serial cable plugged in and do you have the correct Firmata sketch loaded?') print('Is the COM port correct?') print('To see a list of serial ports, type: "list_serial_ports" in your console.') sys.exit(0) # depends on [control=['except'], data=[]] # try to get an analog pin map. if it comes back as none - shutdown report = self.loop.run_until_complete(self.get_analog_map()) if not report: if self.log_output: log_string = '*** Analog map retrieval timed out. ***' logging.exception(log_string) log_string = '\nDo you have Arduino connectivity and do you have a Firmata sketch uploaded to the board?' logging.exception(log_string) # depends on [control=['if'], data=[]] else: print('*** Analog map retrieval timed out. ***') print('\nDo you have Arduino connectivity and do you have a Firmata sketch uploaded to the board?') try: loop = self.loop for t in asyncio.Task.all_tasks(loop): t.cancel() # depends on [control=['for'], data=['t']] loop.run_until_complete(asyncio.sleep(0.1)) loop.close() loop.stop() sys.exit(0) # depends on [control=['try'], data=[]] except RuntimeError: # this suppresses the Event Loop Is Running message, which may # be a bug in python 3 sys.exit(0) # depends on [control=['except'], data=[]] except TypeError: sys.exit(0) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # custom assemble the pin lists for pin in report: digital_data = PinData() self.digital_pins.append(digital_data) if pin != Constants.IGNORE: analog_data = PinData() self.analog_pins.append(analog_data) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pin']] if self.log_output: log_string = 'Auto-discovery complete. Found ' + str(len(self.digital_pins)) + ' Digital Pins and ' + str(len(self.analog_pins)) + ' Analog Pins' logging.info(log_string) # depends on [control=['if'], data=[]] else: print('{} {} {} {} {}'.format('Auto-discovery complete. 
Found', len(self.digital_pins), 'Digital Pins and', len(self.analog_pins), 'Analog Pins\n\n')) self.first_analog_pin = len(self.digital_pins) - len(self.analog_pins)
def plot_noncontiguous(ax, data, ind, color='black', label='', offset=0,
                       linewidth=0.5, linestyle='-'):
    '''Plot non-contiguous slice of data

    Args
    ----
    ax: pyplot.ax
        Axes object to draw the line on
    data: ndarray
        The data with non continguous regions to plot. Must be a float
        array: positions outside `ind` are overwritten with NaN in a copy.
    ind: ndarray
        indices of data to be plotted
    color: matplotlib color
        Color of plotted line
    label: str
        Name to be shown in legend
    offset: int
        The number of index positions to reset start of data to zero
    linewidth: float
        The width of the plotted line
    linestyle: str
        The char representation of the plotting style for the line

    Returns
    -------
    ax: pyplot.ax
        Axes object with line glyph added for non-contiguous regions
    '''
    # imports hoisted out of the helper so they run once per call,
    # not once per helper invocation
    import copy
    import numpy

    def slice_with_nans(ind, data, offset):
        '''Insert nans in indices and data where indices non-contiguous'''
        # Start from an all-NaN index array; only positions named by `ind`
        # receive real values, so gaps between indices remain NaN and
        # matplotlib breaks the line there.
        ind_nan = numpy.full(len(data), numpy.nan)
        ind_nan[ind - offset] = ind

        # deepcopy so the caller's `data` array is never modified
        data_nan = copy.deepcopy(data)
        data_nan[numpy.isnan(ind_nan)] = numpy.nan

        return ind_nan, data_nan

    x, y = slice_with_nans(ind, data, offset)
    ax.plot(x, y, color=color, linewidth=linewidth, linestyle=linestyle,
            label=label)

    return ax
def function[plot_noncontiguous, parameter[ax, data, ind, color, label, offset, linewidth, linestyle]]: constant[Plot non-contiguous slice of data Args ---- data: ndarray The data with non continguous regions to plot ind: ndarray indices of data to be plotted color: matplotlib color Color of plotted line label: str Name to be shown in legend offset: int The number of index positions to reset start of data to zero linewidth: float The width of the plotted line linstyle: str The char representation of the plotting style for the line Returns ------- ax: pyplot.ax Axes object with line glyph added for non-contiguous regions ] def function[slice_with_nans, parameter[ind, data, offset]]: constant[Insert nans in indices and data where indices non-contiguous] import module[copy] import module[numpy] variable[ind_nan] assign[=] call[name[numpy].zeros, parameter[call[name[len], parameter[name[data]]]]] call[name[ind_nan]][<ast.Slice object at 0x7da1b13e6590>] assign[=] name[numpy].nan call[name[ind_nan]][binary_operation[name[ind] - name[offset]]] assign[=] call[name[copy].deepcopy, parameter[name[ind]]] variable[data_nan] assign[=] call[name[copy].deepcopy, parameter[name[data]]] call[name[data_nan]][call[name[numpy].isnan, parameter[name[ind_nan]]]] assign[=] name[numpy].nan return[tuple[[<ast.Name object at 0x7da1b13e4df0>, <ast.Name object at 0x7da1b13e4e20>]]] <ast.Tuple object at 0x7da1b13e4d60> assign[=] call[name[slice_with_nans], parameter[name[ind], name[data], name[offset]]] call[name[ax].plot, parameter[name[x], name[y]]] return[name[ax]]
keyword[def] identifier[plot_noncontiguous] ( identifier[ax] , identifier[data] , identifier[ind] , identifier[color] = literal[string] , identifier[label] = literal[string] , identifier[offset] = literal[int] , identifier[linewidth] = literal[int] , identifier[linestyle] = literal[string] ): literal[string] keyword[def] identifier[slice_with_nans] ( identifier[ind] , identifier[data] , identifier[offset] ): literal[string] keyword[import] identifier[copy] keyword[import] identifier[numpy] identifier[ind_nan] = identifier[numpy] . identifier[zeros] ( identifier[len] ( identifier[data] )) identifier[ind_nan] [:]= identifier[numpy] . identifier[nan] identifier[ind_nan] [ identifier[ind] - identifier[offset] ]= identifier[copy] . identifier[deepcopy] ( identifier[ind] ) identifier[data_nan] = identifier[copy] . identifier[deepcopy] ( identifier[data] ) identifier[data_nan] [ identifier[numpy] . identifier[isnan] ( identifier[ind_nan] )]= identifier[numpy] . identifier[nan] keyword[return] identifier[ind_nan] , identifier[data_nan] identifier[x] , identifier[y] = identifier[slice_with_nans] ( identifier[ind] , identifier[data] , identifier[offset] ) identifier[ax] . identifier[plot] ( identifier[x] , identifier[y] , identifier[color] = identifier[color] , identifier[linewidth] = identifier[linewidth] , identifier[linestyle] = identifier[linestyle] , identifier[label] = identifier[label] ) keyword[return] identifier[ax]
def plot_noncontiguous(ax, data, ind, color='black', label='', offset=0, linewidth=0.5, linestyle='-'): """Plot non-contiguous slice of data Args ---- data: ndarray The data with non continguous regions to plot ind: ndarray indices of data to be plotted color: matplotlib color Color of plotted line label: str Name to be shown in legend offset: int The number of index positions to reset start of data to zero linewidth: float The width of the plotted line linstyle: str The char representation of the plotting style for the line Returns ------- ax: pyplot.ax Axes object with line glyph added for non-contiguous regions """ def slice_with_nans(ind, data, offset): """Insert nans in indices and data where indices non-contiguous""" import copy import numpy ind_nan = numpy.zeros(len(data)) ind_nan[:] = numpy.nan # prevent ind from overwrite with deepcopy ind_nan[ind - offset] = copy.deepcopy(ind) #ind_nan = ind_nan[ind[0]-offset:ind[-1]-offset] # prevent data from overwrite with deepcopy data_nan = copy.deepcopy(data) data_nan[numpy.isnan(ind_nan)] = numpy.nan return (ind_nan, data_nan) (x, y) = slice_with_nans(ind, data, offset) ax.plot(x, y, color=color, linewidth=linewidth, linestyle=linestyle, label=label) return ax
def dispatch_sockets(self, timeout=None):
    """Dispatches incoming sockets."""
    for sock in self.select_sockets(timeout=timeout):
        if sock is not self.listener:
            # An existing connection became readable. A one-byte read is
            # attempted purely to detect the connection state; a reset
            # from the peer is tolerated, anything else propagates.
            try:
                sock.recv(1)
            except socket.error as exc:
                if exc.errno != ECONNRESET:
                    raise
            self.disconnected(sock)
        else:
            # The listening socket is readable: accept the new peer and
            # hand it to the connected() hook.
            accepted, _addr = sock.accept()
            self.connected(accepted)
def function[dispatch_sockets, parameter[self, timeout]]: constant[Dispatches incoming sockets.] for taget[name[sock]] in starred[call[name[self].select_sockets, parameter[]]] begin[:] if compare[name[sock] is name[self].listener] begin[:] variable[listener] assign[=] name[sock] <ast.Tuple object at 0x7da1b11a8610> assign[=] call[name[listener].accept, parameter[]] call[name[self].connected, parameter[name[sock]]]
keyword[def] identifier[dispatch_sockets] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] keyword[for] identifier[sock] keyword[in] identifier[self] . identifier[select_sockets] ( identifier[timeout] = identifier[timeout] ): keyword[if] identifier[sock] keyword[is] identifier[self] . identifier[listener] : identifier[listener] = identifier[sock] identifier[sock] , identifier[addr] = identifier[listener] . identifier[accept] () identifier[self] . identifier[connected] ( identifier[sock] ) keyword[else] : keyword[try] : identifier[sock] . identifier[recv] ( literal[int] ) keyword[except] identifier[socket] . identifier[error] keyword[as] identifier[exc] : keyword[if] identifier[exc] . identifier[errno] != identifier[ECONNRESET] : keyword[raise] identifier[self] . identifier[disconnected] ( identifier[sock] )
def dispatch_sockets(self, timeout=None): """Dispatches incoming sockets.""" for sock in self.select_sockets(timeout=timeout): if sock is self.listener: listener = sock (sock, addr) = listener.accept() self.connected(sock) # depends on [control=['if'], data=['sock']] else: try: sock.recv(1) # depends on [control=['try'], data=[]] except socket.error as exc: if exc.errno != ECONNRESET: raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['exc']] self.disconnected(sock) # depends on [control=['for'], data=['sock']]
def delete_data_source(self, data_source):
    """ Delete a data source, given a one-key spec carrying its ID.

    data_source = { 'imap': {'id': 'data-source-id'}}
    or
    data_source = { 'pop3': {'id': 'data-source-id'}}

    Note: the spec must contain the 'id' key; the data source's folder
    is deleted first, then the source itself.
    """
    # the spec is a single-key dict whose key is the source type
    # ('imap' or 'pop3')
    source_type = next(iter(data_source))
    complete_source = self.get_data_sources(
        source_id=data_source[source_type]['id'])
    # 'l' is the id of the folder attached to this data source
    folder_id = complete_source[source_type][0]['l']
    self.delete_folders(folder_ids=[folder_id])
    return self.request('DeleteDataSource', data_source)
def function[delete_data_source, parameter[self, data_source]]: constant[ Delete data source with it's name or ID. data_source = { 'imap': {'name': 'data-source-name'}} or data_source = { 'pop3': {'id': 'data-source-id'}} ] variable[source_type] assign[=] call[<ast.ListComp object at 0x7da207f98a90>][constant[0]] variable[complete_source] assign[=] call[name[self].get_data_sources, parameter[]] variable[folder_id] assign[=] call[call[call[name[complete_source]][name[source_type]]][constant[0]]][constant[l]] call[name[self].delete_folders, parameter[]] return[call[name[self].request, parameter[constant[DeleteDataSource], name[data_source]]]]
keyword[def] identifier[delete_data_source] ( identifier[self] , identifier[data_source] ): literal[string] identifier[source_type] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[data_source] . identifier[keys] ()][ literal[int] ] identifier[complete_source] = identifier[self] . identifier[get_data_sources] ( identifier[source_id] = identifier[data_source] [ identifier[source_type] ][ literal[string] ]) identifier[folder_id] = identifier[complete_source] [ identifier[source_type] ][ literal[int] ][ literal[string] ] identifier[self] . identifier[delete_folders] ( identifier[folder_ids] =[ identifier[folder_id] ]) keyword[return] identifier[self] . identifier[request] ( literal[string] , identifier[data_source] )
def delete_data_source(self, data_source): """ Delete data source with it's name or ID. data_source = { 'imap': {'name': 'data-source-name'}} or data_source = { 'pop3': {'id': 'data-source-id'}} """ source_type = [k for k in data_source.keys()][0] complete_source = self.get_data_sources(source_id=data_source[source_type]['id']) folder_id = complete_source[source_type][0]['l'] self.delete_folders(folder_ids=[folder_id]) return self.request('DeleteDataSource', data_source)
def camelize_classname(base, tablename, table):
    """Produce a 'camelized' class name, e.g.
    'words_and_underscores' -> 'WordsAndUnderscores'

    Fix: the original wrote this as two adjacent string statements, so
    the second line was a no-op expression and never reached __doc__.
    """
    # Upper-case the first character, then turn each '_x' into 'X'.
    return str(tablename[0].upper() +
               re.sub(r'_([a-z])',
                      lambda m: m.group(1).upper(),
                      tablename[1:]))
def function[camelize_classname, parameter[base, tablename, table]]: constant[Produce a 'camelized' class name, e.g. ] constant['words_and_underscores' -> 'WordsAndUnderscores'] return[call[name[str], parameter[binary_operation[call[call[name[tablename]][constant[0]].upper, parameter[]] + call[name[re].sub, parameter[constant[_([a-z])], <ast.Lambda object at 0x7da2041d8a30>, call[name[tablename]][<ast.Slice object at 0x7da2041d82e0>]]]]]]]
keyword[def] identifier[camelize_classname] ( identifier[base] , identifier[tablename] , identifier[table] ): literal[string] literal[string] keyword[return] identifier[str] ( identifier[tablename] [ literal[int] ]. identifier[upper] ()+ identifier[re] . identifier[sub] ( literal[string] , keyword[lambda] identifier[m] : identifier[m] . identifier[group] ( literal[int] ). identifier[upper] (), identifier[tablename] [ literal[int] :]))
def camelize_classname(base, tablename, table): """Produce a 'camelized' class name, e.g. """ "'words_and_underscores' -> 'WordsAndUnderscores'" return str(tablename[0].upper() + re.sub('_([a-z])', lambda m: m.group(1).upper(), tablename[1:]))
def interpolate_colors(array: numpy.ndarray, x: int) -> numpy.ndarray:
    """
    Creates a color map for values in array

    :param array: color map to interpolate
    :param x: number of colors
    :return: interpolated color map
    """
    # number of output slots spanned by each pair of adjacent input markers
    segment = x / (len(array) - 1)
    interpolated = []
    for idx in range(x):
        position = idx / segment
        if idx % segment == 0:
            # exactly on an input marker: copy it through unchanged
            interpolated.append(array[int(position)])
        else:
            # between two markers: linearly blend and round to integers
            lo = array[math.floor(position)]
            hi = array[math.ceil(position)]
            fraction = idx % segment / segment
            interpolated.append(numpy.rint(lo + ((hi - lo) * fraction)))
    # force the final entry to be the last input marker
    interpolated[-1] = array[-1]
    return numpy.array(interpolated).astype(numpy.uint8)
def function[interpolate_colors, parameter[array, x]]: constant[ Creates a color map for values in array :param array: color map to interpolate :param x: number of colors :return: interpolated color map ] variable[out_array] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[name[x]]]] begin[:] if compare[binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> binary_operation[name[x] / binary_operation[call[name[len], parameter[name[array]]] - constant[1]]]] equal[==] constant[0]] begin[:] variable[index] assign[=] binary_operation[name[i] / binary_operation[name[x] / binary_operation[call[name[len], parameter[name[array]]] - constant[1]]]] call[name[out_array].append, parameter[call[name[array]][call[name[int], parameter[name[index]]]]]] call[name[out_array]][<ast.UnaryOp object at 0x7da1b0e772e0>] assign[=] call[name[array]][<ast.UnaryOp object at 0x7da1b0e76f80>] return[call[call[name[numpy].array, parameter[name[out_array]]].astype, parameter[name[numpy].uint8]]]
keyword[def] identifier[interpolate_colors] ( identifier[array] : identifier[numpy] . identifier[ndarray] , identifier[x] : identifier[int] )-> identifier[numpy] . identifier[ndarray] : literal[string] identifier[out_array] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[x] ): keyword[if] identifier[i] %( identifier[x] /( identifier[len] ( identifier[array] )- literal[int] ))== literal[int] : identifier[index] = identifier[i] /( identifier[x] /( identifier[len] ( identifier[array] )- literal[int] )) identifier[out_array] . identifier[append] ( identifier[array] [ identifier[int] ( identifier[index] )]) keyword[else] : identifier[start_marker] = identifier[array] [ identifier[math] . identifier[floor] ( identifier[i] /( identifier[x] /( identifier[len] ( identifier[array] )- literal[int] )))] identifier[stop_marker] = identifier[array] [ identifier[math] . identifier[ceil] ( identifier[i] /( identifier[x] /( identifier[len] ( identifier[array] )- literal[int] )))] identifier[interp_amount] = identifier[i] %( identifier[x] /( identifier[len] ( identifier[array] )- literal[int] ))/( identifier[x] /( identifier[len] ( identifier[array] )- literal[int] )) identifier[interp_color] = identifier[numpy] . identifier[rint] ( identifier[start_marker] +(( identifier[stop_marker] - identifier[start_marker] )* identifier[interp_amount] )) identifier[out_array] . identifier[append] ( identifier[interp_color] ) identifier[out_array] [- literal[int] ]= identifier[array] [- literal[int] ] keyword[return] identifier[numpy] . identifier[array] ( identifier[out_array] ). identifier[astype] ( identifier[numpy] . identifier[uint8] )
def interpolate_colors(array: numpy.ndarray, x: int) -> numpy.ndarray: """ Creates a color map for values in array :param array: color map to interpolate :param x: number of colors :return: interpolated color map """ out_array = [] for i in range(x): if i % (x / (len(array) - 1)) == 0: index = i / (x / (len(array) - 1)) out_array.append(array[int(index)]) # depends on [control=['if'], data=[]] else: start_marker = array[math.floor(i / (x / (len(array) - 1)))] stop_marker = array[math.ceil(i / (x / (len(array) - 1)))] interp_amount = i % (x / (len(array) - 1)) / (x / (len(array) - 1)) interp_color = numpy.rint(start_marker + (stop_marker - start_marker) * interp_amount) out_array.append(interp_color) # depends on [control=['for'], data=['i']] out_array[-1] = array[-1] return numpy.array(out_array).astype(numpy.uint8)
def send_message(self, stream, msg):
    """Send an arbitrary message to a particular client.

    Parameters
    ----------
    stream : :class:`tornado.iostream.IOStream` object
        The stream to send the message to.
    msg : Message object
        The message to send.

    Notes
    -----
    This method can only be called in the IOLoop thread.

    Failed sends disconnect the client connection and calls the device
    on_client_disconnect() method. They do not raise exceptions, but they
    are logged. Sends also fail if more than self.MAX_WRITE_BUFFER_SIZE
    bytes are queued for sending, implying that client is falling behind.
    """
    # Guard against cross-thread misuse: writes are only legal on the
    # thread that owns the IOLoop.
    assert get_thread_ident() == self.ioloop_thread_id
    try:
        if stream.KATCPServer_closing:
            raise RuntimeError('Stream is closing so we cannot '
                               'accept any more writes')
        return stream.write(str(msg) + '\n')
    except Exception:
        # Per the contract above: failed sends are logged, never raised;
        # closing the stream triggers the disconnect machinery.
        addr = self.get_address(stream)
        # Logger.warn is a deprecated alias of Logger.warning
        self._logger.warning('Could not send message {0!r} to {1}'
                             .format(str(msg), addr), exc_info=True)
        stream.close(exc_info=True)
def function[send_message, parameter[self, stream, msg]]: constant[Send an arbitrary message to a particular client. Parameters ---------- stream : :class:`tornado.iostream.IOStream` object The stream to send the message to. msg : Message object The message to send. Notes ----- This method can only be called in the IOLoop thread. Failed sends disconnect the client connection and calls the device on_client_disconnect() method. They do not raise exceptions, but they are logged. Sends also fail if more than self.MAX_WRITE_BUFFER_SIZE bytes are queued for sending, implying that client is falling behind. ] assert[compare[call[name[get_thread_ident], parameter[]] equal[==] name[self].ioloop_thread_id]] <ast.Try object at 0x7da20c6a9c00>
keyword[def] identifier[send_message] ( identifier[self] , identifier[stream] , identifier[msg] ): literal[string] keyword[assert] identifier[get_thread_ident] ()== identifier[self] . identifier[ioloop_thread_id] keyword[try] : keyword[if] identifier[stream] . identifier[KATCPServer_closing] : keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] ) keyword[return] identifier[stream] . identifier[write] ( identifier[str] ( identifier[msg] )+ literal[string] ) keyword[except] identifier[Exception] : identifier[addr] = identifier[self] . identifier[get_address] ( identifier[stream] ) identifier[self] . identifier[_logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[str] ( identifier[msg] ), identifier[addr] ), identifier[exc_info] = keyword[True] ) identifier[stream] . identifier[close] ( identifier[exc_info] = keyword[True] )
def send_message(self, stream, msg): """Send an arbitrary message to a particular client. Parameters ---------- stream : :class:`tornado.iostream.IOStream` object The stream to send the message to. msg : Message object The message to send. Notes ----- This method can only be called in the IOLoop thread. Failed sends disconnect the client connection and calls the device on_client_disconnect() method. They do not raise exceptions, but they are logged. Sends also fail if more than self.MAX_WRITE_BUFFER_SIZE bytes are queued for sending, implying that client is falling behind. """ assert get_thread_ident() == self.ioloop_thread_id try: if stream.KATCPServer_closing: raise RuntimeError('Stream is closing so we cannot accept any more writes') # depends on [control=['if'], data=[]] return stream.write(str(msg) + '\n') # depends on [control=['try'], data=[]] except Exception: addr = self.get_address(stream) self._logger.warn('Could not send message {0!r} to {1}'.format(str(msg), addr), exc_info=True) stream.close(exc_info=True) # depends on [control=['except'], data=[]]
def triggered(self, walker): """Check if this input is triggered on the given stream walker. Args: walker (StreamWalker): The walker to check Returns: bool: Whether this trigger is triggered or not """ if self.use_count: comp_value = walker.count() else: if walker.count() == 0: return False comp_value = walker.peek().value return self.comp_function(comp_value, self.reference)
def function[triggered, parameter[self, walker]]: constant[Check if this input is triggered on the given stream walker. Args: walker (StreamWalker): The walker to check Returns: bool: Whether this trigger is triggered or not ] if name[self].use_count begin[:] variable[comp_value] assign[=] call[name[walker].count, parameter[]] return[call[name[self].comp_function, parameter[name[comp_value], name[self].reference]]]
keyword[def] identifier[triggered] ( identifier[self] , identifier[walker] ): literal[string] keyword[if] identifier[self] . identifier[use_count] : identifier[comp_value] = identifier[walker] . identifier[count] () keyword[else] : keyword[if] identifier[walker] . identifier[count] ()== literal[int] : keyword[return] keyword[False] identifier[comp_value] = identifier[walker] . identifier[peek] (). identifier[value] keyword[return] identifier[self] . identifier[comp_function] ( identifier[comp_value] , identifier[self] . identifier[reference] )
def triggered(self, walker): """Check if this input is triggered on the given stream walker. Args: walker (StreamWalker): The walker to check Returns: bool: Whether this trigger is triggered or not """ if self.use_count: comp_value = walker.count() # depends on [control=['if'], data=[]] else: if walker.count() == 0: return False # depends on [control=['if'], data=[]] comp_value = walker.peek().value return self.comp_function(comp_value, self.reference)
def factors(self): """ Access the factors :returns: twilio.rest.authy.v1.service.entity.factor.FactorList :rtype: twilio.rest.authy.v1.service.entity.factor.FactorList """ if self._factors is None: self._factors = FactorList( self._version, service_sid=self._solution['service_sid'], identity=self._solution['identity'], ) return self._factors
def function[factors, parameter[self]]: constant[ Access the factors :returns: twilio.rest.authy.v1.service.entity.factor.FactorList :rtype: twilio.rest.authy.v1.service.entity.factor.FactorList ] if compare[name[self]._factors is constant[None]] begin[:] name[self]._factors assign[=] call[name[FactorList], parameter[name[self]._version]] return[name[self]._factors]
keyword[def] identifier[factors] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_factors] keyword[is] keyword[None] : identifier[self] . identifier[_factors] = identifier[FactorList] ( identifier[self] . identifier[_version] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[identity] = identifier[self] . identifier[_solution] [ literal[string] ], ) keyword[return] identifier[self] . identifier[_factors]
def factors(self): """ Access the factors :returns: twilio.rest.authy.v1.service.entity.factor.FactorList :rtype: twilio.rest.authy.v1.service.entity.factor.FactorList """ if self._factors is None: self._factors = FactorList(self._version, service_sid=self._solution['service_sid'], identity=self._solution['identity']) # depends on [control=['if'], data=[]] return self._factors
def get_email_context(self,**kwargs): ''' Overrides EmailRecipientMixin ''' context = super(Event,self).get_email_context(**kwargs) context.update({ 'id': self.id, 'name': self.__str__(), 'title': self.name, 'start': self.firstOccurrenceTime, 'next': self.nextOccurrenceTime, 'last': self.lastOccurrenceTime, 'url': self.url, }) return context
def function[get_email_context, parameter[self]]: constant[ Overrides EmailRecipientMixin ] variable[context] assign[=] call[call[name[super], parameter[name[Event], name[self]]].get_email_context, parameter[]] call[name[context].update, parameter[dictionary[[<ast.Constant object at 0x7da1b13e0220>, <ast.Constant object at 0x7da1b13e1120>, <ast.Constant object at 0x7da1b13e1090>, <ast.Constant object at 0x7da1b13e01c0>, <ast.Constant object at 0x7da1b13e0790>, <ast.Constant object at 0x7da1b13e10f0>, <ast.Constant object at 0x7da1b13e0880>], [<ast.Attribute object at 0x7da1b13e0fd0>, <ast.Call object at 0x7da1b13e39d0>, <ast.Attribute object at 0x7da1b13e3a90>, <ast.Attribute object at 0x7da1b13e35e0>, <ast.Attribute object at 0x7da1b13e31c0>, <ast.Attribute object at 0x7da1b13e33d0>, <ast.Attribute object at 0x7da1b13e1e70>]]]] return[name[context]]
keyword[def] identifier[get_email_context] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[context] = identifier[super] ( identifier[Event] , identifier[self] ). identifier[get_email_context] (** identifier[kwargs] ) identifier[context] . identifier[update] ({ literal[string] : identifier[self] . identifier[id] , literal[string] : identifier[self] . identifier[__str__] (), literal[string] : identifier[self] . identifier[name] , literal[string] : identifier[self] . identifier[firstOccurrenceTime] , literal[string] : identifier[self] . identifier[nextOccurrenceTime] , literal[string] : identifier[self] . identifier[lastOccurrenceTime] , literal[string] : identifier[self] . identifier[url] , }) keyword[return] identifier[context]
def get_email_context(self, **kwargs): """ Overrides EmailRecipientMixin """ context = super(Event, self).get_email_context(**kwargs) context.update({'id': self.id, 'name': self.__str__(), 'title': self.name, 'start': self.firstOccurrenceTime, 'next': self.nextOccurrenceTime, 'last': self.lastOccurrenceTime, 'url': self.url}) return context
def add_f90_to_env(env): """Add Builders and construction variables for f90 to an Environment.""" try: F90Suffixes = env['F90FILESUFFIXES'] except KeyError: F90Suffixes = ['.f90'] #print("Adding %s to f90 suffixes" % F90Suffixes) try: F90PPSuffixes = env['F90PPFILESUFFIXES'] except KeyError: F90PPSuffixes = [] DialectAddToEnv(env, "F90", F90Suffixes, F90PPSuffixes, support_module = 1)
def function[add_f90_to_env, parameter[env]]: constant[Add Builders and construction variables for f90 to an Environment.] <ast.Try object at 0x7da204963fa0> <ast.Try object at 0x7da20e9608b0> call[name[DialectAddToEnv], parameter[name[env], constant[F90], name[F90Suffixes], name[F90PPSuffixes]]]
keyword[def] identifier[add_f90_to_env] ( identifier[env] ): literal[string] keyword[try] : identifier[F90Suffixes] = identifier[env] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[F90Suffixes] =[ literal[string] ] keyword[try] : identifier[F90PPSuffixes] = identifier[env] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[F90PPSuffixes] =[] identifier[DialectAddToEnv] ( identifier[env] , literal[string] , identifier[F90Suffixes] , identifier[F90PPSuffixes] , identifier[support_module] = literal[int] )
def add_f90_to_env(env): """Add Builders and construction variables for f90 to an Environment.""" try: F90Suffixes = env['F90FILESUFFIXES'] # depends on [control=['try'], data=[]] except KeyError: F90Suffixes = ['.f90'] # depends on [control=['except'], data=[]] #print("Adding %s to f90 suffixes" % F90Suffixes) try: F90PPSuffixes = env['F90PPFILESUFFIXES'] # depends on [control=['try'], data=[]] except KeyError: F90PPSuffixes = [] # depends on [control=['except'], data=[]] DialectAddToEnv(env, 'F90', F90Suffixes, F90PPSuffixes, support_module=1)
def write_string_on_file_between_markers(filename: str, string: str, marker: str): r"""Write the table of contents on a single file. :parameter filename: the file that needs to be read or modified. :parameter string: the string that will be written on the file. :parameter marker: a marker that will identify the start and the end of the string. :type filenames: str :type string: str :type marker: str :returns: None :rtype: None :raises: StdinIsNotAFileToBeWritten or an fpyutils exception or a built-in exception. """ if filename == '-': raise StdinIsNotAFileToBeWritten final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n' marker_line_positions = fpyutils.get_line_matches( filename, marker, 2, loose_matching=True) if 1 in marker_line_positions: if 2 in marker_line_positions: fpyutils.remove_line_interval(filename, marker_line_positions[1], marker_line_positions[2], filename) else: fpyutils.remove_line_interval(filename, marker_line_positions[1], marker_line_positions[1], filename) fpyutils.insert_string_at_line( filename, final_string, marker_line_positions[1], filename, append=False)
def function[write_string_on_file_between_markers, parameter[filename, string, marker]]: constant[Write the table of contents on a single file. :parameter filename: the file that needs to be read or modified. :parameter string: the string that will be written on the file. :parameter marker: a marker that will identify the start and the end of the string. :type filenames: str :type string: str :type marker: str :returns: None :rtype: None :raises: StdinIsNotAFileToBeWritten or an fpyutils exception or a built-in exception. ] if compare[name[filename] equal[==] constant[-]] begin[:] <ast.Raise object at 0x7da204962110> variable[final_string] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[marker] + constant[ ]] + call[name[string].rstrip, parameter[]]] + constant[ ]] + name[marker]] + constant[ ]] variable[marker_line_positions] assign[=] call[name[fpyutils].get_line_matches, parameter[name[filename], name[marker], constant[2]]] if compare[constant[1] in name[marker_line_positions]] begin[:] if compare[constant[2] in name[marker_line_positions]] begin[:] call[name[fpyutils].remove_line_interval, parameter[name[filename], call[name[marker_line_positions]][constant[1]], call[name[marker_line_positions]][constant[2]], name[filename]]] call[name[fpyutils].insert_string_at_line, parameter[name[filename], name[final_string], call[name[marker_line_positions]][constant[1]], name[filename]]]
keyword[def] identifier[write_string_on_file_between_markers] ( identifier[filename] : identifier[str] , identifier[string] : identifier[str] , identifier[marker] : identifier[str] ): literal[string] keyword[if] identifier[filename] == literal[string] : keyword[raise] identifier[StdinIsNotAFileToBeWritten] identifier[final_string] = identifier[marker] + literal[string] + identifier[string] . identifier[rstrip] ()+ literal[string] + identifier[marker] + literal[string] identifier[marker_line_positions] = identifier[fpyutils] . identifier[get_line_matches] ( identifier[filename] , identifier[marker] , literal[int] , identifier[loose_matching] = keyword[True] ) keyword[if] literal[int] keyword[in] identifier[marker_line_positions] : keyword[if] literal[int] keyword[in] identifier[marker_line_positions] : identifier[fpyutils] . identifier[remove_line_interval] ( identifier[filename] , identifier[marker_line_positions] [ literal[int] ], identifier[marker_line_positions] [ literal[int] ], identifier[filename] ) keyword[else] : identifier[fpyutils] . identifier[remove_line_interval] ( identifier[filename] , identifier[marker_line_positions] [ literal[int] ], identifier[marker_line_positions] [ literal[int] ], identifier[filename] ) identifier[fpyutils] . identifier[insert_string_at_line] ( identifier[filename] , identifier[final_string] , identifier[marker_line_positions] [ literal[int] ], identifier[filename] , identifier[append] = keyword[False] )
def write_string_on_file_between_markers(filename: str, string: str, marker: str): """Write the table of contents on a single file. :parameter filename: the file that needs to be read or modified. :parameter string: the string that will be written on the file. :parameter marker: a marker that will identify the start and the end of the string. :type filenames: str :type string: str :type marker: str :returns: None :rtype: None :raises: StdinIsNotAFileToBeWritten or an fpyutils exception or a built-in exception. """ if filename == '-': raise StdinIsNotAFileToBeWritten # depends on [control=['if'], data=[]] final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n' marker_line_positions = fpyutils.get_line_matches(filename, marker, 2, loose_matching=True) if 1 in marker_line_positions: if 2 in marker_line_positions: fpyutils.remove_line_interval(filename, marker_line_positions[1], marker_line_positions[2], filename) # depends on [control=['if'], data=['marker_line_positions']] else: fpyutils.remove_line_interval(filename, marker_line_positions[1], marker_line_positions[1], filename) fpyutils.insert_string_at_line(filename, final_string, marker_line_positions[1], filename, append=False) # depends on [control=['if'], data=['marker_line_positions']]
def diropenbox(msg=None, title=None, argInitialDir=None): """Original doc: A dialog to get a directory name. Note that the msg argument, if specified, is ignored. Returns the name of a directory, or None if user chose to cancel. If an initial directory is specified in argument 3, and that directory exists, then the dialog box will start with that directory. """ return psidialogs.ask_folder(message=msg, title=title, default=argInitialDir)
def function[diropenbox, parameter[msg, title, argInitialDir]]: constant[Original doc: A dialog to get a directory name. Note that the msg argument, if specified, is ignored. Returns the name of a directory, or None if user chose to cancel. If an initial directory is specified in argument 3, and that directory exists, then the dialog box will start with that directory. ] return[call[name[psidialogs].ask_folder, parameter[]]]
keyword[def] identifier[diropenbox] ( identifier[msg] = keyword[None] , identifier[title] = keyword[None] , identifier[argInitialDir] = keyword[None] ): literal[string] keyword[return] identifier[psidialogs] . identifier[ask_folder] ( identifier[message] = identifier[msg] , identifier[title] = identifier[title] , identifier[default] = identifier[argInitialDir] )
def diropenbox(msg=None, title=None, argInitialDir=None): """Original doc: A dialog to get a directory name. Note that the msg argument, if specified, is ignored. Returns the name of a directory, or None if user chose to cancel. If an initial directory is specified in argument 3, and that directory exists, then the dialog box will start with that directory. """ return psidialogs.ask_folder(message=msg, title=title, default=argInitialDir)
def dispatch_to_awaiting(self,result): """ Send dat ato the appropriate queues """ # If we are awaiting to login, then we might also get # an abort message. Handle that here.... if self._state == STATE_AUTHENTICATING: # If the authentication message is something unexpected, # we'll just ignore it for now if result == WAMP_ABORT \ or result == WAMP_WELCOME \ or result == WAMP_GOODBYE: self._welcome_queue.put(result) return try: request_id = result.request_id if request_id in self._requests_pending: self._requests_pending[request_id].put(result) del self._requests_pending[request_id] except: raise Exception("Response does not have a request id. Do not know who to send data to. Data: {} ".format(result.dump()))
def function[dispatch_to_awaiting, parameter[self, result]]: constant[ Send dat ato the appropriate queues ] if compare[name[self]._state equal[==] name[STATE_AUTHENTICATING]] begin[:] if <ast.BoolOp object at 0x7da20e956470> begin[:] call[name[self]._welcome_queue.put, parameter[name[result]]] return[None] <ast.Try object at 0x7da20e956200>
keyword[def] identifier[dispatch_to_awaiting] ( identifier[self] , identifier[result] ): literal[string] keyword[if] identifier[self] . identifier[_state] == identifier[STATE_AUTHENTICATING] : keyword[if] identifier[result] == identifier[WAMP_ABORT] keyword[or] identifier[result] == identifier[WAMP_WELCOME] keyword[or] identifier[result] == identifier[WAMP_GOODBYE] : identifier[self] . identifier[_welcome_queue] . identifier[put] ( identifier[result] ) keyword[return] keyword[try] : identifier[request_id] = identifier[result] . identifier[request_id] keyword[if] identifier[request_id] keyword[in] identifier[self] . identifier[_requests_pending] : identifier[self] . identifier[_requests_pending] [ identifier[request_id] ]. identifier[put] ( identifier[result] ) keyword[del] identifier[self] . identifier[_requests_pending] [ identifier[request_id] ] keyword[except] : keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[result] . identifier[dump] ()))
def dispatch_to_awaiting(self, result): """ Send dat ato the appropriate queues """ # If we are awaiting to login, then we might also get # an abort message. Handle that here.... if self._state == STATE_AUTHENTICATING: # If the authentication message is something unexpected, # we'll just ignore it for now if result == WAMP_ABORT or result == WAMP_WELCOME or result == WAMP_GOODBYE: self._welcome_queue.put(result) # depends on [control=['if'], data=[]] return # depends on [control=['if'], data=[]] try: request_id = result.request_id if request_id in self._requests_pending: self._requests_pending[request_id].put(result) del self._requests_pending[request_id] # depends on [control=['if'], data=['request_id']] # depends on [control=['try'], data=[]] except: raise Exception('Response does not have a request id. Do not know who to send data to. Data: {} '.format(result.dump())) # depends on [control=['except'], data=[]]
def stop_capture(self, adapter_number, port_number): """ Stops a packet capture. :param adapter_number: adapter number :param port_number: port number """ try: adapter = self._adapters[adapter_number] except IndexError: raise IOUError('Adapter {adapter_number} does not exist on IOU "{name}"'.format(name=self._name, adapter_number=adapter_number)) if not adapter.port_exists(port_number): raise IOUError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter, port_number=port_number)) nio = adapter.get_nio(port_number) if not nio: raise IOUError("NIO {port_number} does not exist in adapter {adapter}".format(adapter=adapter, port_number=port_number)) nio.stopPacketCapture() log.info('IOU "{name}" [{id}]: stopping packet capture on {adapter_number}/{port_number}'.format(name=self._name, id=self._id, adapter_number=adapter_number, port_number=port_number)) if self.ubridge: bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512) yield from self._ubridge_send('iol_bridge stop_capture {name} {bay} {unit}'.format(name=bridge_name, bay=adapter_number, unit=port_number))
def function[stop_capture, parameter[self, adapter_number, port_number]]: constant[ Stops a packet capture. :param adapter_number: adapter number :param port_number: port number ] <ast.Try object at 0x7da2044c1810> if <ast.UnaryOp object at 0x7da2044c0a60> begin[:] <ast.Raise object at 0x7da2044c2c20> variable[nio] assign[=] call[name[adapter].get_nio, parameter[name[port_number]]] if <ast.UnaryOp object at 0x7da2044c0940> begin[:] <ast.Raise object at 0x7da2044c2cb0> call[name[nio].stopPacketCapture, parameter[]] call[name[log].info, parameter[call[constant[IOU "{name}" [{id}]: stopping packet capture on {adapter_number}/{port_number}].format, parameter[]]]] if name[self].ubridge begin[:] variable[bridge_name] assign[=] call[constant[IOL-BRIDGE-{}].format, parameter[binary_operation[name[self].application_id + constant[512]]]] <ast.YieldFrom object at 0x7da2049612a0>
keyword[def] identifier[stop_capture] ( identifier[self] , identifier[adapter_number] , identifier[port_number] ): literal[string] keyword[try] : identifier[adapter] = identifier[self] . identifier[_adapters] [ identifier[adapter_number] ] keyword[except] identifier[IndexError] : keyword[raise] identifier[IOUError] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[adapter_number] = identifier[adapter_number] )) keyword[if] keyword[not] identifier[adapter] . identifier[port_exists] ( identifier[port_number] ): keyword[raise] identifier[IOUError] ( literal[string] . identifier[format] ( identifier[adapter] = identifier[adapter] , identifier[port_number] = identifier[port_number] )) identifier[nio] = identifier[adapter] . identifier[get_nio] ( identifier[port_number] ) keyword[if] keyword[not] identifier[nio] : keyword[raise] identifier[IOUError] ( literal[string] . identifier[format] ( identifier[adapter] = identifier[adapter] , identifier[port_number] = identifier[port_number] )) identifier[nio] . identifier[stopPacketCapture] () identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[id] = identifier[self] . identifier[_id] , identifier[adapter_number] = identifier[adapter_number] , identifier[port_number] = identifier[port_number] )) keyword[if] identifier[self] . identifier[ubridge] : identifier[bridge_name] = literal[string] . identifier[format] ( identifier[self] . identifier[application_id] + literal[int] ) keyword[yield] keyword[from] identifier[self] . identifier[_ubridge_send] ( literal[string] . identifier[format] ( identifier[name] = identifier[bridge_name] , identifier[bay] = identifier[adapter_number] , identifier[unit] = identifier[port_number] ))
def stop_capture(self, adapter_number, port_number): """ Stops a packet capture. :param adapter_number: adapter number :param port_number: port number """ try: adapter = self._adapters[adapter_number] # depends on [control=['try'], data=[]] except IndexError: raise IOUError('Adapter {adapter_number} does not exist on IOU "{name}"'.format(name=self._name, adapter_number=adapter_number)) # depends on [control=['except'], data=[]] if not adapter.port_exists(port_number): raise IOUError('Port {port_number} does not exist in adapter {adapter}'.format(adapter=adapter, port_number=port_number)) # depends on [control=['if'], data=[]] nio = adapter.get_nio(port_number) if not nio: raise IOUError('NIO {port_number} does not exist in adapter {adapter}'.format(adapter=adapter, port_number=port_number)) # depends on [control=['if'], data=[]] nio.stopPacketCapture() log.info('IOU "{name}" [{id}]: stopping packet capture on {adapter_number}/{port_number}'.format(name=self._name, id=self._id, adapter_number=adapter_number, port_number=port_number)) if self.ubridge: bridge_name = 'IOL-BRIDGE-{}'.format(self.application_id + 512) yield from self._ubridge_send('iol_bridge stop_capture {name} {bay} {unit}'.format(name=bridge_name, bay=adapter_number, unit=port_number)) # depends on [control=['if'], data=[]]
def vafter_ts(self): """Function that is called after a song finishes playing""" logger.debug("Song finishing") future = asyncio.run_coroutine_threadsafe(self.vafter(), client.loop) try: future.result() except Exception as e: logger.exception(e)
def function[vafter_ts, parameter[self]]: constant[Function that is called after a song finishes playing] call[name[logger].debug, parameter[constant[Song finishing]]] variable[future] assign[=] call[name[asyncio].run_coroutine_threadsafe, parameter[call[name[self].vafter, parameter[]], name[client].loop]] <ast.Try object at 0x7da1b1923310>
keyword[def] identifier[vafter_ts] ( identifier[self] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] ) identifier[future] = identifier[asyncio] . identifier[run_coroutine_threadsafe] ( identifier[self] . identifier[vafter] (), identifier[client] . identifier[loop] ) keyword[try] : identifier[future] . identifier[result] () keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[exception] ( identifier[e] )
def vafter_ts(self): """Function that is called after a song finishes playing""" logger.debug('Song finishing') future = asyncio.run_coroutine_threadsafe(self.vafter(), client.loop) try: future.result() # depends on [control=['try'], data=[]] except Exception as e: logger.exception(e) # depends on [control=['except'], data=['e']]
def get_complex_coefficients(self, params): """ Get the arrays ``alpha_complex_*`` and ``beta_complex_*`` This method should be overloaded by subclasses to return the arrays ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` given the current parameter settings. By default, this term is empty. Returns: (array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. ``alpha_complex_imag`` can be omitted and it will be assumed to be zero. """ return np.empty(0), np.empty(0), np.empty(0), np.empty(0)
def function[get_complex_coefficients, parameter[self, params]]: constant[ Get the arrays ``alpha_complex_*`` and ``beta_complex_*`` This method should be overloaded by subclasses to return the arrays ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` given the current parameter settings. By default, this term is empty. Returns: (array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. ``alpha_complex_imag`` can be omitted and it will be assumed to be zero. ] return[tuple[[<ast.Call object at 0x7da1b1be50f0>, <ast.Call object at 0x7da1b1be7d60>, <ast.Call object at 0x7da1b1be7070>, <ast.Call object at 0x7da1b1be6350>]]]
keyword[def] identifier[get_complex_coefficients] ( identifier[self] , identifier[params] ): literal[string] keyword[return] identifier[np] . identifier[empty] ( literal[int] ), identifier[np] . identifier[empty] ( literal[int] ), identifier[np] . identifier[empty] ( literal[int] ), identifier[np] . identifier[empty] ( literal[int] )
def get_complex_coefficients(self, params): """ Get the arrays ``alpha_complex_*`` and ``beta_complex_*`` This method should be overloaded by subclasses to return the arrays ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` given the current parameter settings. By default, this term is empty. Returns: (array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. ``alpha_complex_imag`` can be omitted and it will be assumed to be zero. """ return (np.empty(0), np.empty(0), np.empty(0), np.empty(0))
def register_class(klass, alias=None): """ Registers a class to be used in the data streaming. This is the equivalent to the C{[RemoteClass(alias="foobar")]} AS3 metatag. @return: The registered L{ClassAlias} instance. @see: L{unregister_class} """ meta = util.get_class_meta(klass) if alias is not None: meta['alias'] = alias alias_klass = util.get_class_alias(klass) or ClassAlias x = alias_klass(klass, defer=True, **meta) if not x.anonymous: CLASS_CACHE[x.alias] = x CLASS_CACHE[klass] = x return x
def function[register_class, parameter[klass, alias]]: constant[ Registers a class to be used in the data streaming. This is the equivalent to the C{[RemoteClass(alias="foobar")]} AS3 metatag. @return: The registered L{ClassAlias} instance. @see: L{unregister_class} ] variable[meta] assign[=] call[name[util].get_class_meta, parameter[name[klass]]] if compare[name[alias] is_not constant[None]] begin[:] call[name[meta]][constant[alias]] assign[=] name[alias] variable[alias_klass] assign[=] <ast.BoolOp object at 0x7da18dc9b220> variable[x] assign[=] call[name[alias_klass], parameter[name[klass]]] if <ast.UnaryOp object at 0x7da18dc9ada0> begin[:] call[name[CLASS_CACHE]][name[x].alias] assign[=] name[x] call[name[CLASS_CACHE]][name[klass]] assign[=] name[x] return[name[x]]
keyword[def] identifier[register_class] ( identifier[klass] , identifier[alias] = keyword[None] ): literal[string] identifier[meta] = identifier[util] . identifier[get_class_meta] ( identifier[klass] ) keyword[if] identifier[alias] keyword[is] keyword[not] keyword[None] : identifier[meta] [ literal[string] ]= identifier[alias] identifier[alias_klass] = identifier[util] . identifier[get_class_alias] ( identifier[klass] ) keyword[or] identifier[ClassAlias] identifier[x] = identifier[alias_klass] ( identifier[klass] , identifier[defer] = keyword[True] ,** identifier[meta] ) keyword[if] keyword[not] identifier[x] . identifier[anonymous] : identifier[CLASS_CACHE] [ identifier[x] . identifier[alias] ]= identifier[x] identifier[CLASS_CACHE] [ identifier[klass] ]= identifier[x] keyword[return] identifier[x]
def register_class(klass, alias=None): """ Registers a class to be used in the data streaming. This is the equivalent to the C{[RemoteClass(alias="foobar")]} AS3 metatag. @return: The registered L{ClassAlias} instance. @see: L{unregister_class} """ meta = util.get_class_meta(klass) if alias is not None: meta['alias'] = alias # depends on [control=['if'], data=['alias']] alias_klass = util.get_class_alias(klass) or ClassAlias x = alias_klass(klass, defer=True, **meta) if not x.anonymous: CLASS_CACHE[x.alias] = x # depends on [control=['if'], data=[]] CLASS_CACHE[klass] = x return x
def init_argparser_optional_advice( self, argparser, default=[], help=( 'a comma separated list of packages to retrieve optional ' 'advice from; the provided packages should have registered ' 'the appropriate entry points for setting up the advices for ' 'the toolchain; refer to documentation for the specified ' 'packages for details' )): """ For setting up optional advice. """ argparser.add_argument( '--optional-advice', default=default, required=False, dest=ADVICE_PACKAGES, action=StoreRequirementList, metavar='<advice>[,<advice>[...]]', help=help )
def function[init_argparser_optional_advice, parameter[self, argparser, default, help]]: constant[ For setting up optional advice. ] call[name[argparser].add_argument, parameter[constant[--optional-advice]]]
keyword[def] identifier[init_argparser_optional_advice] ( identifier[self] , identifier[argparser] , identifier[default] =[], identifier[help] =( literal[string] literal[string] literal[string] literal[string] literal[string] )): literal[string] identifier[argparser] . identifier[add_argument] ( literal[string] , identifier[default] = identifier[default] , identifier[required] = keyword[False] , identifier[dest] = identifier[ADVICE_PACKAGES] , identifier[action] = identifier[StoreRequirementList] , identifier[metavar] = literal[string] , identifier[help] = identifier[help] )
def init_argparser_optional_advice(self, argparser, default=[], help='a comma separated list of packages to retrieve optional advice from; the provided packages should have registered the appropriate entry points for setting up the advices for the toolchain; refer to documentation for the specified packages for details'): """ For setting up optional advice. """ argparser.add_argument('--optional-advice', default=default, required=False, dest=ADVICE_PACKAGES, action=StoreRequirementList, metavar='<advice>[,<advice>[...]]', help=help)
def response_delay(self, delay): """Setter method; for a description see the getter method.""" if isinstance(delay, (int, float)) and delay >= 0 or delay is None: self._response_delay = delay else: raise ValueError( _format("Invalid value for response_delay: {0!A}, must be a " "positive number", delay))
def function[response_delay, parameter[self, delay]]: constant[Setter method; for a description see the getter method.] if <ast.BoolOp object at 0x7da20c76cbe0> begin[:] name[self]._response_delay assign[=] name[delay]
keyword[def] identifier[response_delay] ( identifier[self] , identifier[delay] ): literal[string] keyword[if] identifier[isinstance] ( identifier[delay] ,( identifier[int] , identifier[float] )) keyword[and] identifier[delay] >= literal[int] keyword[or] identifier[delay] keyword[is] keyword[None] : identifier[self] . identifier[_response_delay] = identifier[delay] keyword[else] : keyword[raise] identifier[ValueError] ( identifier[_format] ( literal[string] literal[string] , identifier[delay] ))
def response_delay(self, delay): """Setter method; for a description see the getter method.""" if isinstance(delay, (int, float)) and delay >= 0 or delay is None: self._response_delay = delay # depends on [control=['if'], data=[]] else: raise ValueError(_format('Invalid value for response_delay: {0!A}, must be a positive number', delay))
def to_vararray(var_instance, bounds): """ Converts a var_instance to a var array one """ assert isinstance(var_instance, SymbolVAR) from symbols import BOUNDLIST from symbols import VARARRAY assert isinstance(bounds, BOUNDLIST) var_instance.__class__ = VARARRAY var_instance.class_ = CLASS.array var_instance.bounds = bounds return var_instance
def function[to_vararray, parameter[var_instance, bounds]]: constant[ Converts a var_instance to a var array one ] assert[call[name[isinstance], parameter[name[var_instance], name[SymbolVAR]]]] from relative_module[symbols] import module[BOUNDLIST] from relative_module[symbols] import module[VARARRAY] assert[call[name[isinstance], parameter[name[bounds], name[BOUNDLIST]]]] name[var_instance].__class__ assign[=] name[VARARRAY] name[var_instance].class_ assign[=] name[CLASS].array name[var_instance].bounds assign[=] name[bounds] return[name[var_instance]]
keyword[def] identifier[to_vararray] ( identifier[var_instance] , identifier[bounds] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[var_instance] , identifier[SymbolVAR] ) keyword[from] identifier[symbols] keyword[import] identifier[BOUNDLIST] keyword[from] identifier[symbols] keyword[import] identifier[VARARRAY] keyword[assert] identifier[isinstance] ( identifier[bounds] , identifier[BOUNDLIST] ) identifier[var_instance] . identifier[__class__] = identifier[VARARRAY] identifier[var_instance] . identifier[class_] = identifier[CLASS] . identifier[array] identifier[var_instance] . identifier[bounds] = identifier[bounds] keyword[return] identifier[var_instance]
def to_vararray(var_instance, bounds): """ Converts a var_instance to a var array one """ assert isinstance(var_instance, SymbolVAR) from symbols import BOUNDLIST from symbols import VARARRAY assert isinstance(bounds, BOUNDLIST) var_instance.__class__ = VARARRAY var_instance.class_ = CLASS.array var_instance.bounds = bounds return var_instance
def _space(self, hwr_obj, stroke, kind): """Do the interpolation of 'kind' for 'stroke'""" new_stroke = [] stroke = sorted(stroke, key=lambda p: p['time']) x, y, t = [], [], [] for point in stroke: x.append(point['x']) y.append(point['y']) t.append(point['time']) x, y = numpy.array(x), numpy.array(y) failed = False try: fx = interp1d(t, x, kind=kind) fy = interp1d(t, y, kind=kind) except Exception as e: # pylint: disable=W0703 if hwr_obj.raw_data_id is not None: logging.debug("spline failed for raw_data_id %i", hwr_obj.raw_data_id) else: logging.debug("spline failed") logging.debug(e) failed = True tnew = numpy.linspace(t[0], t[-1], self.number) # linear interpolation fallback due to # https://github.com/scipy/scipy/issues/3868 if failed: try: fx = interp1d(t, x, kind='linear') fy = interp1d(t, y, kind='linear') failed = False except Exception as e: logging.debug("len(stroke) = %i", len(stroke)) logging.debug("len(x) = %i", len(x)) logging.debug("len(y) = %i", len(y)) logging.debug("stroke=%s", stroke) raise e for x, y, t in zip(fx(tnew), fy(tnew), tnew): new_stroke.append({'x': x, 'y': y, 'time': t}) return new_stroke
def function[_space, parameter[self, hwr_obj, stroke, kind]]: constant[Do the interpolation of 'kind' for 'stroke'] variable[new_stroke] assign[=] list[[]] variable[stroke] assign[=] call[name[sorted], parameter[name[stroke]]] <ast.Tuple object at 0x7da1b287c910> assign[=] tuple[[<ast.List object at 0x7da1b287e950>, <ast.List object at 0x7da1b287c310>, <ast.List object at 0x7da1b287c0a0>]] for taget[name[point]] in starred[name[stroke]] begin[:] call[name[x].append, parameter[call[name[point]][constant[x]]]] call[name[y].append, parameter[call[name[point]][constant[y]]]] call[name[t].append, parameter[call[name[point]][constant[time]]]] <ast.Tuple object at 0x7da1b287e680> assign[=] tuple[[<ast.Call object at 0x7da1b287c4f0>, <ast.Call object at 0x7da1b287c700>]] variable[failed] assign[=] constant[False] <ast.Try object at 0x7da1b287ed70> variable[tnew] assign[=] call[name[numpy].linspace, parameter[call[name[t]][constant[0]], call[name[t]][<ast.UnaryOp object at 0x7da1b28dc5e0>], name[self].number]] if name[failed] begin[:] <ast.Try object at 0x7da1b28deda0> for taget[tuple[[<ast.Name object at 0x7da1b28dc250>, <ast.Name object at 0x7da1b28deb60>, <ast.Name object at 0x7da1b28dcb20>]]] in starred[call[name[zip], parameter[call[name[fx], parameter[name[tnew]]], call[name[fy], parameter[name[tnew]]], name[tnew]]]] begin[:] call[name[new_stroke].append, parameter[dictionary[[<ast.Constant object at 0x7da1b28dd540>, <ast.Constant object at 0x7da1b28ddea0>, <ast.Constant object at 0x7da1b28dce80>], [<ast.Name object at 0x7da1b28dc0a0>, <ast.Name object at 0x7da1b28de8c0>, <ast.Name object at 0x7da1b28dc040>]]]] return[name[new_stroke]]
keyword[def] identifier[_space] ( identifier[self] , identifier[hwr_obj] , identifier[stroke] , identifier[kind] ): literal[string] identifier[new_stroke] =[] identifier[stroke] = identifier[sorted] ( identifier[stroke] , identifier[key] = keyword[lambda] identifier[p] : identifier[p] [ literal[string] ]) identifier[x] , identifier[y] , identifier[t] =[],[],[] keyword[for] identifier[point] keyword[in] identifier[stroke] : identifier[x] . identifier[append] ( identifier[point] [ literal[string] ]) identifier[y] . identifier[append] ( identifier[point] [ literal[string] ]) identifier[t] . identifier[append] ( identifier[point] [ literal[string] ]) identifier[x] , identifier[y] = identifier[numpy] . identifier[array] ( identifier[x] ), identifier[numpy] . identifier[array] ( identifier[y] ) identifier[failed] = keyword[False] keyword[try] : identifier[fx] = identifier[interp1d] ( identifier[t] , identifier[x] , identifier[kind] = identifier[kind] ) identifier[fy] = identifier[interp1d] ( identifier[t] , identifier[y] , identifier[kind] = identifier[kind] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[if] identifier[hwr_obj] . identifier[raw_data_id] keyword[is] keyword[not] keyword[None] : identifier[logging] . identifier[debug] ( literal[string] , identifier[hwr_obj] . identifier[raw_data_id] ) keyword[else] : identifier[logging] . identifier[debug] ( literal[string] ) identifier[logging] . identifier[debug] ( identifier[e] ) identifier[failed] = keyword[True] identifier[tnew] = identifier[numpy] . identifier[linspace] ( identifier[t] [ literal[int] ], identifier[t] [- literal[int] ], identifier[self] . 
identifier[number] ) keyword[if] identifier[failed] : keyword[try] : identifier[fx] = identifier[interp1d] ( identifier[t] , identifier[x] , identifier[kind] = literal[string] ) identifier[fy] = identifier[interp1d] ( identifier[t] , identifier[y] , identifier[kind] = literal[string] ) identifier[failed] = keyword[False] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logging] . identifier[debug] ( literal[string] , identifier[len] ( identifier[stroke] )) identifier[logging] . identifier[debug] ( literal[string] , identifier[len] ( identifier[x] )) identifier[logging] . identifier[debug] ( literal[string] , identifier[len] ( identifier[y] )) identifier[logging] . identifier[debug] ( literal[string] , identifier[stroke] ) keyword[raise] identifier[e] keyword[for] identifier[x] , identifier[y] , identifier[t] keyword[in] identifier[zip] ( identifier[fx] ( identifier[tnew] ), identifier[fy] ( identifier[tnew] ), identifier[tnew] ): identifier[new_stroke] . identifier[append] ({ literal[string] : identifier[x] , literal[string] : identifier[y] , literal[string] : identifier[t] }) keyword[return] identifier[new_stroke]
def _space(self, hwr_obj, stroke, kind): """Do the interpolation of 'kind' for 'stroke'""" new_stroke = [] stroke = sorted(stroke, key=lambda p: p['time']) (x, y, t) = ([], [], []) for point in stroke: x.append(point['x']) y.append(point['y']) t.append(point['time']) # depends on [control=['for'], data=['point']] (x, y) = (numpy.array(x), numpy.array(y)) failed = False try: fx = interp1d(t, x, kind=kind) fy = interp1d(t, y, kind=kind) # depends on [control=['try'], data=[]] except Exception as e: # pylint: disable=W0703 if hwr_obj.raw_data_id is not None: logging.debug('spline failed for raw_data_id %i', hwr_obj.raw_data_id) # depends on [control=['if'], data=[]] else: logging.debug('spline failed') logging.debug(e) failed = True # depends on [control=['except'], data=['e']] tnew = numpy.linspace(t[0], t[-1], self.number) # linear interpolation fallback due to # https://github.com/scipy/scipy/issues/3868 if failed: try: fx = interp1d(t, x, kind='linear') fy = interp1d(t, y, kind='linear') failed = False # depends on [control=['try'], data=[]] except Exception as e: logging.debug('len(stroke) = %i', len(stroke)) logging.debug('len(x) = %i', len(x)) logging.debug('len(y) = %i', len(y)) logging.debug('stroke=%s', stroke) raise e # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] for (x, y, t) in zip(fx(tnew), fy(tnew), tnew): new_stroke.append({'x': x, 'y': y, 'time': t}) # depends on [control=['for'], data=[]] return new_stroke
def decode_cookie(cookie, key=None): ''' This decodes a cookie given by `encode_cookie`. If verification of the cookie fails, ``None`` will be implicitly returned. :param cookie: An encoded cookie. :type cookie: str :param key: The key to use when creating the cookie digest. If not specified, the SECRET_KEY value from app config will be used. :type key: str ''' try: payload, digest = cookie.rsplit(u'|', 1) if hasattr(digest, 'decode'): digest = digest.decode('ascii') # pragma: no cover except ValueError: return if safe_str_cmp(_cookie_digest(payload, key=key), digest): return payload
def function[decode_cookie, parameter[cookie, key]]: constant[ This decodes a cookie given by `encode_cookie`. If verification of the cookie fails, ``None`` will be implicitly returned. :param cookie: An encoded cookie. :type cookie: str :param key: The key to use when creating the cookie digest. If not specified, the SECRET_KEY value from app config will be used. :type key: str ] <ast.Try object at 0x7da1b23473a0> if call[name[safe_str_cmp], parameter[call[name[_cookie_digest], parameter[name[payload]]], name[digest]]] begin[:] return[name[payload]]
keyword[def] identifier[decode_cookie] ( identifier[cookie] , identifier[key] = keyword[None] ): literal[string] keyword[try] : identifier[payload] , identifier[digest] = identifier[cookie] . identifier[rsplit] ( literal[string] , literal[int] ) keyword[if] identifier[hasattr] ( identifier[digest] , literal[string] ): identifier[digest] = identifier[digest] . identifier[decode] ( literal[string] ) keyword[except] identifier[ValueError] : keyword[return] keyword[if] identifier[safe_str_cmp] ( identifier[_cookie_digest] ( identifier[payload] , identifier[key] = identifier[key] ), identifier[digest] ): keyword[return] identifier[payload]
def decode_cookie(cookie, key=None): """ This decodes a cookie given by `encode_cookie`. If verification of the cookie fails, ``None`` will be implicitly returned. :param cookie: An encoded cookie. :type cookie: str :param key: The key to use when creating the cookie digest. If not specified, the SECRET_KEY value from app config will be used. :type key: str """ try: (payload, digest) = cookie.rsplit(u'|', 1) if hasattr(digest, 'decode'): digest = digest.decode('ascii') # pragma: no cover # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except ValueError: return # depends on [control=['except'], data=[]] if safe_str_cmp(_cookie_digest(payload, key=key), digest): return payload # depends on [control=['if'], data=[]]
def makePalette(color1, color2, N, hsv=True): """ Generate N colors starting from `color1` to `color2` by linear interpolation HSV in or RGB spaces. :param int N: number of output colors. :param color1: first rgb color. :param color2: second rgb color. :param bool hsv: if `False`, interpolation is calculated in RGB space. .. hint:: Example: |colorpalette.py|_ """ if hsv: color1 = rgb2hsv(color1) color2 = rgb2hsv(color2) c1 = np.array(getColor(color1)) c2 = np.array(getColor(color2)) cols = [] for f in np.linspace(0, 1, N - 1, endpoint=True): c = c1 * (1 - f) + c2 * f if hsv: c = np.array(hsv2rgb(c)) cols.append(c) return cols
def function[makePalette, parameter[color1, color2, N, hsv]]: constant[ Generate N colors starting from `color1` to `color2` by linear interpolation HSV in or RGB spaces. :param int N: number of output colors. :param color1: first rgb color. :param color2: second rgb color. :param bool hsv: if `False`, interpolation is calculated in RGB space. .. hint:: Example: |colorpalette.py|_ ] if name[hsv] begin[:] variable[color1] assign[=] call[name[rgb2hsv], parameter[name[color1]]] variable[color2] assign[=] call[name[rgb2hsv], parameter[name[color2]]] variable[c1] assign[=] call[name[np].array, parameter[call[name[getColor], parameter[name[color1]]]]] variable[c2] assign[=] call[name[np].array, parameter[call[name[getColor], parameter[name[color2]]]]] variable[cols] assign[=] list[[]] for taget[name[f]] in starred[call[name[np].linspace, parameter[constant[0], constant[1], binary_operation[name[N] - constant[1]]]]] begin[:] variable[c] assign[=] binary_operation[binary_operation[name[c1] * binary_operation[constant[1] - name[f]]] + binary_operation[name[c2] * name[f]]] if name[hsv] begin[:] variable[c] assign[=] call[name[np].array, parameter[call[name[hsv2rgb], parameter[name[c]]]]] call[name[cols].append, parameter[name[c]]] return[name[cols]]
keyword[def] identifier[makePalette] ( identifier[color1] , identifier[color2] , identifier[N] , identifier[hsv] = keyword[True] ): literal[string] keyword[if] identifier[hsv] : identifier[color1] = identifier[rgb2hsv] ( identifier[color1] ) identifier[color2] = identifier[rgb2hsv] ( identifier[color2] ) identifier[c1] = identifier[np] . identifier[array] ( identifier[getColor] ( identifier[color1] )) identifier[c2] = identifier[np] . identifier[array] ( identifier[getColor] ( identifier[color2] )) identifier[cols] =[] keyword[for] identifier[f] keyword[in] identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[N] - literal[int] , identifier[endpoint] = keyword[True] ): identifier[c] = identifier[c1] *( literal[int] - identifier[f] )+ identifier[c2] * identifier[f] keyword[if] identifier[hsv] : identifier[c] = identifier[np] . identifier[array] ( identifier[hsv2rgb] ( identifier[c] )) identifier[cols] . identifier[append] ( identifier[c] ) keyword[return] identifier[cols]
def makePalette(color1, color2, N, hsv=True): """ Generate N colors starting from `color1` to `color2` by linear interpolation HSV in or RGB spaces. :param int N: number of output colors. :param color1: first rgb color. :param color2: second rgb color. :param bool hsv: if `False`, interpolation is calculated in RGB space. .. hint:: Example: |colorpalette.py|_ """ if hsv: color1 = rgb2hsv(color1) color2 = rgb2hsv(color2) # depends on [control=['if'], data=[]] c1 = np.array(getColor(color1)) c2 = np.array(getColor(color2)) cols = [] for f in np.linspace(0, 1, N - 1, endpoint=True): c = c1 * (1 - f) + c2 * f if hsv: c = np.array(hsv2rgb(c)) # depends on [control=['if'], data=[]] cols.append(c) # depends on [control=['for'], data=['f']] return cols
def updated(self): 'return datetime.datetime' return dateutil.parser.parse(str(self.f.currentRevision.updated))
def function[updated, parameter[self]]: constant[return datetime.datetime] return[call[name[dateutil].parser.parse, parameter[call[name[str], parameter[name[self].f.currentRevision.updated]]]]]
keyword[def] identifier[updated] ( identifier[self] ): literal[string] keyword[return] identifier[dateutil] . identifier[parser] . identifier[parse] ( identifier[str] ( identifier[self] . identifier[f] . identifier[currentRevision] . identifier[updated] ))
def updated(self): """return datetime.datetime""" return dateutil.parser.parse(str(self.f.currentRevision.updated))
def upgrade(): """Upgrade database.""" op.create_table( 'userprofiles_userprofile', sa.Column('user_id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=255), nullable=True), sa.Column('displayname', sa.String(length=255), nullable=True), sa.Column('full_name', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['user_id'], [u'accounts_user.id'], ), sa.PrimaryKeyConstraint('user_id'), sa.UniqueConstraint('username') )
def function[upgrade, parameter[]]: constant[Upgrade database.] call[name[op].create_table, parameter[constant[userprofiles_userprofile], call[name[sa].Column, parameter[constant[user_id], call[name[sa].Integer, parameter[]]]], call[name[sa].Column, parameter[constant[username], call[name[sa].String, parameter[]]]], call[name[sa].Column, parameter[constant[displayname], call[name[sa].String, parameter[]]]], call[name[sa].Column, parameter[constant[full_name], call[name[sa].String, parameter[]]]], call[name[sa].ForeignKeyConstraint, parameter[list[[<ast.Constant object at 0x7da20c795bd0>]], list[[<ast.Constant object at 0x7da20c7951e0>]]]], call[name[sa].PrimaryKeyConstraint, parameter[constant[user_id]]], call[name[sa].UniqueConstraint, parameter[constant[username]]]]]
keyword[def] identifier[upgrade] (): literal[string] identifier[op] . identifier[create_table] ( literal[string] , identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[Integer] (), identifier[nullable] = keyword[False] ), identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[String] ( identifier[length] = literal[int] ), identifier[nullable] = keyword[True] ), identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[String] ( identifier[length] = literal[int] ), identifier[nullable] = keyword[True] ), identifier[sa] . identifier[Column] ( literal[string] , identifier[sa] . identifier[String] ( identifier[length] = literal[int] ), identifier[nullable] = keyword[False] ), identifier[sa] . identifier[ForeignKeyConstraint] ([ literal[string] ],[ literal[string] ],), identifier[sa] . identifier[PrimaryKeyConstraint] ( literal[string] ), identifier[sa] . identifier[UniqueConstraint] ( literal[string] ) )
def upgrade(): """Upgrade database.""" op.create_table('userprofiles_userprofile', sa.Column('user_id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=255), nullable=True), sa.Column('displayname', sa.String(length=255), nullable=True), sa.Column('full_name', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['user_id'], [u'accounts_user.id']), sa.PrimaryKeyConstraint('user_id'), sa.UniqueConstraint('username'))
def reversedict(d: Dict[Any, Any]) -> Dict[Any, Any]: """ Takes a ``k -> v`` mapping and returns a ``v -> k`` mapping. """ return {v: k for k, v in d.items()}
def function[reversedict, parameter[d]]: constant[ Takes a ``k -> v`` mapping and returns a ``v -> k`` mapping. ] return[<ast.DictComp object at 0x7da1b190ccd0>]
keyword[def] identifier[reversedict] ( identifier[d] : identifier[Dict] [ identifier[Any] , identifier[Any] ])-> identifier[Dict] [ identifier[Any] , identifier[Any] ]: literal[string] keyword[return] { identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] ()}
def reversedict(d: Dict[Any, Any]) -> Dict[Any, Any]: """ Takes a ``k -> v`` mapping and returns a ``v -> k`` mapping. """ return {v: k for (k, v) in d.items()}
def _check_backends(self): """ Check that every backend in roles and attributes is declared in main configuration """ backends = self.backends_params.keys() for b in self.roles.get_backends(): if b not in backends: raise MissingBackend(b, 'role') for b in self.attributes.get_backends(): if b not in backends: raise MissingBackend(b, 'attribute')
def function[_check_backends, parameter[self]]: constant[ Check that every backend in roles and attributes is declared in main configuration ] variable[backends] assign[=] call[name[self].backends_params.keys, parameter[]] for taget[name[b]] in starred[call[name[self].roles.get_backends, parameter[]]] begin[:] if compare[name[b] <ast.NotIn object at 0x7da2590d7190> name[backends]] begin[:] <ast.Raise object at 0x7da1b26ad510> for taget[name[b]] in starred[call[name[self].attributes.get_backends, parameter[]]] begin[:] if compare[name[b] <ast.NotIn object at 0x7da2590d7190> name[backends]] begin[:] <ast.Raise object at 0x7da1b26af2e0>
keyword[def] identifier[_check_backends] ( identifier[self] ): literal[string] identifier[backends] = identifier[self] . identifier[backends_params] . identifier[keys] () keyword[for] identifier[b] keyword[in] identifier[self] . identifier[roles] . identifier[get_backends] (): keyword[if] identifier[b] keyword[not] keyword[in] identifier[backends] : keyword[raise] identifier[MissingBackend] ( identifier[b] , literal[string] ) keyword[for] identifier[b] keyword[in] identifier[self] . identifier[attributes] . identifier[get_backends] (): keyword[if] identifier[b] keyword[not] keyword[in] identifier[backends] : keyword[raise] identifier[MissingBackend] ( identifier[b] , literal[string] )
def _check_backends(self): """ Check that every backend in roles and attributes is declared in main configuration """ backends = self.backends_params.keys() for b in self.roles.get_backends(): if b not in backends: raise MissingBackend(b, 'role') # depends on [control=['if'], data=['b']] # depends on [control=['for'], data=['b']] for b in self.attributes.get_backends(): if b not in backends: raise MissingBackend(b, 'attribute') # depends on [control=['if'], data=['b']] # depends on [control=['for'], data=['b']]
def plot_chain(chain, joints, ax, target=None, show=False): """Plots the chain""" # LIst of nodes and orientations nodes = [] axes = [] transformation_matrixes = chain.forward_kinematics(joints, full_kinematics=True) # Get the nodes and the orientation from the tranformation matrix for (index, link) in enumerate(chain.links): (node, rotation) = geometry_utils.from_transformation_matrix(transformation_matrixes[index]) nodes.append(node) rotation_axis = link._get_rotation_axis() if index == 0: axes.append(rotation_axis) else: axes.append(geometry_utils.homogeneous_to_cartesian_vectors(np.dot(transformation_matrixes[index - 1], rotation_axis))) # Plot the chain ax.plot([x[0] for x in nodes], [x[1] for x in nodes], [x[2] for x in nodes]) # Plot of the nodes of the chain ax.scatter([x[0] for x in nodes], [x[1] for x in nodes], [x[2] for x in nodes]) # Plot rotation axes for index, axe in enumerate(axes): ax.plot([nodes[index][0], axe[0]], [nodes[index][1], axe[1]], [nodes[index][2], axe[2]])
def function[plot_chain, parameter[chain, joints, ax, target, show]]: constant[Plots the chain] variable[nodes] assign[=] list[[]] variable[axes] assign[=] list[[]] variable[transformation_matrixes] assign[=] call[name[chain].forward_kinematics, parameter[name[joints]]] for taget[tuple[[<ast.Name object at 0x7da18fe91e10>, <ast.Name object at 0x7da18fe93d90>]]] in starred[call[name[enumerate], parameter[name[chain].links]]] begin[:] <ast.Tuple object at 0x7da18fe937c0> assign[=] call[name[geometry_utils].from_transformation_matrix, parameter[call[name[transformation_matrixes]][name[index]]]] call[name[nodes].append, parameter[name[node]]] variable[rotation_axis] assign[=] call[name[link]._get_rotation_axis, parameter[]] if compare[name[index] equal[==] constant[0]] begin[:] call[name[axes].append, parameter[name[rotation_axis]]] call[name[ax].plot, parameter[<ast.ListComp object at 0x7da18fe92e60>, <ast.ListComp object at 0x7da18fe911e0>, <ast.ListComp object at 0x7da18fe93280>]] call[name[ax].scatter, parameter[<ast.ListComp object at 0x7da207f02110>, <ast.ListComp object at 0x7da207f00d60>, <ast.ListComp object at 0x7da207f01c30>]] for taget[tuple[[<ast.Name object at 0x7da207f02b60>, <ast.Name object at 0x7da207f02da0>]]] in starred[call[name[enumerate], parameter[name[axes]]]] begin[:] call[name[ax].plot, parameter[list[[<ast.Subscript object at 0x7da207f03610>, <ast.Subscript object at 0x7da20c991540>]], list[[<ast.Subscript object at 0x7da20c992380>, <ast.Subscript object at 0x7da20c992230>]], list[[<ast.Subscript object at 0x7da20c990b50>, <ast.Subscript object at 0x7da20c993100>]]]]
keyword[def] identifier[plot_chain] ( identifier[chain] , identifier[joints] , identifier[ax] , identifier[target] = keyword[None] , identifier[show] = keyword[False] ): literal[string] identifier[nodes] =[] identifier[axes] =[] identifier[transformation_matrixes] = identifier[chain] . identifier[forward_kinematics] ( identifier[joints] , identifier[full_kinematics] = keyword[True] ) keyword[for] ( identifier[index] , identifier[link] ) keyword[in] identifier[enumerate] ( identifier[chain] . identifier[links] ): ( identifier[node] , identifier[rotation] )= identifier[geometry_utils] . identifier[from_transformation_matrix] ( identifier[transformation_matrixes] [ identifier[index] ]) identifier[nodes] . identifier[append] ( identifier[node] ) identifier[rotation_axis] = identifier[link] . identifier[_get_rotation_axis] () keyword[if] identifier[index] == literal[int] : identifier[axes] . identifier[append] ( identifier[rotation_axis] ) keyword[else] : identifier[axes] . identifier[append] ( identifier[geometry_utils] . identifier[homogeneous_to_cartesian_vectors] ( identifier[np] . identifier[dot] ( identifier[transformation_matrixes] [ identifier[index] - literal[int] ], identifier[rotation_axis] ))) identifier[ax] . identifier[plot] ([ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[nodes] ],[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[nodes] ],[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[nodes] ]) identifier[ax] . identifier[scatter] ([ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[nodes] ],[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[nodes] ],[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[nodes] ]) keyword[for] identifier[index] , identifier[axe] keyword[in] identifier[enumerate] ( identifier[axes] ): identifier[ax] . 
identifier[plot] ([ identifier[nodes] [ identifier[index] ][ literal[int] ], identifier[axe] [ literal[int] ]],[ identifier[nodes] [ identifier[index] ][ literal[int] ], identifier[axe] [ literal[int] ]],[ identifier[nodes] [ identifier[index] ][ literal[int] ], identifier[axe] [ literal[int] ]])
def plot_chain(chain, joints, ax, target=None, show=False): """Plots the chain""" # LIst of nodes and orientations nodes = [] axes = [] transformation_matrixes = chain.forward_kinematics(joints, full_kinematics=True) # Get the nodes and the orientation from the tranformation matrix for (index, link) in enumerate(chain.links): (node, rotation) = geometry_utils.from_transformation_matrix(transformation_matrixes[index]) nodes.append(node) rotation_axis = link._get_rotation_axis() if index == 0: axes.append(rotation_axis) # depends on [control=['if'], data=[]] else: axes.append(geometry_utils.homogeneous_to_cartesian_vectors(np.dot(transformation_matrixes[index - 1], rotation_axis))) # depends on [control=['for'], data=[]] # Plot the chain ax.plot([x[0] for x in nodes], [x[1] for x in nodes], [x[2] for x in nodes]) # Plot of the nodes of the chain ax.scatter([x[0] for x in nodes], [x[1] for x in nodes], [x[2] for x in nodes]) # Plot rotation axes for (index, axe) in enumerate(axes): ax.plot([nodes[index][0], axe[0]], [nodes[index][1], axe[1]], [nodes[index][2], axe[2]]) # depends on [control=['for'], data=[]]
def parallel_tfa_lclist(lclist, templateinfo, timecols=None, magcols=None, errcols=None, lcformat='hat-sql', lcformatdir=None, interp='nearest', sigclip=5.0, mintemplatedist_arcmin=10.0, nworkers=NCPUS, maxworkertasks=1000): '''This applies TFA in parallel to all LCs in the given list of file names. Parameters ---------- lclist : str This is a list of light curve files to apply TFA correction to. templateinfo : dict or str This is either the dict produced by `tfa_templates_lclist` or the pickle produced by the same function. timecols : list of str or None The timecol keys to use from the lcdict in applying TFA corrections. magcols : list of str or None The magcol keys to use from the lcdict in applying TFA corrections. errcols : list of str or None The errcol keys to use from the lcdict in applying TFA corrections. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. interp : str This is passed to scipy.interpolate.interp1d as the kind of interpolation to use when reforming the light curves to the timebase of the TFA templates. sigclip : float or sequence of two floats or None This is the sigma clip to apply to the light curves before running TFA on it. mintemplatedist_arcmin : float This sets the minimum distance required from the target object for objects in the TFA template ensemble. Objects closer than this distance will be removed from the ensemble. 
nworkers : int The number of parallel workers to launch maxworkertasks : int The maximum number of tasks per worker allowed before it's replaced by a fresh one. Returns ------- dict Contains the input file names and output TFA light curve filenames per input file organized by each `magcol` in `magcols`. ''' # open the templateinfo first if isinstance(templateinfo,str) and os.path.exists(templateinfo): with open(templateinfo,'rb') as infd: templateinfo = pickle.load(infd) try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return None except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # override the default timecols, magcols, and errcols # using the ones provided to the function # we'll get the defaults from the templateinfo object if timecols is None: timecols = templateinfo['timecols'] if magcols is None: magcols = templateinfo['magcols'] if errcols is None: errcols = templateinfo['errcols'] outdict = {} # run by magcol for t, m, e in zip(timecols, magcols, errcols): tasks = [(x, t, m, e, templateinfo, lcformat, lcformatdir, interp, sigclip) for x in lclist] pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks) results = pool.map(_parallel_tfa_worker, tasks) pool.close() pool.join() outdict[m] = results return outdict
def function[parallel_tfa_lclist, parameter[lclist, templateinfo, timecols, magcols, errcols, lcformat, lcformatdir, interp, sigclip, mintemplatedist_arcmin, nworkers, maxworkertasks]]: constant[This applies TFA in parallel to all LCs in the given list of file names. Parameters ---------- lclist : str This is a list of light curve files to apply TFA correction to. templateinfo : dict or str This is either the dict produced by `tfa_templates_lclist` or the pickle produced by the same function. timecols : list of str or None The timecol keys to use from the lcdict in applying TFA corrections. magcols : list of str or None The magcol keys to use from the lcdict in applying TFA corrections. errcols : list of str or None The errcol keys to use from the lcdict in applying TFA corrections. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. interp : str This is passed to scipy.interpolate.interp1d as the kind of interpolation to use when reforming the light curves to the timebase of the TFA templates. sigclip : float or sequence of two floats or None This is the sigma clip to apply to the light curves before running TFA on it. mintemplatedist_arcmin : float This sets the minimum distance required from the target object for objects in the TFA template ensemble. Objects closer than this distance will be removed from the ensemble. 
nworkers : int The number of parallel workers to launch maxworkertasks : int The maximum number of tasks per worker allowed before it's replaced by a fresh one. Returns ------- dict Contains the input file names and output TFA light curve filenames per input file organized by each `magcol` in `magcols`. ] if <ast.BoolOp object at 0x7da1b00d9f30> begin[:] with call[name[open], parameter[name[templateinfo], constant[rb]]] begin[:] variable[templateinfo] assign[=] call[name[pickle].load, parameter[name[infd]]] <ast.Try object at 0x7da1b00db850> if compare[name[timecols] is constant[None]] begin[:] variable[timecols] assign[=] call[name[templateinfo]][constant[timecols]] if compare[name[magcols] is constant[None]] begin[:] variable[magcols] assign[=] call[name[templateinfo]][constant[magcols]] if compare[name[errcols] is constant[None]] begin[:] variable[errcols] assign[=] call[name[templateinfo]][constant[errcols]] variable[outdict] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b00db580>, <ast.Name object at 0x7da1b00d9270>, <ast.Name object at 0x7da1b00d9e70>]]] in starred[call[name[zip], parameter[name[timecols], name[magcols], name[errcols]]]] begin[:] variable[tasks] assign[=] <ast.ListComp object at 0x7da1b00d8c10> variable[pool] assign[=] call[name[mp].Pool, parameter[name[nworkers]]] variable[results] assign[=] call[name[pool].map, parameter[name[_parallel_tfa_worker], name[tasks]]] call[name[pool].close, parameter[]] call[name[pool].join, parameter[]] call[name[outdict]][name[m]] assign[=] name[results] return[name[outdict]]
keyword[def] identifier[parallel_tfa_lclist] ( identifier[lclist] , identifier[templateinfo] , identifier[timecols] = keyword[None] , identifier[magcols] = keyword[None] , identifier[errcols] = keyword[None] , identifier[lcformat] = literal[string] , identifier[lcformatdir] = keyword[None] , identifier[interp] = literal[string] , identifier[sigclip] = literal[int] , identifier[mintemplatedist_arcmin] = literal[int] , identifier[nworkers] = identifier[NCPUS] , identifier[maxworkertasks] = literal[int] ): literal[string] keyword[if] identifier[isinstance] ( identifier[templateinfo] , identifier[str] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[templateinfo] ): keyword[with] identifier[open] ( identifier[templateinfo] , literal[string] ) keyword[as] identifier[infd] : identifier[templateinfo] = identifier[pickle] . identifier[load] ( identifier[infd] ) keyword[try] : identifier[formatinfo] = identifier[get_lcformat] ( identifier[lcformat] , identifier[use_lcformat_dir] = identifier[lcformatdir] ) keyword[if] identifier[formatinfo] : ( identifier[dfileglob] , identifier[readerfunc] , identifier[dtimecols] , identifier[dmagcols] , identifier[derrcols] , identifier[magsarefluxes] , identifier[normfunc] )= identifier[formatinfo] keyword[else] : identifier[LOGERROR] ( literal[string] ) keyword[return] keyword[None] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[LOGEXCEPTION] ( literal[string] ) keyword[return] keyword[None] keyword[if] identifier[timecols] keyword[is] keyword[None] : identifier[timecols] = identifier[templateinfo] [ literal[string] ] keyword[if] identifier[magcols] keyword[is] keyword[None] : identifier[magcols] = identifier[templateinfo] [ literal[string] ] keyword[if] identifier[errcols] keyword[is] keyword[None] : identifier[errcols] = identifier[templateinfo] [ literal[string] ] identifier[outdict] ={} keyword[for] identifier[t] , identifier[m] , identifier[e] keyword[in] 
identifier[zip] ( identifier[timecols] , identifier[magcols] , identifier[errcols] ): identifier[tasks] =[( identifier[x] , identifier[t] , identifier[m] , identifier[e] , identifier[templateinfo] , identifier[lcformat] , identifier[lcformatdir] , identifier[interp] , identifier[sigclip] ) keyword[for] identifier[x] keyword[in] identifier[lclist] ] identifier[pool] = identifier[mp] . identifier[Pool] ( identifier[nworkers] , identifier[maxtasksperchild] = identifier[maxworkertasks] ) identifier[results] = identifier[pool] . identifier[map] ( identifier[_parallel_tfa_worker] , identifier[tasks] ) identifier[pool] . identifier[close] () identifier[pool] . identifier[join] () identifier[outdict] [ identifier[m] ]= identifier[results] keyword[return] identifier[outdict]
def parallel_tfa_lclist(lclist, templateinfo, timecols=None, magcols=None, errcols=None, lcformat='hat-sql', lcformatdir=None, interp='nearest', sigclip=5.0, mintemplatedist_arcmin=10.0, nworkers=NCPUS, maxworkertasks=1000): """This applies TFA in parallel to all LCs in the given list of file names. Parameters ---------- lclist : str This is a list of light curve files to apply TFA correction to. templateinfo : dict or str This is either the dict produced by `tfa_templates_lclist` or the pickle produced by the same function. timecols : list of str or None The timecol keys to use from the lcdict in applying TFA corrections. magcols : list of str or None The magcol keys to use from the lcdict in applying TFA corrections. errcols : list of str or None The errcol keys to use from the lcdict in applying TFA corrections. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. interp : str This is passed to scipy.interpolate.interp1d as the kind of interpolation to use when reforming the light curves to the timebase of the TFA templates. sigclip : float or sequence of two floats or None This is the sigma clip to apply to the light curves before running TFA on it. mintemplatedist_arcmin : float This sets the minimum distance required from the target object for objects in the TFA template ensemble. Objects closer than this distance will be removed from the ensemble. 
nworkers : int The number of parallel workers to launch maxworkertasks : int The maximum number of tasks per worker allowed before it's replaced by a fresh one. Returns ------- dict Contains the input file names and output TFA light curve filenames per input file organized by each `magcol` in `magcols`. """ # open the templateinfo first if isinstance(templateinfo, str) and os.path.exists(templateinfo): with open(templateinfo, 'rb') as infd: templateinfo = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['if'], data=[]] try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo # depends on [control=['if'], data=[]] else: LOGERROR("can't figure out the light curve format") return None # depends on [control=['try'], data=[]] except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # depends on [control=['except'], data=[]] # override the default timecols, magcols, and errcols # using the ones provided to the function # we'll get the defaults from the templateinfo object if timecols is None: timecols = templateinfo['timecols'] # depends on [control=['if'], data=['timecols']] if magcols is None: magcols = templateinfo['magcols'] # depends on [control=['if'], data=['magcols']] if errcols is None: errcols = templateinfo['errcols'] # depends on [control=['if'], data=['errcols']] outdict = {} # run by magcol for (t, m, e) in zip(timecols, magcols, errcols): tasks = [(x, t, m, e, templateinfo, lcformat, lcformatdir, interp, sigclip) for x in lclist] pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks) results = pool.map(_parallel_tfa_worker, tasks) pool.close() pool.join() outdict[m] = results # depends on [control=['for'], data=[]] return outdict
def flp_nonlinear_soco(I,J,d,M,f,c): """flp_nonlinear_soco -- use Parameters: - I: set of customers - J: set of facilities - d[i]: demand for product i - M[j]: capacity of facility j - f[j]: fixed cost for using a facility in point j - c[i,j]: unit cost of servicing demand point i from facility j Returns a model, ready to be solved. """ model = Model("nonlinear flp -- soco formulation") x,X,u = {},{},{} for j in J: X[j] = model.addVar(ub=M[j], vtype="C", name="X(%s)"%j) # for sum_i x_ij u[j] = model.addVar(vtype="C", name="u(%s)"%j) # for replacing sqrt sum_i x_ij in soco for i in I: x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j # constraints for customer's demand satisfaction for i in I: model.addCons(quicksum(x[i,j] for j in J) == 1, "Demand(%s)"%i) for j in J: model.addCons(quicksum(d[i]*x[i,j] for i in I) == X[j], "Capacity(%s)"%j) model.addQConstr(quicksum(f[j]**2*d[i]*x[i,j]*x[i,j] for i in I) <= u[j]*u[j], "SOC(%s)"%j) model.setObjective(quicksum(u[j] for j in J) +\ quicksum(c[i,j]*d[i]*x[i,j] for j in J for i in I),\ "minimize") model.data = x,u return model
def function[flp_nonlinear_soco, parameter[I, J, d, M, f, c]]: constant[flp_nonlinear_soco -- use Parameters: - I: set of customers - J: set of facilities - d[i]: demand for product i - M[j]: capacity of facility j - f[j]: fixed cost for using a facility in point j - c[i,j]: unit cost of servicing demand point i from facility j Returns a model, ready to be solved. ] variable[model] assign[=] call[name[Model], parameter[constant[nonlinear flp -- soco formulation]]] <ast.Tuple object at 0x7da1b18b6170> assign[=] tuple[[<ast.Dict object at 0x7da1b18b4370>, <ast.Dict object at 0x7da1b18b42e0>, <ast.Dict object at 0x7da1b18b6410>]] for taget[name[j]] in starred[name[J]] begin[:] call[name[X]][name[j]] assign[=] call[name[model].addVar, parameter[]] call[name[u]][name[j]] assign[=] call[name[model].addVar, parameter[]] for taget[name[i]] in starred[name[I]] begin[:] call[name[x]][tuple[[<ast.Name object at 0x7da1b18b5d20>, <ast.Name object at 0x7da1b18b6050>]]] assign[=] call[name[model].addVar, parameter[]] for taget[name[i]] in starred[name[I]] begin[:] call[name[model].addCons, parameter[compare[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b18b7280>]] equal[==] constant[1]], binary_operation[constant[Demand(%s)] <ast.Mod object at 0x7da2590d6920> name[i]]]] for taget[name[j]] in starred[name[J]] begin[:] call[name[model].addCons, parameter[compare[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b18b7c40>]] equal[==] call[name[X]][name[j]]], binary_operation[constant[Capacity(%s)] <ast.Mod object at 0x7da2590d6920> name[j]]]] call[name[model].addQConstr, parameter[compare[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b18bb6d0>]] less_or_equal[<=] binary_operation[call[name[u]][name[j]] * call[name[u]][name[j]]]], binary_operation[constant[SOC(%s)] <ast.Mod object at 0x7da2590d6920> name[j]]]] call[name[model].setObjective, parameter[binary_operation[call[name[quicksum], parameter[<ast.GeneratorExp object at 
0x7da1b18b5bd0>]] + call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b18b49d0>]]], constant[minimize]]] name[model].data assign[=] tuple[[<ast.Name object at 0x7da1b18b63b0>, <ast.Name object at 0x7da1b18b6f80>]] return[name[model]]
keyword[def] identifier[flp_nonlinear_soco] ( identifier[I] , identifier[J] , identifier[d] , identifier[M] , identifier[f] , identifier[c] ): literal[string] identifier[model] = identifier[Model] ( literal[string] ) identifier[x] , identifier[X] , identifier[u] ={},{},{} keyword[for] identifier[j] keyword[in] identifier[J] : identifier[X] [ identifier[j] ]= identifier[model] . identifier[addVar] ( identifier[ub] = identifier[M] [ identifier[j] ], identifier[vtype] = literal[string] , identifier[name] = literal[string] % identifier[j] ) identifier[u] [ identifier[j] ]= identifier[model] . identifier[addVar] ( identifier[vtype] = literal[string] , identifier[name] = literal[string] % identifier[j] ) keyword[for] identifier[i] keyword[in] identifier[I] : identifier[x] [ identifier[i] , identifier[j] ]= identifier[model] . identifier[addVar] ( identifier[vtype] = literal[string] , identifier[name] = literal[string] %( identifier[i] , identifier[j] )) keyword[for] identifier[i] keyword[in] identifier[I] : identifier[model] . identifier[addCons] ( identifier[quicksum] ( identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[J] )== literal[int] , literal[string] % identifier[i] ) keyword[for] identifier[j] keyword[in] identifier[J] : identifier[model] . identifier[addCons] ( identifier[quicksum] ( identifier[d] [ identifier[i] ]* identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[i] keyword[in] identifier[I] )== identifier[X] [ identifier[j] ], literal[string] % identifier[j] ) identifier[model] . identifier[addQConstr] ( identifier[quicksum] ( identifier[f] [ identifier[j] ]** literal[int] * identifier[d] [ identifier[i] ]* identifier[x] [ identifier[i] , identifier[j] ]* identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[i] keyword[in] identifier[I] )<= identifier[u] [ identifier[j] ]* identifier[u] [ identifier[j] ], literal[string] % identifier[j] ) identifier[model] . 
identifier[setObjective] ( identifier[quicksum] ( identifier[u] [ identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[J] )+ identifier[quicksum] ( identifier[c] [ identifier[i] , identifier[j] ]* identifier[d] [ identifier[i] ]* identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[J] keyword[for] identifier[i] keyword[in] identifier[I] ), literal[string] ) identifier[model] . identifier[data] = identifier[x] , identifier[u] keyword[return] identifier[model]
def flp_nonlinear_soco(I, J, d, M, f, c): """flp_nonlinear_soco -- use Parameters: - I: set of customers - J: set of facilities - d[i]: demand for product i - M[j]: capacity of facility j - f[j]: fixed cost for using a facility in point j - c[i,j]: unit cost of servicing demand point i from facility j Returns a model, ready to be solved. """ model = Model('nonlinear flp -- soco formulation') (x, X, u) = ({}, {}, {}) for j in J: X[j] = model.addVar(ub=M[j], vtype='C', name='X(%s)' % j) # for sum_i x_ij u[j] = model.addVar(vtype='C', name='u(%s)' % j) # for replacing sqrt sum_i x_ij in soco for i in I: x[i, j] = model.addVar(vtype='B', name='x(%s,%s)' % (i, j)) # i's demand satisfied from j # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['j']] # constraints for customer's demand satisfaction for i in I: model.addCons(quicksum((x[i, j] for j in J)) == 1, 'Demand(%s)' % i) # depends on [control=['for'], data=['i']] for j in J: model.addCons(quicksum((d[i] * x[i, j] for i in I)) == X[j], 'Capacity(%s)' % j) model.addQConstr(quicksum((f[j] ** 2 * d[i] * x[i, j] * x[i, j] for i in I)) <= u[j] * u[j], 'SOC(%s)' % j) # depends on [control=['for'], data=['j']] model.setObjective(quicksum((u[j] for j in J)) + quicksum((c[i, j] * d[i] * x[i, j] for j in J for i in I)), 'minimize') model.data = (x, u) return model
def create_or_update_release(user, pwd, secret, repo_slug, changelog_file, doc_url, data_file, tag): """ Creates or updates (TODO) a github release corresponding to git tag <TAG>. """ # 1- AUTHENTICATION if user is not None and secret is None: # using username and password # validate('user', user, instance_of=str) assert isinstance(user, str) # validate('pwd', pwd, instance_of=str) assert isinstance(pwd, str) g = Github(user, pwd) elif user is None and secret is not None: # or using an access token # validate('secret', secret, instance_of=str) assert isinstance(secret, str) g = Github(secret) else: raise ValueError("You should either provide username/password OR an access token") click.echo("Logged in as {user_name}".format(user_name=g.get_user())) # 2- CHANGELOG VALIDATION regex_pattern = "[\s\S]*[\n][#]+[\s]*(?P<title>[\S ]*%s[\S ]*)[\n]+?(?P<body>[\s\S]*?)[\n]*?(\n#|$)" % re.escape(tag) changelog_section = re.compile(regex_pattern) if changelog_file is not None: # validate('changelog_file', changelog_file, custom=os.path.exists, # help_msg="changelog file should be a valid file path") assert os.path.exists(changelog_file), "changelog file should be a valid file path" with open(changelog_file) as f: contents = f.read() match = changelog_section.match(contents).groupdict() if match is None or len(match) != 2: raise ValueError("Unable to find changelog section matching regexp pattern in changelog file.") else: title = match['title'] message = match['body'] else: title = tag message = '' # append footer if doc url is provided message += "\n\nSee [documentation page](%s) for details." % doc_url # 3- REPOSITORY EXPLORATION # validate('repo_slug', repo_slug, instance_of=str, min_len=1, help_msg="repo_slug should be a non-empty string") assert isinstance(repo_slug, str) and len(repo_slug) > 0, "repo_slug should be a non-empty string" repo = g.get_repo(repo_slug) # -- Is there a tag with that name ? 
try: tag_ref = repo.get_git_ref("tags/" + tag) except UnknownObjectException: raise ValueError("No tag with name %s exists in repository %s" % (tag, repo.name)) # -- Is there already a release with that tag name ? click.echo("Checking if release %s already exists in repository %s" % (tag, repo.name)) try: release = repo.get_release(tag) if release is not None: raise ValueError("Release %s already exists in repository %s. Please set overwrite to True if you wish to " "update the release (Not yet supported)" % (tag, repo.name)) except UnknownObjectException: # Release does not exist: we can safely create it. click.echo("Creating release %s on repo: %s" % (tag, repo.name)) click.echo("Release title: '%s'" % title) click.echo("Release message:\n--\n%s\n--\n" % message) repo.create_git_release(tag=tag, name=title, message=message, draft=False, prerelease=False) # add the asset file if needed if data_file is not None: release = None while release is None: release = repo.get_release(tag) release.upload_asset(path=data_file, label=path.split(data_file)[1], content_type="application/gzip")
def function[create_or_update_release, parameter[user, pwd, secret, repo_slug, changelog_file, doc_url, data_file, tag]]: constant[ Creates or updates (TODO) a github release corresponding to git tag <TAG>. ] if <ast.BoolOp object at 0x7da1b042bd00> begin[:] assert[call[name[isinstance], parameter[name[user], name[str]]]] assert[call[name[isinstance], parameter[name[pwd], name[str]]]] variable[g] assign[=] call[name[Github], parameter[name[user], name[pwd]]] call[name[click].echo, parameter[call[constant[Logged in as {user_name}].format, parameter[]]]] variable[regex_pattern] assign[=] binary_operation[constant[[\s\S]*[ ][#]+[\s]*(?P<title>[\S ]*%s[\S ]*)[ ]+?(?P<body>[\s\S]*?)[ ]*?( #|$)] <ast.Mod object at 0x7da2590d6920> call[name[re].escape, parameter[name[tag]]]] variable[changelog_section] assign[=] call[name[re].compile, parameter[name[regex_pattern]]] if compare[name[changelog_file] is_not constant[None]] begin[:] assert[call[name[os].path.exists, parameter[name[changelog_file]]]] with call[name[open], parameter[name[changelog_file]]] begin[:] variable[contents] assign[=] call[name[f].read, parameter[]] variable[match] assign[=] call[call[name[changelog_section].match, parameter[name[contents]]].groupdict, parameter[]] if <ast.BoolOp object at 0x7da1b042a020> begin[:] <ast.Raise object at 0x7da1b0429e70> <ast.AugAssign object at 0x7da1b0429ab0> assert[<ast.BoolOp object at 0x7da1b0429990>] variable[repo] assign[=] call[name[g].get_repo, parameter[name[repo_slug]]] <ast.Try object at 0x7da1b0429660> call[name[click].echo, parameter[binary_operation[constant[Checking if release %s already exists in repository %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0429150>, <ast.Attribute object at 0x7da1b0429120>]]]]] <ast.Try object at 0x7da1b0429090>
keyword[def] identifier[create_or_update_release] ( identifier[user] , identifier[pwd] , identifier[secret] , identifier[repo_slug] , identifier[changelog_file] , identifier[doc_url] , identifier[data_file] , identifier[tag] ): literal[string] keyword[if] identifier[user] keyword[is] keyword[not] keyword[None] keyword[and] identifier[secret] keyword[is] keyword[None] : keyword[assert] identifier[isinstance] ( identifier[user] , identifier[str] ) keyword[assert] identifier[isinstance] ( identifier[pwd] , identifier[str] ) identifier[g] = identifier[Github] ( identifier[user] , identifier[pwd] ) keyword[elif] identifier[user] keyword[is] keyword[None] keyword[and] identifier[secret] keyword[is] keyword[not] keyword[None] : keyword[assert] identifier[isinstance] ( identifier[secret] , identifier[str] ) identifier[g] = identifier[Github] ( identifier[secret] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[user_name] = identifier[g] . identifier[get_user] ())) identifier[regex_pattern] = literal[string] % identifier[re] . identifier[escape] ( identifier[tag] ) identifier[changelog_section] = identifier[re] . identifier[compile] ( identifier[regex_pattern] ) keyword[if] identifier[changelog_file] keyword[is] keyword[not] keyword[None] : keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[changelog_file] ), literal[string] keyword[with] identifier[open] ( identifier[changelog_file] ) keyword[as] identifier[f] : identifier[contents] = identifier[f] . identifier[read] () identifier[match] = identifier[changelog_section] . identifier[match] ( identifier[contents] ). 
identifier[groupdict] () keyword[if] identifier[match] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[match] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : identifier[title] = identifier[match] [ literal[string] ] identifier[message] = identifier[match] [ literal[string] ] keyword[else] : identifier[title] = identifier[tag] identifier[message] = literal[string] identifier[message] += literal[string] % identifier[doc_url] keyword[assert] identifier[isinstance] ( identifier[repo_slug] , identifier[str] ) keyword[and] identifier[len] ( identifier[repo_slug] )> literal[int] , literal[string] identifier[repo] = identifier[g] . identifier[get_repo] ( identifier[repo_slug] ) keyword[try] : identifier[tag_ref] = identifier[repo] . identifier[get_git_ref] ( literal[string] + identifier[tag] ) keyword[except] identifier[UnknownObjectException] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[tag] , identifier[repo] . identifier[name] )) identifier[click] . identifier[echo] ( literal[string] %( identifier[tag] , identifier[repo] . identifier[name] )) keyword[try] : identifier[release] = identifier[repo] . identifier[get_release] ( identifier[tag] ) keyword[if] identifier[release] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] %( identifier[tag] , identifier[repo] . identifier[name] )) keyword[except] identifier[UnknownObjectException] : identifier[click] . identifier[echo] ( literal[string] %( identifier[tag] , identifier[repo] . identifier[name] )) identifier[click] . identifier[echo] ( literal[string] % identifier[title] ) identifier[click] . identifier[echo] ( literal[string] % identifier[message] ) identifier[repo] . 
identifier[create_git_release] ( identifier[tag] = identifier[tag] , identifier[name] = identifier[title] , identifier[message] = identifier[message] , identifier[draft] = keyword[False] , identifier[prerelease] = keyword[False] ) keyword[if] identifier[data_file] keyword[is] keyword[not] keyword[None] : identifier[release] = keyword[None] keyword[while] identifier[release] keyword[is] keyword[None] : identifier[release] = identifier[repo] . identifier[get_release] ( identifier[tag] ) identifier[release] . identifier[upload_asset] ( identifier[path] = identifier[data_file] , identifier[label] = identifier[path] . identifier[split] ( identifier[data_file] )[ literal[int] ], identifier[content_type] = literal[string] )
def create_or_update_release(user, pwd, secret, repo_slug, changelog_file, doc_url, data_file, tag): """ Creates or updates (TODO) a github release corresponding to git tag <TAG>. """ # 1- AUTHENTICATION if user is not None and secret is None: # using username and password # validate('user', user, instance_of=str) assert isinstance(user, str) # validate('pwd', pwd, instance_of=str) assert isinstance(pwd, str) g = Github(user, pwd) # depends on [control=['if'], data=[]] elif user is None and secret is not None: # or using an access token # validate('secret', secret, instance_of=str) assert isinstance(secret, str) g = Github(secret) # depends on [control=['if'], data=[]] else: raise ValueError('You should either provide username/password OR an access token') click.echo('Logged in as {user_name}'.format(user_name=g.get_user())) # 2- CHANGELOG VALIDATION regex_pattern = '[\\s\\S]*[\n][#]+[\\s]*(?P<title>[\\S ]*%s[\\S ]*)[\n]+?(?P<body>[\\s\\S]*?)[\n]*?(\n#|$)' % re.escape(tag) changelog_section = re.compile(regex_pattern) if changelog_file is not None: # validate('changelog_file', changelog_file, custom=os.path.exists, # help_msg="changelog file should be a valid file path") assert os.path.exists(changelog_file), 'changelog file should be a valid file path' with open(changelog_file) as f: contents = f.read() # depends on [control=['with'], data=['f']] match = changelog_section.match(contents).groupdict() if match is None or len(match) != 2: raise ValueError('Unable to find changelog section matching regexp pattern in changelog file.') # depends on [control=['if'], data=[]] else: title = match['title'] message = match['body'] # depends on [control=['if'], data=['changelog_file']] else: title = tag message = '' # append footer if doc url is provided message += '\n\nSee [documentation page](%s) for details.' 
% doc_url # 3- REPOSITORY EXPLORATION # validate('repo_slug', repo_slug, instance_of=str, min_len=1, help_msg="repo_slug should be a non-empty string") assert isinstance(repo_slug, str) and len(repo_slug) > 0, 'repo_slug should be a non-empty string' repo = g.get_repo(repo_slug) # -- Is there a tag with that name ? try: tag_ref = repo.get_git_ref('tags/' + tag) # depends on [control=['try'], data=[]] except UnknownObjectException: raise ValueError('No tag with name %s exists in repository %s' % (tag, repo.name)) # depends on [control=['except'], data=[]] # -- Is there already a release with that tag name ? click.echo('Checking if release %s already exists in repository %s' % (tag, repo.name)) try: release = repo.get_release(tag) if release is not None: raise ValueError('Release %s already exists in repository %s. Please set overwrite to True if you wish to update the release (Not yet supported)' % (tag, repo.name)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except UnknownObjectException: # Release does not exist: we can safely create it. click.echo('Creating release %s on repo: %s' % (tag, repo.name)) click.echo("Release title: '%s'" % title) click.echo('Release message:\n--\n%s\n--\n' % message) repo.create_git_release(tag=tag, name=title, message=message, draft=False, prerelease=False) # add the asset file if needed if data_file is not None: release = None while release is None: release = repo.get_release(tag) # depends on [control=['while'], data=['release']] release.upload_asset(path=data_file, label=path.split(data_file)[1], content_type='application/gzip') # depends on [control=['if'], data=['data_file']] # depends on [control=['except'], data=[]]
def _makeInfoFiles(basedir, quiet): """ Create info/* files inside basedir. @param basedir: worker base directory relative path @param quiet: if True, don't print info messages @raise CreateWorkerError: on error making info directory or writing info files """ def createFile(path, file, contents): filepath = os.path.join(path, file) if os.path.exists(filepath): return False if not quiet: print("Creating {0}, you need to edit it appropriately.".format( os.path.join("info", file))) try: open(filepath, "wt").write(contents) except IOError as exception: raise CreateWorkerError("could not write {0}: {1}".format( filepath, exception.strerror)) return True path = os.path.join(basedir, "info") if not os.path.exists(path): if not quiet: print("mkdir", path) try: os.mkdir(path) except OSError as exception: raise CreateWorkerError("error creating directory {0}: {1}".format( path, exception.strerror)) # create 'info/admin' file created = createFile(path, "admin", "Your Name Here <admin@youraddress.invalid>\n") # create 'info/host' file created = createFile(path, "host", "Please put a description of this build host here\n") access_uri = os.path.join(path, "access_uri") if not os.path.exists(access_uri): if not quiet: print("Not creating {0} - add it if you wish".format( os.path.join("info", "access_uri"))) if created and not quiet: print("Please edit the files in {0} appropriately.".format(path))
def function[_makeInfoFiles, parameter[basedir, quiet]]: constant[ Create info/* files inside basedir. @param basedir: worker base directory relative path @param quiet: if True, don't print info messages @raise CreateWorkerError: on error making info directory or writing info files ] def function[createFile, parameter[path, file, contents]]: variable[filepath] assign[=] call[name[os].path.join, parameter[name[path], name[file]]] if call[name[os].path.exists, parameter[name[filepath]]] begin[:] return[constant[False]] if <ast.UnaryOp object at 0x7da1b1c3d480> begin[:] call[name[print], parameter[call[constant[Creating {0}, you need to edit it appropriately.].format, parameter[call[name[os].path.join, parameter[constant[info], name[file]]]]]]] <ast.Try object at 0x7da1b1c3d8d0> return[constant[True]] variable[path] assign[=] call[name[os].path.join, parameter[name[basedir], constant[info]]] if <ast.UnaryOp object at 0x7da1b1c3ddb0> begin[:] if <ast.UnaryOp object at 0x7da1b1c3cc10> begin[:] call[name[print], parameter[constant[mkdir], name[path]]] <ast.Try object at 0x7da1b1c3ce50> variable[created] assign[=] call[name[createFile], parameter[name[path], constant[admin], constant[Your Name Here <admin@youraddress.invalid> ]]] variable[created] assign[=] call[name[createFile], parameter[name[path], constant[host], constant[Please put a description of this build host here ]]] variable[access_uri] assign[=] call[name[os].path.join, parameter[name[path], constant[access_uri]]] if <ast.UnaryOp object at 0x7da1b1c3e140> begin[:] if <ast.UnaryOp object at 0x7da1b1c3cc70> begin[:] call[name[print], parameter[call[constant[Not creating {0} - add it if you wish].format, parameter[call[name[os].path.join, parameter[constant[info], constant[access_uri]]]]]]] if <ast.BoolOp object at 0x7da18f813280> begin[:] call[name[print], parameter[call[constant[Please edit the files in {0} appropriately.].format, parameter[name[path]]]]]
keyword[def] identifier[_makeInfoFiles] ( identifier[basedir] , identifier[quiet] ): literal[string] keyword[def] identifier[createFile] ( identifier[path] , identifier[file] , identifier[contents] ): identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[file] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filepath] ): keyword[return] keyword[False] keyword[if] keyword[not] identifier[quiet] : identifier[print] ( literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[file] ))) keyword[try] : identifier[open] ( identifier[filepath] , literal[string] ). identifier[write] ( identifier[contents] ) keyword[except] identifier[IOError] keyword[as] identifier[exception] : keyword[raise] identifier[CreateWorkerError] ( literal[string] . identifier[format] ( identifier[filepath] , identifier[exception] . identifier[strerror] )) keyword[return] keyword[True] identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[basedir] , literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[if] keyword[not] identifier[quiet] : identifier[print] ( literal[string] , identifier[path] ) keyword[try] : identifier[os] . identifier[mkdir] ( identifier[path] ) keyword[except] identifier[OSError] keyword[as] identifier[exception] : keyword[raise] identifier[CreateWorkerError] ( literal[string] . identifier[format] ( identifier[path] , identifier[exception] . identifier[strerror] )) identifier[created] = identifier[createFile] ( identifier[path] , literal[string] , literal[string] ) identifier[created] = identifier[createFile] ( identifier[path] , literal[string] , literal[string] ) identifier[access_uri] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ) keyword[if] keyword[not] identifier[os] . 
identifier[path] . identifier[exists] ( identifier[access_uri] ): keyword[if] keyword[not] identifier[quiet] : identifier[print] ( literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] ))) keyword[if] identifier[created] keyword[and] keyword[not] identifier[quiet] : identifier[print] ( literal[string] . identifier[format] ( identifier[path] ))
def _makeInfoFiles(basedir, quiet): """ Create info/* files inside basedir. @param basedir: worker base directory relative path @param quiet: if True, don't print info messages @raise CreateWorkerError: on error making info directory or writing info files """ def createFile(path, file, contents): filepath = os.path.join(path, file) if os.path.exists(filepath): return False # depends on [control=['if'], data=[]] if not quiet: print('Creating {0}, you need to edit it appropriately.'.format(os.path.join('info', file))) # depends on [control=['if'], data=[]] try: open(filepath, 'wt').write(contents) # depends on [control=['try'], data=[]] except IOError as exception: raise CreateWorkerError('could not write {0}: {1}'.format(filepath, exception.strerror)) # depends on [control=['except'], data=['exception']] return True path = os.path.join(basedir, 'info') if not os.path.exists(path): if not quiet: print('mkdir', path) # depends on [control=['if'], data=[]] try: os.mkdir(path) # depends on [control=['try'], data=[]] except OSError as exception: raise CreateWorkerError('error creating directory {0}: {1}'.format(path, exception.strerror)) # depends on [control=['except'], data=['exception']] # depends on [control=['if'], data=[]] # create 'info/admin' file created = createFile(path, 'admin', 'Your Name Here <admin@youraddress.invalid>\n') # create 'info/host' file created = createFile(path, 'host', 'Please put a description of this build host here\n') access_uri = os.path.join(path, 'access_uri') if not os.path.exists(access_uri): if not quiet: print('Not creating {0} - add it if you wish'.format(os.path.join('info', 'access_uri'))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if created and (not quiet): print('Please edit the files in {0} appropriately.'.format(path)) # depends on [control=['if'], data=[]]
def update_domainalias(self, domainid, aliasid, data): """Update a domain alias""" return self.api_call( ENDPOINTS['domainaliases']['update'], dict(domainid=domainid, aliasid=aliasid), body=data)
def function[update_domainalias, parameter[self, domainid, aliasid, data]]: constant[Update a domain alias] return[call[name[self].api_call, parameter[call[call[name[ENDPOINTS]][constant[domainaliases]]][constant[update]], call[name[dict], parameter[]]]]]
keyword[def] identifier[update_domainalias] ( identifier[self] , identifier[domainid] , identifier[aliasid] , identifier[data] ): literal[string] keyword[return] identifier[self] . identifier[api_call] ( identifier[ENDPOINTS] [ literal[string] ][ literal[string] ], identifier[dict] ( identifier[domainid] = identifier[domainid] , identifier[aliasid] = identifier[aliasid] ), identifier[body] = identifier[data] )
def update_domainalias(self, domainid, aliasid, data): """Update a domain alias""" return self.api_call(ENDPOINTS['domainaliases']['update'], dict(domainid=domainid, aliasid=aliasid), body=data)
def get_major_version(version, remove=None): """Return major version of a provided version string. Major version is the first component of the dot-separated version string. For non-version-like strings this function returns the argument unchanged. The ``remove`` parameter is deprecated since version 1.18 and will be removed in the future. :param version: Version string :type version: str :rtype: str """ if remove: warnings.warn("remove argument is deprecated", DeprecationWarning) version_split = version.split(".") return version_split[0]
def function[get_major_version, parameter[version, remove]]: constant[Return major version of a provided version string. Major version is the first component of the dot-separated version string. For non-version-like strings this function returns the argument unchanged. The ``remove`` parameter is deprecated since version 1.18 and will be removed in the future. :param version: Version string :type version: str :rtype: str ] if name[remove] begin[:] call[name[warnings].warn, parameter[constant[remove argument is deprecated], name[DeprecationWarning]]] variable[version_split] assign[=] call[name[version].split, parameter[constant[.]]] return[call[name[version_split]][constant[0]]]
keyword[def] identifier[get_major_version] ( identifier[version] , identifier[remove] = keyword[None] ): literal[string] keyword[if] identifier[remove] : identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] ) identifier[version_split] = identifier[version] . identifier[split] ( literal[string] ) keyword[return] identifier[version_split] [ literal[int] ]
def get_major_version(version, remove=None): """Return major version of a provided version string. Major version is the first component of the dot-separated version string. For non-version-like strings this function returns the argument unchanged. The ``remove`` parameter is deprecated since version 1.18 and will be removed in the future. :param version: Version string :type version: str :rtype: str """ if remove: warnings.warn('remove argument is deprecated', DeprecationWarning) # depends on [control=['if'], data=[]] version_split = version.split('.') return version_split[0]
def get_fw_dev_map(self, fw_id): """Return the object dict and mgmt ip for a firewall. """ for cnt in self.res: if fw_id in self.res.get(cnt).get('fw_id_lst'): return self.res[cnt].get('obj_dict'), ( self.res[cnt].get('mgmt_ip')) return None, None
def function[get_fw_dev_map, parameter[self, fw_id]]: constant[Return the object dict and mgmt ip for a firewall. ] for taget[name[cnt]] in starred[name[self].res] begin[:] if compare[name[fw_id] in call[call[name[self].res.get, parameter[name[cnt]]].get, parameter[constant[fw_id_lst]]]] begin[:] return[tuple[[<ast.Call object at 0x7da18c4ce440>, <ast.Call object at 0x7da1b1b16ce0>]]] return[tuple[[<ast.Constant object at 0x7da1b1b16140>, <ast.Constant object at 0x7da1b1b16f20>]]]
keyword[def] identifier[get_fw_dev_map] ( identifier[self] , identifier[fw_id] ): literal[string] keyword[for] identifier[cnt] keyword[in] identifier[self] . identifier[res] : keyword[if] identifier[fw_id] keyword[in] identifier[self] . identifier[res] . identifier[get] ( identifier[cnt] ). identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[res] [ identifier[cnt] ]. identifier[get] ( literal[string] ),( identifier[self] . identifier[res] [ identifier[cnt] ]. identifier[get] ( literal[string] )) keyword[return] keyword[None] , keyword[None]
def get_fw_dev_map(self, fw_id): """Return the object dict and mgmt ip for a firewall. """ for cnt in self.res: if fw_id in self.res.get(cnt).get('fw_id_lst'): return (self.res[cnt].get('obj_dict'), self.res[cnt].get('mgmt_ip')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cnt']] return (None, None)
def plot_generated_images(images, fname): """Save a synthetic image as a PNG file. Args: images: samples of synthetic images generated by the generative network. fname: Python `str`, filename to save the plot to. """ fig = plt.figure(figsize=(4, 4)) canvas = backend_agg.FigureCanvasAgg(fig) for i, image in enumerate(images): ax = fig.add_subplot(4, 4, i + 1) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r') fig.tight_layout() plt.subplots_adjust(wspace=0.05, hspace=0.05) canvas.print_figure(fname, format='png')
def function[plot_generated_images, parameter[images, fname]]: constant[Save a synthetic image as a PNG file. Args: images: samples of synthetic images generated by the generative network. fname: Python `str`, filename to save the plot to. ] variable[fig] assign[=] call[name[plt].figure, parameter[]] variable[canvas] assign[=] call[name[backend_agg].FigureCanvasAgg, parameter[name[fig]]] for taget[tuple[[<ast.Name object at 0x7da1b05bd210>, <ast.Name object at 0x7da1b05bcac0>]]] in starred[call[name[enumerate], parameter[name[images]]]] begin[:] variable[ax] assign[=] call[name[fig].add_subplot, parameter[constant[4], constant[4], binary_operation[name[i] + constant[1]]]] call[name[plt].axis, parameter[constant[off]]] call[name[ax].set_xticklabels, parameter[list[[]]]] call[name[ax].set_yticklabels, parameter[list[[]]]] call[name[ax].imshow, parameter[call[name[image].reshape, parameter[call[name[IMAGE_SHAPE]][<ast.Slice object at 0x7da1b03a06d0>]]]]] call[name[fig].tight_layout, parameter[]] call[name[plt].subplots_adjust, parameter[]] call[name[canvas].print_figure, parameter[name[fname]]]
keyword[def] identifier[plot_generated_images] ( identifier[images] , identifier[fname] ): literal[string] identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] )) identifier[canvas] = identifier[backend_agg] . identifier[FigureCanvasAgg] ( identifier[fig] ) keyword[for] identifier[i] , identifier[image] keyword[in] identifier[enumerate] ( identifier[images] ): identifier[ax] = identifier[fig] . identifier[add_subplot] ( literal[int] , literal[int] , identifier[i] + literal[int] ) identifier[plt] . identifier[axis] ( literal[string] ) identifier[ax] . identifier[set_xticklabels] ([]) identifier[ax] . identifier[set_yticklabels] ([]) identifier[ax] . identifier[imshow] ( identifier[image] . identifier[reshape] ( identifier[IMAGE_SHAPE] [:- literal[int] ]), identifier[cmap] = literal[string] ) identifier[fig] . identifier[tight_layout] () identifier[plt] . identifier[subplots_adjust] ( identifier[wspace] = literal[int] , identifier[hspace] = literal[int] ) identifier[canvas] . identifier[print_figure] ( identifier[fname] , identifier[format] = literal[string] )
def plot_generated_images(images, fname): """Save a synthetic image as a PNG file. Args: images: samples of synthetic images generated by the generative network. fname: Python `str`, filename to save the plot to. """ fig = plt.figure(figsize=(4, 4)) canvas = backend_agg.FigureCanvasAgg(fig) for (i, image) in enumerate(images): ax = fig.add_subplot(4, 4, i + 1) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r') # depends on [control=['for'], data=[]] fig.tight_layout() plt.subplots_adjust(wspace=0.05, hspace=0.05) canvas.print_figure(fname, format='png')
def match_contains(self, el, contains): """Match element if it contains text.""" match = True content = None for contain_list in contains: if content is None: content = self.get_text(el, no_iframe=self.is_html) found = False for text in contain_list.text: if text in content: found = True break if not found: match = False return match
def function[match_contains, parameter[self, el, contains]]: constant[Match element if it contains text.] variable[match] assign[=] constant[True] variable[content] assign[=] constant[None] for taget[name[contain_list]] in starred[name[contains]] begin[:] if compare[name[content] is constant[None]] begin[:] variable[content] assign[=] call[name[self].get_text, parameter[name[el]]] variable[found] assign[=] constant[False] for taget[name[text]] in starred[name[contain_list].text] begin[:] if compare[name[text] in name[content]] begin[:] variable[found] assign[=] constant[True] break if <ast.UnaryOp object at 0x7da18fe91c60> begin[:] variable[match] assign[=] constant[False] return[name[match]]
keyword[def] identifier[match_contains] ( identifier[self] , identifier[el] , identifier[contains] ): literal[string] identifier[match] = keyword[True] identifier[content] = keyword[None] keyword[for] identifier[contain_list] keyword[in] identifier[contains] : keyword[if] identifier[content] keyword[is] keyword[None] : identifier[content] = identifier[self] . identifier[get_text] ( identifier[el] , identifier[no_iframe] = identifier[self] . identifier[is_html] ) identifier[found] = keyword[False] keyword[for] identifier[text] keyword[in] identifier[contain_list] . identifier[text] : keyword[if] identifier[text] keyword[in] identifier[content] : identifier[found] = keyword[True] keyword[break] keyword[if] keyword[not] identifier[found] : identifier[match] = keyword[False] keyword[return] identifier[match]
def match_contains(self, el, contains): """Match element if it contains text.""" match = True content = None for contain_list in contains: if content is None: content = self.get_text(el, no_iframe=self.is_html) # depends on [control=['if'], data=['content']] found = False for text in contain_list.text: if text in content: found = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['text']] if not found: match = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['contain_list']] return match
def encode(cls, value): """ write binary data into redis without encoding it. :param value: bytes :return: bytes """ try: coerced = bytes(value) if coerced == value: return coerced except (TypeError, UnicodeError): pass raise InvalidValue('not binary')
def function[encode, parameter[cls, value]]: constant[ write binary data into redis without encoding it. :param value: bytes :return: bytes ] <ast.Try object at 0x7da1b0a6cc70> <ast.Raise object at 0x7da1b0a23460>
keyword[def] identifier[encode] ( identifier[cls] , identifier[value] ): literal[string] keyword[try] : identifier[coerced] = identifier[bytes] ( identifier[value] ) keyword[if] identifier[coerced] == identifier[value] : keyword[return] identifier[coerced] keyword[except] ( identifier[TypeError] , identifier[UnicodeError] ): keyword[pass] keyword[raise] identifier[InvalidValue] ( literal[string] )
def encode(cls, value): """ write binary data into redis without encoding it. :param value: bytes :return: bytes """ try: coerced = bytes(value) if coerced == value: return coerced # depends on [control=['if'], data=['coerced']] # depends on [control=['try'], data=[]] except (TypeError, UnicodeError): pass # depends on [control=['except'], data=[]] raise InvalidValue('not binary')
def complete_handshake(self): """ Tells `Packetizer` that the handshake has completed. """ if self.__timer: self.__timer.cancel() self.__timer_expired = False self.__handshake_complete = True
def function[complete_handshake, parameter[self]]: constant[ Tells `Packetizer` that the handshake has completed. ] if name[self].__timer begin[:] call[name[self].__timer.cancel, parameter[]] name[self].__timer_expired assign[=] constant[False] name[self].__handshake_complete assign[=] constant[True]
keyword[def] identifier[complete_handshake] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[__timer] : identifier[self] . identifier[__timer] . identifier[cancel] () identifier[self] . identifier[__timer_expired] = keyword[False] identifier[self] . identifier[__handshake_complete] = keyword[True]
def complete_handshake(self): """ Tells `Packetizer` that the handshake has completed. """ if self.__timer: self.__timer.cancel() self.__timer_expired = False self.__handshake_complete = True # depends on [control=['if'], data=[]]
def battery_charge_current(self): """ Returns current in mA """ msb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_MSB_REG) lsb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_LSB_REG) # (12 bits) charge_bin = msb << 4 | lsb & 0x0f # 0 mV -> 000h, 0.5 mA/bit FFFh -> 1800 mA return charge_bin * 0.5
def function[battery_charge_current, parameter[self]]: constant[ Returns current in mA ] variable[msb] assign[=] call[name[self].bus.read_byte_data, parameter[name[AXP209_ADDRESS], name[BATTERY_CHARGE_CURRENT_MSB_REG]]] variable[lsb] assign[=] call[name[self].bus.read_byte_data, parameter[name[AXP209_ADDRESS], name[BATTERY_CHARGE_CURRENT_LSB_REG]]] variable[charge_bin] assign[=] binary_operation[binary_operation[name[msb] <ast.LShift object at 0x7da2590d69e0> constant[4]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[lsb] <ast.BitAnd object at 0x7da2590d6b60> constant[15]]] return[binary_operation[name[charge_bin] * constant[0.5]]]
keyword[def] identifier[battery_charge_current] ( identifier[self] ): literal[string] identifier[msb] = identifier[self] . identifier[bus] . identifier[read_byte_data] ( identifier[AXP209_ADDRESS] , identifier[BATTERY_CHARGE_CURRENT_MSB_REG] ) identifier[lsb] = identifier[self] . identifier[bus] . identifier[read_byte_data] ( identifier[AXP209_ADDRESS] , identifier[BATTERY_CHARGE_CURRENT_LSB_REG] ) identifier[charge_bin] = identifier[msb] << literal[int] | identifier[lsb] & literal[int] keyword[return] identifier[charge_bin] * literal[int]
def battery_charge_current(self): """ Returns current in mA """ msb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_MSB_REG) lsb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_LSB_REG) # (12 bits) charge_bin = msb << 4 | lsb & 15 # 0 mV -> 000h, 0.5 mA/bit FFFh -> 1800 mA return charge_bin * 0.5
def _create_alignment_button(self): """Creates vertical alignment button""" iconnames = ["AlignTop", "AlignCenter", "AlignBottom"] bmplist = [icons[iconname] for iconname in iconnames] self.alignment_tb = _widgets.BitmapToggleButton(self, bmplist) self.alignment_tb.SetToolTipString(_(u"Alignment")) self.Bind(wx.EVT_BUTTON, self.OnAlignment, self.alignment_tb) self.AddControl(self.alignment_tb)
def function[_create_alignment_button, parameter[self]]: constant[Creates vertical alignment button] variable[iconnames] assign[=] list[[<ast.Constant object at 0x7da1b16a5630>, <ast.Constant object at 0x7da1b16a5d80>, <ast.Constant object at 0x7da1b16a5060>]] variable[bmplist] assign[=] <ast.ListComp object at 0x7da1b16a7460> name[self].alignment_tb assign[=] call[name[_widgets].BitmapToggleButton, parameter[name[self], name[bmplist]]] call[name[self].alignment_tb.SetToolTipString, parameter[call[name[_], parameter[constant[Alignment]]]]] call[name[self].Bind, parameter[name[wx].EVT_BUTTON, name[self].OnAlignment, name[self].alignment_tb]] call[name[self].AddControl, parameter[name[self].alignment_tb]]
keyword[def] identifier[_create_alignment_button] ( identifier[self] ): literal[string] identifier[iconnames] =[ literal[string] , literal[string] , literal[string] ] identifier[bmplist] =[ identifier[icons] [ identifier[iconname] ] keyword[for] identifier[iconname] keyword[in] identifier[iconnames] ] identifier[self] . identifier[alignment_tb] = identifier[_widgets] . identifier[BitmapToggleButton] ( identifier[self] , identifier[bmplist] ) identifier[self] . identifier[alignment_tb] . identifier[SetToolTipString] ( identifier[_] ( literal[string] )) identifier[self] . identifier[Bind] ( identifier[wx] . identifier[EVT_BUTTON] , identifier[self] . identifier[OnAlignment] , identifier[self] . identifier[alignment_tb] ) identifier[self] . identifier[AddControl] ( identifier[self] . identifier[alignment_tb] )
def _create_alignment_button(self): """Creates vertical alignment button""" iconnames = ['AlignTop', 'AlignCenter', 'AlignBottom'] bmplist = [icons[iconname] for iconname in iconnames] self.alignment_tb = _widgets.BitmapToggleButton(self, bmplist) self.alignment_tb.SetToolTipString(_(u'Alignment')) self.Bind(wx.EVT_BUTTON, self.OnAlignment, self.alignment_tb) self.AddControl(self.alignment_tb)
def plot(self): """ After model execution, print the dataset, curve-fit, BMD, and BMDL. Example ------- >>> import os >>> fn = os.path.expanduser('~/Desktop/image.png') >>> fig = model.plot() >>> fig.savefig(fn) >>> fig.clear() .. figure:: ../tests/resources/test_exponential_m4_plot.png :scale: 80% :align: center :alt: Example generated BMD plot BMD models can generate plots using the ``plot()`` method; an example is shown here. """ fig = self.dataset.plot() ax = fig.gca() ax.set_title( "{}\n{}, {}".format(self.dataset._get_dataset_name(), self.name, self.get_bmr_text()) ) if self.has_successfully_executed: self._set_x_range(ax) ax.plot(self._xs, self.get_ys(self._xs), label=self.name, **plotting.LINE_FORMAT) self._add_bmr_lines(ax) else: self._add_plot_failure(ax) ax.legend(**settings.LEGEND_OPTS) return fig
def function[plot, parameter[self]]: constant[ After model execution, print the dataset, curve-fit, BMD, and BMDL. Example ------- >>> import os >>> fn = os.path.expanduser('~/Desktop/image.png') >>> fig = model.plot() >>> fig.savefig(fn) >>> fig.clear() .. figure:: ../tests/resources/test_exponential_m4_plot.png :scale: 80% :align: center :alt: Example generated BMD plot BMD models can generate plots using the ``plot()`` method; an example is shown here. ] variable[fig] assign[=] call[name[self].dataset.plot, parameter[]] variable[ax] assign[=] call[name[fig].gca, parameter[]] call[name[ax].set_title, parameter[call[constant[{} {}, {}].format, parameter[call[name[self].dataset._get_dataset_name, parameter[]], name[self].name, call[name[self].get_bmr_text, parameter[]]]]]] if name[self].has_successfully_executed begin[:] call[name[self]._set_x_range, parameter[name[ax]]] call[name[ax].plot, parameter[name[self]._xs, call[name[self].get_ys, parameter[name[self]._xs]]]] call[name[self]._add_bmr_lines, parameter[name[ax]]] call[name[ax].legend, parameter[]] return[name[fig]]
keyword[def] identifier[plot] ( identifier[self] ): literal[string] identifier[fig] = identifier[self] . identifier[dataset] . identifier[plot] () identifier[ax] = identifier[fig] . identifier[gca] () identifier[ax] . identifier[set_title] ( literal[string] . identifier[format] ( identifier[self] . identifier[dataset] . identifier[_get_dataset_name] (), identifier[self] . identifier[name] , identifier[self] . identifier[get_bmr_text] ()) ) keyword[if] identifier[self] . identifier[has_successfully_executed] : identifier[self] . identifier[_set_x_range] ( identifier[ax] ) identifier[ax] . identifier[plot] ( identifier[self] . identifier[_xs] , identifier[self] . identifier[get_ys] ( identifier[self] . identifier[_xs] ), identifier[label] = identifier[self] . identifier[name] ,** identifier[plotting] . identifier[LINE_FORMAT] ) identifier[self] . identifier[_add_bmr_lines] ( identifier[ax] ) keyword[else] : identifier[self] . identifier[_add_plot_failure] ( identifier[ax] ) identifier[ax] . identifier[legend] (** identifier[settings] . identifier[LEGEND_OPTS] ) keyword[return] identifier[fig]
def plot(self): """ After model execution, print the dataset, curve-fit, BMD, and BMDL. Example ------- >>> import os >>> fn = os.path.expanduser('~/Desktop/image.png') >>> fig = model.plot() >>> fig.savefig(fn) >>> fig.clear() .. figure:: ../tests/resources/test_exponential_m4_plot.png :scale: 80% :align: center :alt: Example generated BMD plot BMD models can generate plots using the ``plot()`` method; an example is shown here. """ fig = self.dataset.plot() ax = fig.gca() ax.set_title('{}\n{}, {}'.format(self.dataset._get_dataset_name(), self.name, self.get_bmr_text())) if self.has_successfully_executed: self._set_x_range(ax) ax.plot(self._xs, self.get_ys(self._xs), label=self.name, **plotting.LINE_FORMAT) self._add_bmr_lines(ax) # depends on [control=['if'], data=[]] else: self._add_plot_failure(ax) ax.legend(**settings.LEGEND_OPTS) return fig
def parse_value(self, frmtcode, buff, start, end): ''' parse the value from the buffer given the interval for the appropraite bytes :param formattype: :param buff: :param start: :param end: ''' frmttype = self.util._formatmap[frmtcode] if(frmtcode == FormatType.FLOAT_32.value.code): # @UndefinedVariable return self.parse_float32(buff, start, end) elif(frmtcode == FormatType.FLOAT_64.value.code): # @UndefinedVariable return self.parse_float64(buff, start, end) if frmttype.value.idx <= FormatType.FIXSTR.value.idx: # @UndefinedVariable return self.parse_str(buff, start, end) elif frmttype.value.idx <= FormatType.INT_64.value.idx: # @UndefinedVariable return self.parse_int(buff, start, end) elif frmttype.value.idx <= FormatType.UINT_64.value.idx: # @UndefinedVariable return self.parse_uint(buff, start, end)
def function[parse_value, parameter[self, frmtcode, buff, start, end]]: constant[ parse the value from the buffer given the interval for the appropraite bytes :param formattype: :param buff: :param start: :param end: ] variable[frmttype] assign[=] call[name[self].util._formatmap][name[frmtcode]] if compare[name[frmtcode] equal[==] name[FormatType].FLOAT_32.value.code] begin[:] return[call[name[self].parse_float32, parameter[name[buff], name[start], name[end]]]] if compare[name[frmttype].value.idx less_or_equal[<=] name[FormatType].FIXSTR.value.idx] begin[:] return[call[name[self].parse_str, parameter[name[buff], name[start], name[end]]]]
keyword[def] identifier[parse_value] ( identifier[self] , identifier[frmtcode] , identifier[buff] , identifier[start] , identifier[end] ): literal[string] identifier[frmttype] = identifier[self] . identifier[util] . identifier[_formatmap] [ identifier[frmtcode] ] keyword[if] ( identifier[frmtcode] == identifier[FormatType] . identifier[FLOAT_32] . identifier[value] . identifier[code] ): keyword[return] identifier[self] . identifier[parse_float32] ( identifier[buff] , identifier[start] , identifier[end] ) keyword[elif] ( identifier[frmtcode] == identifier[FormatType] . identifier[FLOAT_64] . identifier[value] . identifier[code] ): keyword[return] identifier[self] . identifier[parse_float64] ( identifier[buff] , identifier[start] , identifier[end] ) keyword[if] identifier[frmttype] . identifier[value] . identifier[idx] <= identifier[FormatType] . identifier[FIXSTR] . identifier[value] . identifier[idx] : keyword[return] identifier[self] . identifier[parse_str] ( identifier[buff] , identifier[start] , identifier[end] ) keyword[elif] identifier[frmttype] . identifier[value] . identifier[idx] <= identifier[FormatType] . identifier[INT_64] . identifier[value] . identifier[idx] : keyword[return] identifier[self] . identifier[parse_int] ( identifier[buff] , identifier[start] , identifier[end] ) keyword[elif] identifier[frmttype] . identifier[value] . identifier[idx] <= identifier[FormatType] . identifier[UINT_64] . identifier[value] . identifier[idx] : keyword[return] identifier[self] . identifier[parse_uint] ( identifier[buff] , identifier[start] , identifier[end] )
def parse_value(self, frmtcode, buff, start, end): """ parse the value from the buffer given the interval for the appropraite bytes :param formattype: :param buff: :param start: :param end: """ frmttype = self.util._formatmap[frmtcode] if frmtcode == FormatType.FLOAT_32.value.code: # @UndefinedVariable return self.parse_float32(buff, start, end) # depends on [control=['if'], data=[]] elif frmtcode == FormatType.FLOAT_64.value.code: # @UndefinedVariable return self.parse_float64(buff, start, end) # depends on [control=['if'], data=[]] if frmttype.value.idx <= FormatType.FIXSTR.value.idx: # @UndefinedVariable return self.parse_str(buff, start, end) # depends on [control=['if'], data=[]] elif frmttype.value.idx <= FormatType.INT_64.value.idx: # @UndefinedVariable return self.parse_int(buff, start, end) # depends on [control=['if'], data=[]] elif frmttype.value.idx <= FormatType.UINT_64.value.idx: # @UndefinedVariable return self.parse_uint(buff, start, end) # depends on [control=['if'], data=[]]
def get_stp_mst_detail_output_msti_port_designated_bridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") msti = ET.SubElement(output, "msti") instance_id_key = ET.SubElement(msti, "instance-id") instance_id_key.text = kwargs.pop('instance_id') port = ET.SubElement(msti, "port") designated_bridge_id = ET.SubElement(port, "designated-bridge-id") designated_bridge_id.text = kwargs.pop('designated_bridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[get_stp_mst_detail_output_msti_port_designated_bridge_id, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[get_stp_mst_detail] assign[=] call[name[ET].Element, parameter[constant[get_stp_mst_detail]]] variable[config] assign[=] name[get_stp_mst_detail] variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_stp_mst_detail], constant[output]]] variable[msti] assign[=] call[name[ET].SubElement, parameter[name[output], constant[msti]]] variable[instance_id_key] assign[=] call[name[ET].SubElement, parameter[name[msti], constant[instance-id]]] name[instance_id_key].text assign[=] call[name[kwargs].pop, parameter[constant[instance_id]]] variable[port] assign[=] call[name[ET].SubElement, parameter[name[msti], constant[port]]] variable[designated_bridge_id] assign[=] call[name[ET].SubElement, parameter[name[port], constant[designated-bridge-id]]] name[designated_bridge_id].text assign[=] call[name[kwargs].pop, parameter[constant[designated_bridge_id]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[get_stp_mst_detail_output_msti_port_designated_bridge_id] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[get_stp_mst_detail] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[get_stp_mst_detail] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_stp_mst_detail] , literal[string] ) identifier[msti] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[instance_id_key] = identifier[ET] . identifier[SubElement] ( identifier[msti] , literal[string] ) identifier[instance_id_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[port] = identifier[ET] . identifier[SubElement] ( identifier[msti] , literal[string] ) identifier[designated_bridge_id] = identifier[ET] . identifier[SubElement] ( identifier[port] , literal[string] ) identifier[designated_bridge_id] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def get_stp_mst_detail_output_msti_port_designated_bridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') get_stp_mst_detail = ET.Element('get_stp_mst_detail') config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, 'output') msti = ET.SubElement(output, 'msti') instance_id_key = ET.SubElement(msti, 'instance-id') instance_id_key.text = kwargs.pop('instance_id') port = ET.SubElement(msti, 'port') designated_bridge_id = ET.SubElement(port, 'designated-bridge-id') designated_bridge_id.text = kwargs.pop('designated_bridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
def derivative(self, point): """Derivative of this operator at ``point``. For the particular case of constant padding with non-zero constant, the derivative is the corresponding zero-padding variant. In all other cases, this operator is linear, i.e. the derivative is equal to ``self``. """ if self.pad_mode == 'constant' and self.pad_const != 0: return ResizingOperator( domain=self.domain, range=self.range, pad_mode='constant', pad_const=0.0) else: # operator is linear return self
def function[derivative, parameter[self, point]]: constant[Derivative of this operator at ``point``. For the particular case of constant padding with non-zero constant, the derivative is the corresponding zero-padding variant. In all other cases, this operator is linear, i.e. the derivative is equal to ``self``. ] if <ast.BoolOp object at 0x7da1b1e93220> begin[:] return[call[name[ResizingOperator], parameter[]]]
keyword[def] identifier[derivative] ( identifier[self] , identifier[point] ): literal[string] keyword[if] identifier[self] . identifier[pad_mode] == literal[string] keyword[and] identifier[self] . identifier[pad_const] != literal[int] : keyword[return] identifier[ResizingOperator] ( identifier[domain] = identifier[self] . identifier[domain] , identifier[range] = identifier[self] . identifier[range] , identifier[pad_mode] = literal[string] , identifier[pad_const] = literal[int] ) keyword[else] : keyword[return] identifier[self]
def derivative(self, point): """Derivative of this operator at ``point``. For the particular case of constant padding with non-zero constant, the derivative is the corresponding zero-padding variant. In all other cases, this operator is linear, i.e. the derivative is equal to ``self``. """ if self.pad_mode == 'constant' and self.pad_const != 0: return ResizingOperator(domain=self.domain, range=self.range, pad_mode='constant', pad_const=0.0) # depends on [control=['if'], data=[]] else: # operator is linear return self
def download_workers(self): """ Return the number of parallel works to use when downloading a file. :return: int number of workers. Specify None or 1 to disable parallel downloading """ # Profiling download on different servers showed half the number of CPUs to be optimum for speed. default_workers = int(math.ceil(default_num_workers() / 2)) return self.values.get(Config.DOWNLOAD_WORKERS, default_workers)
def function[download_workers, parameter[self]]: constant[ Return the number of parallel works to use when downloading a file. :return: int number of workers. Specify None or 1 to disable parallel downloading ] variable[default_workers] assign[=] call[name[int], parameter[call[name[math].ceil, parameter[binary_operation[call[name[default_num_workers], parameter[]] / constant[2]]]]]] return[call[name[self].values.get, parameter[name[Config].DOWNLOAD_WORKERS, name[default_workers]]]]
keyword[def] identifier[download_workers] ( identifier[self] ): literal[string] identifier[default_workers] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[default_num_workers] ()/ literal[int] )) keyword[return] identifier[self] . identifier[values] . identifier[get] ( identifier[Config] . identifier[DOWNLOAD_WORKERS] , identifier[default_workers] )
def download_workers(self): """ Return the number of parallel works to use when downloading a file. :return: int number of workers. Specify None or 1 to disable parallel downloading """ # Profiling download on different servers showed half the number of CPUs to be optimum for speed. default_workers = int(math.ceil(default_num_workers() / 2)) return self.values.get(Config.DOWNLOAD_WORKERS, default_workers)
def symlink_bundles(self, app, bundle_dir): """For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle. """ for bundle_counter, bundle in enumerate(app.bundles): count = 0 for path, relpath in bundle.filemap.items(): bundle_path = os.path.join(bundle_dir, relpath) count += 1 if os.path.exists(bundle_path): continue if os.path.isfile(path): safe_mkdir(os.path.dirname(bundle_path)) os.symlink(path, bundle_path) elif os.path.isdir(path): safe_mkdir(bundle_path) if count == 0: raise TargetDefinitionException(app.target, 'Bundle index {} of "bundles" field ' 'does not match any files.'.format(bundle_counter))
def function[symlink_bundles, parameter[self, app, bundle_dir]]: constant[For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle. ] for taget[tuple[[<ast.Name object at 0x7da1b1e5cb50>, <ast.Name object at 0x7da1b1e5ca30>]]] in starred[call[name[enumerate], parameter[name[app].bundles]]] begin[:] variable[count] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da1b1e5d3c0>, <ast.Name object at 0x7da1b1e5e200>]]] in starred[call[name[bundle].filemap.items, parameter[]]] begin[:] variable[bundle_path] assign[=] call[name[os].path.join, parameter[name[bundle_dir], name[relpath]]] <ast.AugAssign object at 0x7da1b1e5e7a0> if call[name[os].path.exists, parameter[name[bundle_path]]] begin[:] continue if call[name[os].path.isfile, parameter[name[path]]] begin[:] call[name[safe_mkdir], parameter[call[name[os].path.dirname, parameter[name[bundle_path]]]]] call[name[os].symlink, parameter[name[path], name[bundle_path]]] if compare[name[count] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da18bccba30>
keyword[def] identifier[symlink_bundles] ( identifier[self] , identifier[app] , identifier[bundle_dir] ): literal[string] keyword[for] identifier[bundle_counter] , identifier[bundle] keyword[in] identifier[enumerate] ( identifier[app] . identifier[bundles] ): identifier[count] = literal[int] keyword[for] identifier[path] , identifier[relpath] keyword[in] identifier[bundle] . identifier[filemap] . identifier[items] (): identifier[bundle_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[bundle_dir] , identifier[relpath] ) identifier[count] += literal[int] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[bundle_path] ): keyword[continue] keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ): identifier[safe_mkdir] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[bundle_path] )) identifier[os] . identifier[symlink] ( identifier[path] , identifier[bundle_path] ) keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ): identifier[safe_mkdir] ( identifier[bundle_path] ) keyword[if] identifier[count] == literal[int] : keyword[raise] identifier[TargetDefinitionException] ( identifier[app] . identifier[target] , literal[string] literal[string] . identifier[format] ( identifier[bundle_counter] ))
def symlink_bundles(self, app, bundle_dir): """For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle. """ for (bundle_counter, bundle) in enumerate(app.bundles): count = 0 for (path, relpath) in bundle.filemap.items(): bundle_path = os.path.join(bundle_dir, relpath) count += 1 if os.path.exists(bundle_path): continue # depends on [control=['if'], data=[]] if os.path.isfile(path): safe_mkdir(os.path.dirname(bundle_path)) os.symlink(path, bundle_path) # depends on [control=['if'], data=[]] elif os.path.isdir(path): safe_mkdir(bundle_path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if count == 0: raise TargetDefinitionException(app.target, 'Bundle index {} of "bundles" field does not match any files.'.format(bundle_counter)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def setup_toolbar(self): """Setup the toolbar""" savefig_btn = create_toolbutton( self, icon=ima.icon('filesave'), tip=_("Save Image As..."), triggered=self.save_figure) saveall_btn = create_toolbutton( self, icon=ima.icon('save_all'), tip=_("Save All Images..."), triggered=self.save_all_figures) copyfig_btn = create_toolbutton( self, icon=ima.icon('editcopy'), tip=_("Copy plot to clipboard as image (%s)" % get_shortcut('plots', 'copy')), triggered=self.copy_figure) closefig_btn = create_toolbutton( self, icon=ima.icon('editclear'), tip=_("Remove image"), triggered=self.close_figure) closeall_btn = create_toolbutton( self, icon=ima.icon('filecloseall'), tip=_("Remove all images from the explorer"), triggered=self.close_all_figures) vsep1 = QFrame() vsep1.setFrameStyle(53) goback_btn = create_toolbutton( self, icon=ima.icon('ArrowBack'), tip=_("Previous Figure ({})".format( get_shortcut('plots', 'previous figure'))), triggered=self.go_previous_thumbnail) gonext_btn = create_toolbutton( self, icon=ima.icon('ArrowForward'), tip=_("Next Figure ({})".format( get_shortcut('plots', 'next figure'))), triggered=self.go_next_thumbnail) vsep2 = QFrame() vsep2.setFrameStyle(53) zoom_out_btn = create_toolbutton( self, icon=ima.icon('zoom_out'), tip=_("Zoom out (Ctrl + mouse-wheel-down)"), triggered=self.zoom_out) zoom_in_btn = create_toolbutton( self, icon=ima.icon('zoom_in'), tip=_("Zoom in (Ctrl + mouse-wheel-up)"), triggered=self.zoom_in) self.zoom_disp = QSpinBox() self.zoom_disp.setAlignment(Qt.AlignCenter) self.zoom_disp.setButtonSymbols(QSpinBox.NoButtons) self.zoom_disp.setReadOnly(True) self.zoom_disp.setSuffix(' %') self.zoom_disp.setRange(0, 9999) self.zoom_disp.setValue(100) self.figviewer.sig_zoom_changed.connect(self.zoom_disp.setValue) zoom_pan = QWidget() layout = QHBoxLayout(zoom_pan) layout.setSpacing(0) layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(zoom_out_btn) layout.addWidget(zoom_in_btn) layout.addWidget(self.zoom_disp) return [savefig_btn, 
saveall_btn, copyfig_btn, closefig_btn, closeall_btn, vsep1, goback_btn, gonext_btn, vsep2, zoom_pan]
def function[setup_toolbar, parameter[self]]: constant[Setup the toolbar] variable[savefig_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[saveall_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[copyfig_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[closefig_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[closeall_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[vsep1] assign[=] call[name[QFrame], parameter[]] call[name[vsep1].setFrameStyle, parameter[constant[53]]] variable[goback_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[gonext_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[vsep2] assign[=] call[name[QFrame], parameter[]] call[name[vsep2].setFrameStyle, parameter[constant[53]]] variable[zoom_out_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] variable[zoom_in_btn] assign[=] call[name[create_toolbutton], parameter[name[self]]] name[self].zoom_disp assign[=] call[name[QSpinBox], parameter[]] call[name[self].zoom_disp.setAlignment, parameter[name[Qt].AlignCenter]] call[name[self].zoom_disp.setButtonSymbols, parameter[name[QSpinBox].NoButtons]] call[name[self].zoom_disp.setReadOnly, parameter[constant[True]]] call[name[self].zoom_disp.setSuffix, parameter[constant[ %]]] call[name[self].zoom_disp.setRange, parameter[constant[0], constant[9999]]] call[name[self].zoom_disp.setValue, parameter[constant[100]]] call[name[self].figviewer.sig_zoom_changed.connect, parameter[name[self].zoom_disp.setValue]] variable[zoom_pan] assign[=] call[name[QWidget], parameter[]] variable[layout] assign[=] call[name[QHBoxLayout], parameter[name[zoom_pan]]] call[name[layout].setSpacing, parameter[constant[0]]] call[name[layout].setContentsMargins, parameter[constant[0], constant[0], constant[0], constant[0]]] call[name[layout].addWidget, 
parameter[name[zoom_out_btn]]] call[name[layout].addWidget, parameter[name[zoom_in_btn]]] call[name[layout].addWidget, parameter[name[self].zoom_disp]] return[list[[<ast.Name object at 0x7da18c4cfa60>, <ast.Name object at 0x7da18c4cdf30>, <ast.Name object at 0x7da18c4cd000>, <ast.Name object at 0x7da18c4ce290>, <ast.Name object at 0x7da18c4cc400>, <ast.Name object at 0x7da18c4cef80>, <ast.Name object at 0x7da18c4cfdc0>, <ast.Name object at 0x7da18c4cd780>, <ast.Name object at 0x7da18c4ccb50>, <ast.Name object at 0x7da18c4cffa0>]]]
keyword[def] identifier[setup_toolbar] ( identifier[self] ): literal[string] identifier[savefig_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] ), identifier[triggered] = identifier[self] . identifier[save_figure] ) identifier[saveall_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] ), identifier[triggered] = identifier[self] . identifier[save_all_figures] ) identifier[copyfig_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] % identifier[get_shortcut] ( literal[string] , literal[string] )), identifier[triggered] = identifier[self] . identifier[copy_figure] ) identifier[closefig_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] ), identifier[triggered] = identifier[self] . identifier[close_figure] ) identifier[closeall_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] ), identifier[triggered] = identifier[self] . identifier[close_all_figures] ) identifier[vsep1] = identifier[QFrame] () identifier[vsep1] . identifier[setFrameStyle] ( literal[int] ) identifier[goback_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] . identifier[format] ( identifier[get_shortcut] ( literal[string] , literal[string] ))), identifier[triggered] = identifier[self] . 
identifier[go_previous_thumbnail] ) identifier[gonext_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] . identifier[format] ( identifier[get_shortcut] ( literal[string] , literal[string] ))), identifier[triggered] = identifier[self] . identifier[go_next_thumbnail] ) identifier[vsep2] = identifier[QFrame] () identifier[vsep2] . identifier[setFrameStyle] ( literal[int] ) identifier[zoom_out_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] ), identifier[triggered] = identifier[self] . identifier[zoom_out] ) identifier[zoom_in_btn] = identifier[create_toolbutton] ( identifier[self] , identifier[icon] = identifier[ima] . identifier[icon] ( literal[string] ), identifier[tip] = identifier[_] ( literal[string] ), identifier[triggered] = identifier[self] . identifier[zoom_in] ) identifier[self] . identifier[zoom_disp] = identifier[QSpinBox] () identifier[self] . identifier[zoom_disp] . identifier[setAlignment] ( identifier[Qt] . identifier[AlignCenter] ) identifier[self] . identifier[zoom_disp] . identifier[setButtonSymbols] ( identifier[QSpinBox] . identifier[NoButtons] ) identifier[self] . identifier[zoom_disp] . identifier[setReadOnly] ( keyword[True] ) identifier[self] . identifier[zoom_disp] . identifier[setSuffix] ( literal[string] ) identifier[self] . identifier[zoom_disp] . identifier[setRange] ( literal[int] , literal[int] ) identifier[self] . identifier[zoom_disp] . identifier[setValue] ( literal[int] ) identifier[self] . identifier[figviewer] . identifier[sig_zoom_changed] . identifier[connect] ( identifier[self] . identifier[zoom_disp] . identifier[setValue] ) identifier[zoom_pan] = identifier[QWidget] () identifier[layout] = identifier[QHBoxLayout] ( identifier[zoom_pan] ) identifier[layout] . 
identifier[setSpacing] ( literal[int] ) identifier[layout] . identifier[setContentsMargins] ( literal[int] , literal[int] , literal[int] , literal[int] ) identifier[layout] . identifier[addWidget] ( identifier[zoom_out_btn] ) identifier[layout] . identifier[addWidget] ( identifier[zoom_in_btn] ) identifier[layout] . identifier[addWidget] ( identifier[self] . identifier[zoom_disp] ) keyword[return] [ identifier[savefig_btn] , identifier[saveall_btn] , identifier[copyfig_btn] , identifier[closefig_btn] , identifier[closeall_btn] , identifier[vsep1] , identifier[goback_btn] , identifier[gonext_btn] , identifier[vsep2] , identifier[zoom_pan] ]
def setup_toolbar(self): """Setup the toolbar""" savefig_btn = create_toolbutton(self, icon=ima.icon('filesave'), tip=_('Save Image As...'), triggered=self.save_figure) saveall_btn = create_toolbutton(self, icon=ima.icon('save_all'), tip=_('Save All Images...'), triggered=self.save_all_figures) copyfig_btn = create_toolbutton(self, icon=ima.icon('editcopy'), tip=_('Copy plot to clipboard as image (%s)' % get_shortcut('plots', 'copy')), triggered=self.copy_figure) closefig_btn = create_toolbutton(self, icon=ima.icon('editclear'), tip=_('Remove image'), triggered=self.close_figure) closeall_btn = create_toolbutton(self, icon=ima.icon('filecloseall'), tip=_('Remove all images from the explorer'), triggered=self.close_all_figures) vsep1 = QFrame() vsep1.setFrameStyle(53) goback_btn = create_toolbutton(self, icon=ima.icon('ArrowBack'), tip=_('Previous Figure ({})'.format(get_shortcut('plots', 'previous figure'))), triggered=self.go_previous_thumbnail) gonext_btn = create_toolbutton(self, icon=ima.icon('ArrowForward'), tip=_('Next Figure ({})'.format(get_shortcut('plots', 'next figure'))), triggered=self.go_next_thumbnail) vsep2 = QFrame() vsep2.setFrameStyle(53) zoom_out_btn = create_toolbutton(self, icon=ima.icon('zoom_out'), tip=_('Zoom out (Ctrl + mouse-wheel-down)'), triggered=self.zoom_out) zoom_in_btn = create_toolbutton(self, icon=ima.icon('zoom_in'), tip=_('Zoom in (Ctrl + mouse-wheel-up)'), triggered=self.zoom_in) self.zoom_disp = QSpinBox() self.zoom_disp.setAlignment(Qt.AlignCenter) self.zoom_disp.setButtonSymbols(QSpinBox.NoButtons) self.zoom_disp.setReadOnly(True) self.zoom_disp.setSuffix(' %') self.zoom_disp.setRange(0, 9999) self.zoom_disp.setValue(100) self.figviewer.sig_zoom_changed.connect(self.zoom_disp.setValue) zoom_pan = QWidget() layout = QHBoxLayout(zoom_pan) layout.setSpacing(0) layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(zoom_out_btn) layout.addWidget(zoom_in_btn) layout.addWidget(self.zoom_disp) return [savefig_btn, saveall_btn, 
copyfig_btn, closefig_btn, closeall_btn, vsep1, goback_btn, gonext_btn, vsep2, zoom_pan]
def _initializes_minimum_needs_fields(): """Initialize minimum needs fields. Minimum needs definitions are taken from currently used profile. """ needs_profile = NeedsProfile() needs_profile.load() fields = [] needs_parameters = needs_profile.get_needs_parameters() for need_parameter in needs_parameters: if isinstance(need_parameter, ResourceParameter): format_args = { 'namespace': minimum_needs_namespace, 'key': _normalize_field_name(need_parameter.name), 'name': need_parameter.name, 'field_name': _normalize_field_name(need_parameter.name), } key = '{namespace}__{key}_count_field'.format(**format_args) name = '{name}'.format(**format_args) field_name = '{namespace}__{field_name}'.format(**format_args) field_type = QVariant.LongLong # See issue #4039 length = 11 # See issue #4039 precision = 0 absolute = True replace_null = False description = need_parameter.description field_definition = { 'key': key, 'name': name, 'field_name': field_name, 'type': field_type, 'length': length, 'precision': precision, 'absolute': absolute, 'description': description, 'replace_null': replace_null, 'unit_abbreviation': need_parameter.unit.abbreviation, # Link to need_parameter 'need_parameter': need_parameter } fields.append(field_definition) return fields
def function[_initializes_minimum_needs_fields, parameter[]]: constant[Initialize minimum needs fields. Minimum needs definitions are taken from currently used profile. ] variable[needs_profile] assign[=] call[name[NeedsProfile], parameter[]] call[name[needs_profile].load, parameter[]] variable[fields] assign[=] list[[]] variable[needs_parameters] assign[=] call[name[needs_profile].get_needs_parameters, parameter[]] for taget[name[need_parameter]] in starred[name[needs_parameters]] begin[:] if call[name[isinstance], parameter[name[need_parameter], name[ResourceParameter]]] begin[:] variable[format_args] assign[=] dictionary[[<ast.Constant object at 0x7da2047ea890>, <ast.Constant object at 0x7da2047eaa40>, <ast.Constant object at 0x7da2047e8910>, <ast.Constant object at 0x7da2047e96c0>], [<ast.Name object at 0x7da2047e9210>, <ast.Call object at 0x7da18f00d060>, <ast.Attribute object at 0x7da18f00e350>, <ast.Call object at 0x7da18f00d2a0>]] variable[key] assign[=] call[constant[{namespace}__{key}_count_field].format, parameter[]] variable[name] assign[=] call[constant[{name}].format, parameter[]] variable[field_name] assign[=] call[constant[{namespace}__{field_name}].format, parameter[]] variable[field_type] assign[=] name[QVariant].LongLong variable[length] assign[=] constant[11] variable[precision] assign[=] constant[0] variable[absolute] assign[=] constant[True] variable[replace_null] assign[=] constant[False] variable[description] assign[=] name[need_parameter].description variable[field_definition] assign[=] dictionary[[<ast.Constant object at 0x7da204623a60>, <ast.Constant object at 0x7da204621390>, <ast.Constant object at 0x7da204620dc0>, <ast.Constant object at 0x7da204622b00>, <ast.Constant object at 0x7da204623820>, <ast.Constant object at 0x7da204620760>, <ast.Constant object at 0x7da204622410>, <ast.Constant object at 0x7da204620a90>, <ast.Constant object at 0x7da1b23443a0>, <ast.Constant object at 0x7da1b23471f0>, <ast.Constant object at 
0x7da1b23460b0>], [<ast.Name object at 0x7da1b2345ed0>, <ast.Name object at 0x7da1b2345000>, <ast.Name object at 0x7da1b2344dc0>, <ast.Name object at 0x7da1b2345c90>, <ast.Name object at 0x7da1b23449d0>, <ast.Name object at 0x7da1b2344c70>, <ast.Name object at 0x7da1b2347dc0>, <ast.Name object at 0x7da1b2347d60>, <ast.Name object at 0x7da1b2344ac0>, <ast.Attribute object at 0x7da1b2347ac0>, <ast.Name object at 0x7da1b2347280>]] call[name[fields].append, parameter[name[field_definition]]] return[name[fields]]
keyword[def] identifier[_initializes_minimum_needs_fields] (): literal[string] identifier[needs_profile] = identifier[NeedsProfile] () identifier[needs_profile] . identifier[load] () identifier[fields] =[] identifier[needs_parameters] = identifier[needs_profile] . identifier[get_needs_parameters] () keyword[for] identifier[need_parameter] keyword[in] identifier[needs_parameters] : keyword[if] identifier[isinstance] ( identifier[need_parameter] , identifier[ResourceParameter] ): identifier[format_args] ={ literal[string] : identifier[minimum_needs_namespace] , literal[string] : identifier[_normalize_field_name] ( identifier[need_parameter] . identifier[name] ), literal[string] : identifier[need_parameter] . identifier[name] , literal[string] : identifier[_normalize_field_name] ( identifier[need_parameter] . identifier[name] ), } identifier[key] = literal[string] . identifier[format] (** identifier[format_args] ) identifier[name] = literal[string] . identifier[format] (** identifier[format_args] ) identifier[field_name] = literal[string] . identifier[format] (** identifier[format_args] ) identifier[field_type] = identifier[QVariant] . identifier[LongLong] identifier[length] = literal[int] identifier[precision] = literal[int] identifier[absolute] = keyword[True] identifier[replace_null] = keyword[False] identifier[description] = identifier[need_parameter] . identifier[description] identifier[field_definition] ={ literal[string] : identifier[key] , literal[string] : identifier[name] , literal[string] : identifier[field_name] , literal[string] : identifier[field_type] , literal[string] : identifier[length] , literal[string] : identifier[precision] , literal[string] : identifier[absolute] , literal[string] : identifier[description] , literal[string] : identifier[replace_null] , literal[string] : identifier[need_parameter] . identifier[unit] . identifier[abbreviation] , literal[string] : identifier[need_parameter] } identifier[fields] . 
identifier[append] ( identifier[field_definition] ) keyword[return] identifier[fields]
def _initializes_minimum_needs_fields(): """Initialize minimum needs fields. Minimum needs definitions are taken from currently used profile. """ needs_profile = NeedsProfile() needs_profile.load() fields = [] needs_parameters = needs_profile.get_needs_parameters() for need_parameter in needs_parameters: if isinstance(need_parameter, ResourceParameter): format_args = {'namespace': minimum_needs_namespace, 'key': _normalize_field_name(need_parameter.name), 'name': need_parameter.name, 'field_name': _normalize_field_name(need_parameter.name)} key = '{namespace}__{key}_count_field'.format(**format_args) name = '{name}'.format(**format_args) field_name = '{namespace}__{field_name}'.format(**format_args) field_type = QVariant.LongLong # See issue #4039 length = 11 # See issue #4039 precision = 0 absolute = True replace_null = False description = need_parameter.description # Link to need_parameter field_definition = {'key': key, 'name': name, 'field_name': field_name, 'type': field_type, 'length': length, 'precision': precision, 'absolute': absolute, 'description': description, 'replace_null': replace_null, 'unit_abbreviation': need_parameter.unit.abbreviation, 'need_parameter': need_parameter} fields.append(field_definition) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['need_parameter']] return fields
def decode(self, bytes): """Decodes the given bytes according to this AIT Argument Definition. """ value = self.type.decode(bytes) if self._enum is not None: for name, val in self._enum.items(): if value == val: value = name break return value
def function[decode, parameter[self, bytes]]: constant[Decodes the given bytes according to this AIT Argument Definition. ] variable[value] assign[=] call[name[self].type.decode, parameter[name[bytes]]] if compare[name[self]._enum is_not constant[None]] begin[:] for taget[tuple[[<ast.Name object at 0x7da207f9a110>, <ast.Name object at 0x7da207f99a80>]]] in starred[call[name[self]._enum.items, parameter[]]] begin[:] if compare[name[value] equal[==] name[val]] begin[:] variable[value] assign[=] name[name] break return[name[value]]
keyword[def] identifier[decode] ( identifier[self] , identifier[bytes] ): literal[string] identifier[value] = identifier[self] . identifier[type] . identifier[decode] ( identifier[bytes] ) keyword[if] identifier[self] . identifier[_enum] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[name] , identifier[val] keyword[in] identifier[self] . identifier[_enum] . identifier[items] (): keyword[if] identifier[value] == identifier[val] : identifier[value] = identifier[name] keyword[break] keyword[return] identifier[value]
def decode(self, bytes): """Decodes the given bytes according to this AIT Argument Definition. """ value = self.type.decode(bytes) if self._enum is not None: for (name, val) in self._enum.items(): if value == val: value = name break # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] return value
def groupby(self, key_column_names, operations, *args):
    """
    Perform a group on the key_column_names followed by aggregations on the
    columns listed in operations.

    The operations parameter is a dictionary that indicates which
    aggregation operators to use and which columns to use them on. The
    available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
    SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
    MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
    VAR. See :mod:`~turicreate.aggregate` for more detail on the
    aggregators.

    Parameters
    ----------
    key_column_names : string | list[string]
        Column(s) to group by. Key columns can be of any type other than
        dictionary.

    operations : dict, list
        Dictionary of columns and aggregation operations. Each key is a
        output column name and each value is an aggregator. This can also
        be a list of aggregators, in which case column names will be
        automatically assigned.

    *args
        All other remaining arguments will be interpreted in the same
        way as the operations argument.

    Returns
    -------
    out_sf : SFrame
        A new SFrame, with a column for each groupby column and each
        aggregation operation.

    See Also
    --------
    aggregate

    Notes
    -----
    * Numeric aggregators (such as sum, mean, stdev etc.) follow the skip
      None policy i.e they will omit all missing values from the
      aggregation. As an example, `sum([None, 5, 10]) = 15` because the
      `None` value is skipped.
    * Aggregators have a default value when no values (after skipping all
      `None` values) are present. Default values are `None` for ['ARGMAX',
      'ARGMIN', 'AVG', 'STD', 'MEAN', 'MIN', 'MAX'], `0` for ['COUNT'
      'COUNT_DISTINCT', 'DISTINCT'] `[]` for 'CONCAT', 'QUANTILE',
      'DISTINCT', and `{}` for 'FREQ_COUNT'.

    Examples
    --------
    Suppose we have an SFrame with movie ratings by many users.

    >>> import turicreate.aggregate as agg
    >>> url = 'https://static.turi.com/datasets/rating_data_example.csv'
    >>> sf = turicreate.SFrame.read_csv(url)
    >>> sf
    +---------+----------+--------+
    | user_id | movie_id | rating |
    +---------+----------+--------+
    |  25904  |   1663   |   3    |
    |  25907  |   1663   |   3    |
    |  25923  |   1663   |   3    |
    |  25924  |   1663   |   3    |
    |  25928  |   1663   |   2    |
    |  25933  |   1663   |   4    |
    |  25934  |   1663   |   4    |
    |  25935  |   1663   |   4    |
    |  25936  |   1663   |   5    |
    |  25937  |   1663   |   2    |
    |   ...   |   ...    |  ...   |
    +---------+----------+--------+
    [10000 rows x 3 columns]

    Compute the number of occurrences of each user.

    >>> user_count = sf.groupby(key_column_names='user_id',
    ...                         operations={'count': agg.COUNT()})
    >>> user_count
    +---------+-------+
    | user_id | count |
    +---------+-------+
    |  62361  |   1   |
    |  30727  |   1   |
    |  40111  |   1   |
    |  50513  |   1   |
    |  35140  |   1   |
    |  42352  |   1   |
    |  29667  |   1   |
    |  46242  |   1   |
    |  58310  |   1   |
    |  64614  |   1   |
    |   ...   |  ...  |
    +---------+-------+
    [9852 rows x 2 columns]

    Compute the mean and standard deviation of ratings per user.

    >>> user_rating_stats = sf.groupby(key_column_names='user_id',
    ...                                operations={
    ...                                    'mean_rating': agg.MEAN('rating'),
    ...                                    'std_rating': agg.STD('rating')
    ...                                })
    >>> user_rating_stats
    +---------+-------------+------------+
    | user_id | mean_rating | std_rating |
    +---------+-------------+------------+
    |  62361  |     5.0     |    0.0     |
    |  30727  |     4.0     |    0.0     |
    |  40111  |     2.0     |    0.0     |
    |  50513  |     4.0     |    0.0     |
    |  35140  |     4.0     |    0.0     |
    |  42352  |     5.0     |    0.0     |
    |  29667  |     4.0     |    0.0     |
    |  46242  |     5.0     |    0.0     |
    |  58310  |     2.0     |    0.0     |
    |  64614  |     2.0     |    0.0     |
    |   ...   |     ...     |    ...     |
    +---------+-------------+------------+
    [9852 rows x 3 columns]

    Compute the movie with the minimum rating per user.

    >>> chosen_movies = sf.groupby(key_column_names='user_id',
    ...                            operations={
    ...                                'worst_movies': agg.ARGMIN('rating','movie_id')
    ...                            })
    >>> chosen_movies
    +---------+-------------+
    | user_id | worst_movies |
    +---------+-------------+
    |  62361  |     1663    |
    |  30727  |     1663    |
    |  40111  |     1663    |
    |  50513  |     1663    |
    |  35140  |     1663    |
    |  42352  |     1663    |
    |  29667  |     1663    |
    |  46242  |     1663    |
    |  58310  |     1663    |
    |  64614  |     1663    |
    |   ...   |     ...     |
    +---------+-------------+
    [9852 rows x 2 columns]

    Compute the movie with the max rating per user and also the movie with
    the maximum imdb-ranking per user.

    >>> sf['imdb-ranking'] = sf['rating'] * 10
    >>> chosen_movies = sf.groupby(key_column_names='user_id',
    ...         operations={('max_rating_movie','max_imdb_ranking_movie'):
    ...         agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
    >>> chosen_movies
    +---------+------------------+------------------------+
    | user_id | max_rating_movie | max_imdb_ranking_movie |
    +---------+------------------+------------------------+
    |  62361  |       1663       |         16630          |
    |  30727  |       1663       |         16630          |
    |  40111  |       1663       |         16630          |
    |  50513  |       1663       |         16630          |
    |  35140  |       1663       |         16630          |
    |  42352  |       1663       |         16630          |
    |  29667  |       1663       |         16630          |
    |  46242  |       1663       |         16630          |
    |  58310  |       1663       |         16630          |
    |  64614  |       1663       |         16630          |
    |   ...   |       ...        |          ...           |
    +---------+------------------+------------------------+
    [9852 rows x 3 columns]

    Compute the movie with the max rating per user.

    >>> chosen_movies = sf.groupby(key_column_names='user_id',
    ...         operations={'best_movies': agg.ARGMAX('rating','movie')})

    Compute the movie with the max rating per user and also the movie with
    the maximum imdb-ranking per user.

    >>> chosen_movies = sf.groupby(key_column_names='user_id',
    ...        operations={('max_rating_movie','max_imdb_ranking_movie'):
    ...                     agg.ARGMAX(('rating','imdb-ranking'),'movie')})

    Compute the count, mean, and standard deviation of ratings per (user,
    time), automatically assigning output column names.

    >>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
    >>> user_rating_stats = sf.groupby(['user_id', 'time'],
    ...                                [agg.COUNT(),
    ...                                 agg.AVG('rating'),
    ...                                 agg.STDV('rating')])
    >>> user_rating_stats
    +------+---------+-------+---------------+----------------+
    | time | user_id | Count | Avg of rating | Stdv of rating |
    +------+---------+-------+---------------+----------------+
    | 2006 |  61285  |   1   |      4.0      |      0.0       |
    | 2000 |  36078  |   1   |      4.0      |      0.0       |
    | 2003 |  47158  |   1   |      3.0      |      0.0       |
    | 2007 |  34446  |   1   |      3.0      |      0.0       |
    | 2010 |  47990  |   1   |      3.0      |      0.0       |
    | 2003 |  42120  |   1   |      5.0      |      0.0       |
    | 2007 |  44940  |   1   |      4.0      |      0.0       |
    | 2008 |  58240  |   1   |      4.0      |      0.0       |
    | 2002 |   102   |   1   |      1.0      |      0.0       |
    | 2009 |  52708  |   1   |      3.0      |      0.0       |
    | ...  |   ...   |  ...  |      ...      |      ...       |
    +------+---------+-------+---------------+----------------+
    [10000 rows x 5 columns]

    The groupby function can take a variable length list of aggregation
    specifiers so if we want the count and the 0.25 and 0.75 quantiles of
    ratings:

    >>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
    ...                                {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
    >>> user_rating_stats
    +------+---------+-------+------------------------+
    | time | user_id | Count |    rating_quantiles    |
    +------+---------+-------+------------------------+
    | 2006 |  61285  |   1   | array('d', [4.0, 4.0]) |
    | 2000 |  36078  |   1   | array('d', [4.0, 4.0]) |
    | 2003 |  47158  |   1   | array('d', [3.0, 3.0]) |
    | 2007 |  34446  |   1   | array('d', [3.0, 3.0]) |
    | 2010 |  47990  |   1   | array('d', [3.0, 3.0]) |
    | 2003 |  42120  |   1   | array('d', [5.0, 5.0]) |
    | 2007 |  44940  |   1   | array('d', [4.0, 4.0]) |
    | 2008 |  58240  |   1   | array('d', [4.0, 4.0]) |
    | 2002 |   102   |   1   | array('d', [1.0, 1.0]) |
    | 2009 |  52708  |   1   | array('d', [3.0, 3.0]) |
    | ...  |   ...   |  ...  |          ...           |
    +------+---------+-------+------------------------+
    [10000 rows x 4 columns]

    To put all items a user rated into one list value by their star rating:

    >>> user_rating_stats = sf.groupby(["user_id", "rating"],
    ...                                {"rated_movie_ids":agg.CONCAT("movie_id")})
    >>> user_rating_stats
    +--------+---------+----------------------+
    | rating | user_id |   rated_movie_ids    |
    +--------+---------+----------------------+
    |   3    |  31434  | array('d', [1663.0]) |
    |   5    |  25944  | array('d', [1663.0]) |
    |   4    |  38827  | array('d', [1663.0]) |
    |   4    |  51437  | array('d', [1663.0]) |
    |   4    |  42549  | array('d', [1663.0]) |
    |   4    |  49532  | array('d', [1663.0]) |
    |   3    |  26124  | array('d', [1663.0]) |
    |   4    |  46336  | array('d', [1663.0]) |
    |   4    |  52133  | array('d', [1663.0]) |
    |   5    |  62361  | array('d', [1663.0]) |
    |  ...   |   ...   |         ...          |
    +--------+---------+----------------------+
    [9952 rows x 3 columns]

    To put all items and rating of a given user together into a dictionary
    value:

    >>> user_rating_stats = sf.groupby("user_id",
    ...                                {"movie_rating":agg.CONCAT("movie_id", "rating")})
    >>> user_rating_stats
    +---------+--------------+
    | user_id | movie_rating |
    +---------+--------------+
    |  62361  |  {1663: 5}   |
    |  30727  |  {1663: 4}   |
    |  40111  |  {1663: 2}   |
    |  50513  |  {1663: 4}   |
    |  35140  |  {1663: 4}   |
    |  42352  |  {1663: 5}   |
    |  29667  |  {1663: 4}   |
    |  46242  |  {1663: 5}   |
    |  58310  |  {1663: 2}   |
    |  64614  |  {1663: 2}   |
    |   ...   |     ...      |
    +---------+--------------+
    [9852 rows x 2 columns]
    """
    # some basic checking first
    # make sure key_column_names is a list
    if isinstance(key_column_names, str):
        key_column_names = [key_column_names]
    # check that every column is a string, and is a valid column name
    my_column_names = self.column_names()
    key_columns_array = []
    for column in key_column_names:
        if not isinstance(column, str):
            raise TypeError("Column name must be a string")
        if column not in my_column_names:
            raise KeyError("Column " + column + " does not exist in SFrame")
        # Dictionary columns are not hashable group keys in the engine.
        if self[column].dtype == dict:
            raise TypeError("Cannot group on a dictionary column.")
        key_columns_array.append(column)

    # Parallel lists built below and handed to the native engine:
    # group_columns[i] is the input column spec for group_ops[i], whose
    # result is written to group_output_columns[i] ("" = auto-named).
    group_output_columns = []
    group_columns = []
    group_ops = []

    # `operations` plus any extra positional specs are processed uniformly.
    all_ops = [operations] + list(args)

    for op_entry in all_ops:
        # if it is not a dict, nor a list, it is just a single aggregator
        # element (probably COUNT). wrap it in a list so we can reuse the
        # list processing code
        operation = op_entry
        if not(isinstance(operation, list) or isinstance(operation, dict)):
            operation = [operation]
        if isinstance(operation, dict):
            # now sweep the dict and add to group_columns and group_ops
            for key in operation:
                val = operation[key]
                # Aggregators from turicreate.aggregate are encoded as
                # ('__builtin__<op>__', column_spec) tuples.
                if type(val) is tuple:
                    (op, column) = val
                    # Switch to the vector variants when aggregating an
                    # array-typed column.
                    if (op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__avg__'
                    if (op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__sum__'
                    # Multi-column ARGMAX/ARGMIN: inputs and output names
                    # must both be tuples (or both plain strings).
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
                        raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
                        # One output column per (input column, output name)
                        # pair, all sharing the same agg column column[1].
                        for (col, output) in zip(column[0], key):
                            group_columns = group_columns + [[col, column[1]]]
                            group_ops = group_ops + [op]
                            group_output_columns = group_output_columns + [output]
                    else:
                        group_columns = group_columns + [column]
                        group_ops = group_ops + [op]
                        group_output_columns = group_output_columns + [key]
                    if (op == '__builtin__concat__dict__'):
                        # Two-column CONCAT builds a dict; its key column
                        # must be a scalar (hashable) type.
                        key_column = column[0]
                        key_column_type = self.select_column(key_column).dtype
                        if not key_column_type in (int, float, str):
                            raise TypeError('CONCAT key column must be int, float or str type')
                elif val == aggregate.COUNT:
                    # The COUNT class itself (not an instance) was passed;
                    # instantiate it to obtain the (op, column) tuple.
                    group_output_columns = group_output_columns + [key]
                    val = aggregate.COUNT()
                    (op, column) = val
                    group_columns = group_columns + [column]
                    group_ops = group_ops + [op]
                else:
                    raise TypeError("Unexpected type in aggregator definition of output column: " + key)
        elif isinstance(operation, list):
            # we will be using automatically defined column names
            for val in operation:
                if type(val) is tuple:
                    (op, column) = val
                    # Same vector-variant promotion as in the dict branch.
                    if (op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__avg__'
                    if (op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__sum__'
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
                        for col in column[0]:
                            group_columns = group_columns + [[col, column[1]]]
                            group_ops = group_ops + [op]
                            # "" requests an auto-generated output name.
                            group_output_columns = group_output_columns + [""]
                    else:
                        group_columns = group_columns + [column]
                        group_ops = group_ops + [op]
                        group_output_columns = group_output_columns + [""]
                    if (op == '__builtin__concat__dict__'):
                        key_column = column[0]
                        key_column_type = self.select_column(key_column).dtype
                        if not key_column_type in (int, float, str):
                            raise TypeError('CONCAT key column must be int, float or str type')
                elif val == aggregate.COUNT:
                    group_output_columns = group_output_columns + [""]
                    val = aggregate.COUNT()
                    (op, column) = val
                    group_columns = group_columns + [column]
                    group_ops = group_ops + [op]
                else:
                    raise TypeError("Unexpected type in aggregator definition.")

        # let's validate group_columns and group_ops are valid
        for (cols, op) in zip(group_columns, group_ops):
            for col in cols:
                if not isinstance(col, str):
                    raise TypeError("Column name must be a string")

            if not isinstance(op, str):
                raise TypeError("Operation type not recognized.")

            # NOTE(review): `is not` compares string identity here and
            # relies on interning of the builtin COUNT op name; `!=` would
            # be the safer comparison — confirm before changing.
            if op is not aggregate.COUNT()[0]:
                # COUNT takes no input column, so skip its existence check.
                for col in cols:
                    if col not in my_column_names:
                        raise KeyError("Column " + col + " does not exist in SFrame")

    # Delegate the actual aggregation to the native engine via the proxy.
    with cython_context():
        return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array,
                                                              group_columns,
                                                              group_output_columns,
                                                              group_ops))
def function[groupby, parameter[self, key_column_names, operations]]: constant[ Perform a group on the key_column_names followed by aggregations on the columns listed in operations. The operations parameter is a dictionary that indicates which aggregation operators to use and which columns to use them on. The available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT, SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and VAR. See :mod:`~turicreate.aggregate` for more detail on the aggregators. Parameters ---------- key_column_names : string | list[string] Column(s) to group by. Key columns can be of any type other than dictionary. operations : dict, list Dictionary of columns and aggregation operations. Each key is a output column name and each value is an aggregator. This can also be a list of aggregators, in which case column names will be automatically assigned. *args All other remaining arguments will be interpreted in the same way as the operations argument. Returns ------- out_sf : SFrame A new SFrame, with a column for each groupby column and each aggregation operation. See Also -------- aggregate Notes ----- * Numeric aggregators (such as sum, mean, stdev etc.) follow the skip None policy i.e they will omit all missing values from the aggregation. As an example, `sum([None, 5, 10]) = 15` because the `None` value is skipped. * Aggregators have a default value when no values (after skipping all `None` values) are present. Default values are `None` for ['ARGMAX', 'ARGMIN', 'AVG', 'STD', 'MEAN', 'MIN', 'MAX'], `0` for ['COUNT' 'COUNT_DISTINCT', 'DISTINCT'] `[]` for 'CONCAT', 'QUANTILE', 'DISTINCT', and `{}` for 'FREQ_COUNT'. Examples -------- Suppose we have an SFrame with movie ratings by many users. 
>>> import turicreate.aggregate as agg >>> url = 'https://static.turi.com/datasets/rating_data_example.csv' >>> sf = turicreate.SFrame.read_csv(url) >>> sf +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | 25933 | 1663 | 4 | | 25934 | 1663 | 4 | | 25935 | 1663 | 4 | | 25936 | 1663 | 5 | | 25937 | 1663 | 2 | | ... | ... | ... | +---------+----------+--------+ [10000 rows x 3 columns] Compute the number of occurrences of each user. >>> user_count = sf.groupby(key_column_names='user_id', ... operations={'count': agg.COUNT()}) >>> user_count +---------+-------+ | user_id | count | +---------+-------+ | 62361 | 1 | | 30727 | 1 | | 40111 | 1 | | 50513 | 1 | | 35140 | 1 | | 42352 | 1 | | 29667 | 1 | | 46242 | 1 | | 58310 | 1 | | 64614 | 1 | | ... | ... | +---------+-------+ [9852 rows x 2 columns] Compute the mean and standard deviation of ratings per user. >>> user_rating_stats = sf.groupby(key_column_names='user_id', ... operations={ ... 'mean_rating': agg.MEAN('rating'), ... 'std_rating': agg.STD('rating') ... }) >>> user_rating_stats +---------+-------------+------------+ | user_id | mean_rating | std_rating | +---------+-------------+------------+ | 62361 | 5.0 | 0.0 | | 30727 | 4.0 | 0.0 | | 40111 | 2.0 | 0.0 | | 50513 | 4.0 | 0.0 | | 35140 | 4.0 | 0.0 | | 42352 | 5.0 | 0.0 | | 29667 | 4.0 | 0.0 | | 46242 | 5.0 | 0.0 | | 58310 | 2.0 | 0.0 | | 64614 | 2.0 | 0.0 | | ... | ... | ... | +---------+-------------+------------+ [9852 rows x 3 columns] Compute the movie with the minimum rating per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', ... operations={ ... 'worst_movies': agg.ARGMIN('rating','movie_id') ... 
}) >>> chosen_movies +---------+-------------+ | user_id | worst_movies | +---------+-------------+ | 62361 | 1663 | | 30727 | 1663 | | 40111 | 1663 | | 50513 | 1663 | | 35140 | 1663 | | 42352 | 1663 | | 29667 | 1663 | | 46242 | 1663 | | 58310 | 1663 | | 64614 | 1663 | | ... | ... | +---------+-------------+ [9852 rows x 2 columns] Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user. >>> sf['imdb-ranking'] = sf['rating'] * 10 >>> chosen_movies = sf.groupby(key_column_names='user_id', ... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')}) >>> chosen_movies +---------+------------------+------------------------+ | user_id | max_rating_movie | max_imdb_ranking_movie | +---------+------------------+------------------------+ | 62361 | 1663 | 16630 | | 30727 | 1663 | 16630 | | 40111 | 1663 | 16630 | | 50513 | 1663 | 16630 | | 35140 | 1663 | 16630 | | 42352 | 1663 | 16630 | | 29667 | 1663 | 16630 | | 46242 | 1663 | 16630 | | 58310 | 1663 | 16630 | | 64614 | 1663 | 16630 | | ... | ... | ... | +---------+------------------+------------------------+ [9852 rows x 3 columns] Compute the movie with the max rating per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', operations={'best_movies': agg.ARGMAX('rating','movie')}) Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie')}) Compute the count, mean, and standard deviation of ratings per (user, time), automatically assigning output column names. >>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000) >>> user_rating_stats = sf.groupby(['user_id', 'time'], ... [agg.COUNT(), ... agg.AVG('rating'), ... 
agg.STDV('rating')]) >>> user_rating_stats +------+---------+-------+---------------+----------------+ | time | user_id | Count | Avg of rating | Stdv of rating | +------+---------+-------+---------------+----------------+ | 2006 | 61285 | 1 | 4.0 | 0.0 | | 2000 | 36078 | 1 | 4.0 | 0.0 | | 2003 | 47158 | 1 | 3.0 | 0.0 | | 2007 | 34446 | 1 | 3.0 | 0.0 | | 2010 | 47990 | 1 | 3.0 | 0.0 | | 2003 | 42120 | 1 | 5.0 | 0.0 | | 2007 | 44940 | 1 | 4.0 | 0.0 | | 2008 | 58240 | 1 | 4.0 | 0.0 | | 2002 | 102 | 1 | 1.0 | 0.0 | | 2009 | 52708 | 1 | 3.0 | 0.0 | | ... | ... | ... | ... | ... | +------+---------+-------+---------------+----------------+ [10000 rows x 5 columns] The groupby function can take a variable length list of aggregation specifiers so if we want the count and the 0.25 and 0.75 quantiles of ratings: >>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(), ... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])}) >>> user_rating_stats +------+---------+-------+------------------------+ | time | user_id | Count | rating_quantiles | +------+---------+-------+------------------------+ | 2006 | 61285 | 1 | array('d', [4.0, 4.0]) | | 2000 | 36078 | 1 | array('d', [4.0, 4.0]) | | 2003 | 47158 | 1 | array('d', [3.0, 3.0]) | | 2007 | 34446 | 1 | array('d', [3.0, 3.0]) | | 2010 | 47990 | 1 | array('d', [3.0, 3.0]) | | 2003 | 42120 | 1 | array('d', [5.0, 5.0]) | | 2007 | 44940 | 1 | array('d', [4.0, 4.0]) | | 2008 | 58240 | 1 | array('d', [4.0, 4.0]) | | 2002 | 102 | 1 | array('d', [1.0, 1.0]) | | 2009 | 52708 | 1 | array('d', [3.0, 3.0]) | | ... | ... | ... | ... | +------+---------+-------+------------------------+ [10000 rows x 4 columns] To put all items a user rated into one list value by their star rating: >>> user_rating_stats = sf.groupby(["user_id", "rating"], ... 
{"rated_movie_ids":agg.CONCAT("movie_id")}) >>> user_rating_stats +--------+---------+----------------------+ | rating | user_id | rated_movie_ids | +--------+---------+----------------------+ | 3 | 31434 | array('d', [1663.0]) | | 5 | 25944 | array('d', [1663.0]) | | 4 | 38827 | array('d', [1663.0]) | | 4 | 51437 | array('d', [1663.0]) | | 4 | 42549 | array('d', [1663.0]) | | 4 | 49532 | array('d', [1663.0]) | | 3 | 26124 | array('d', [1663.0]) | | 4 | 46336 | array('d', [1663.0]) | | 4 | 52133 | array('d', [1663.0]) | | 5 | 62361 | array('d', [1663.0]) | | ... | ... | ... | +--------+---------+----------------------+ [9952 rows x 3 columns] To put all items and rating of a given user together into a dictionary value: >>> user_rating_stats = sf.groupby("user_id", ... {"movie_rating":agg.CONCAT("movie_id", "rating")}) >>> user_rating_stats +---------+--------------+ | user_id | movie_rating | +---------+--------------+ | 62361 | {1663: 5} | | 30727 | {1663: 4} | | 40111 | {1663: 2} | | 50513 | {1663: 4} | | 35140 | {1663: 4} | | 42352 | {1663: 5} | | 29667 | {1663: 4} | | 46242 | {1663: 5} | | 58310 | {1663: 2} | | 64614 | {1663: 2} | | ... | ... 
| +---------+--------------+ [9852 rows x 2 columns] ] if call[name[isinstance], parameter[name[key_column_names], name[str]]] begin[:] variable[key_column_names] assign[=] list[[<ast.Name object at 0x7da1b20eded0>]] variable[my_column_names] assign[=] call[name[self].column_names, parameter[]] variable[key_columns_array] assign[=] list[[]] for taget[name[column]] in starred[name[key_column_names]] begin[:] if <ast.UnaryOp object at 0x7da1b20ee680> begin[:] <ast.Raise object at 0x7da1b20ee560> if compare[name[column] <ast.NotIn object at 0x7da2590d7190> name[my_column_names]] begin[:] <ast.Raise object at 0x7da1b20eda80> if compare[call[name[self]][name[column]].dtype equal[==] name[dict]] begin[:] <ast.Raise object at 0x7da1b20ed2d0> call[name[key_columns_array].append, parameter[name[column]]] variable[group_output_columns] assign[=] list[[]] variable[group_columns] assign[=] list[[]] variable[group_ops] assign[=] list[[]] variable[all_ops] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b20ec9a0>]] + call[name[list], parameter[name[args]]]] for taget[name[op_entry]] in starred[name[all_ops]] begin[:] variable[operation] assign[=] name[op_entry] if <ast.UnaryOp object at 0x7da1b20ed1e0> begin[:] variable[operation] assign[=] list[[<ast.Name object at 0x7da1b20ee3e0>]] if call[name[isinstance], parameter[name[operation], name[dict]]] begin[:] for taget[name[key]] in starred[name[operation]] begin[:] variable[val] assign[=] call[name[operation]][name[key]] if compare[call[name[type], parameter[name[val]]] is name[tuple]] begin[:] <ast.Tuple object at 0x7da1b20edfc0> assign[=] name[val] if <ast.BoolOp object at 0x7da1b20ee440> begin[:] variable[op] assign[=] constant[__builtin__vector__avg__] if <ast.BoolOp object at 0x7da1b20ecc10> begin[:] variable[op] assign[=] constant[__builtin__vector__sum__] if <ast.BoolOp object at 0x7da1b20341f0> begin[:] <ast.Raise object at 0x7da1b20340a0> if <ast.BoolOp object at 0x7da1b2035090> begin[:] for 
taget[tuple[[<ast.Name object at 0x7da1b2035cc0>, <ast.Name object at 0x7da1b2035f30>]]] in starred[call[name[zip], parameter[call[name[column]][constant[0]], name[key]]]] begin[:] variable[group_columns] assign[=] binary_operation[name[group_columns] + list[[<ast.List object at 0x7da1b2034910>]]] variable[group_ops] assign[=] binary_operation[name[group_ops] + list[[<ast.Name object at 0x7da1b2037490>]]] variable[group_output_columns] assign[=] binary_operation[name[group_output_columns] + list[[<ast.Name object at 0x7da1b2034a00>]]] if compare[name[op] equal[==] constant[__builtin__concat__dict__]] begin[:] variable[key_column] assign[=] call[name[column]][constant[0]] variable[key_column_type] assign[=] call[name[self].select_column, parameter[name[key_column]]].dtype if <ast.UnaryOp object at 0x7da1b2034e50> begin[:] <ast.Raise object at 0x7da1b2036170> for taget[tuple[[<ast.Name object at 0x7da1b1f8db70>, <ast.Name object at 0x7da1b1f8cb50>]]] in starred[call[name[zip], parameter[name[group_columns], name[group_ops]]]] begin[:] for taget[name[col]] in starred[name[cols]] begin[:] if <ast.UnaryOp object at 0x7da1b1f8f310> begin[:] <ast.Raise object at 0x7da1b1f8f9d0> if <ast.UnaryOp object at 0x7da1b1f8e890> begin[:] <ast.Raise object at 0x7da1b1f8c6d0> if compare[name[op] is_not call[call[name[aggregate].COUNT, parameter[]]][constant[0]]] begin[:] for taget[name[col]] in starred[name[cols]] begin[:] if compare[name[col] <ast.NotIn object at 0x7da2590d7190> name[my_column_names]] begin[:] <ast.Raise object at 0x7da1b1f8de70> with call[name[cython_context], parameter[]] begin[:] return[call[name[SFrame], parameter[]]]
keyword[def] identifier[groupby] ( identifier[self] , identifier[key_column_names] , identifier[operations] ,* identifier[args] ): literal[string] keyword[if] identifier[isinstance] ( identifier[key_column_names] , identifier[str] ): identifier[key_column_names] =[ identifier[key_column_names] ] identifier[my_column_names] = identifier[self] . identifier[column_names] () identifier[key_columns_array] =[] keyword[for] identifier[column] keyword[in] identifier[key_column_names] : keyword[if] keyword[not] identifier[isinstance] ( identifier[column] , identifier[str] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[column] keyword[not] keyword[in] identifier[my_column_names] : keyword[raise] identifier[KeyError] ( literal[string] + identifier[column] + literal[string] ) keyword[if] identifier[self] [ identifier[column] ]. identifier[dtype] == identifier[dict] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[key_columns_array] . identifier[append] ( identifier[column] ) identifier[group_output_columns] =[] identifier[group_columns] =[] identifier[group_ops] =[] identifier[all_ops] =[ identifier[operations] ]+ identifier[list] ( identifier[args] ) keyword[for] identifier[op_entry] keyword[in] identifier[all_ops] : identifier[operation] = identifier[op_entry] keyword[if] keyword[not] ( identifier[isinstance] ( identifier[operation] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[operation] , identifier[dict] )): identifier[operation] =[ identifier[operation] ] keyword[if] identifier[isinstance] ( identifier[operation] , identifier[dict] ): keyword[for] identifier[key] keyword[in] identifier[operation] : identifier[val] = identifier[operation] [ identifier[key] ] keyword[if] identifier[type] ( identifier[val] ) keyword[is] identifier[tuple] : ( identifier[op] , identifier[column] )= identifier[val] keyword[if] ( identifier[op] == literal[string] keyword[and] identifier[self] [ identifier[column] [ 
literal[int] ]]. identifier[dtype] keyword[in] [ identifier[array] . identifier[array] , identifier[numpy] . identifier[ndarray] ]): identifier[op] = literal[string] keyword[if] ( identifier[op] == literal[string] keyword[and] identifier[self] [ identifier[column] [ literal[int] ]]. identifier[dtype] keyword[in] [ identifier[array] . identifier[array] , identifier[numpy] . identifier[ndarray] ]): identifier[op] = literal[string] keyword[if] ( identifier[op] == literal[string] keyword[or] identifier[op] == literal[string] ) keyword[and] (( identifier[type] ( identifier[column] [ literal[int] ]) keyword[is] identifier[tuple] )!=( identifier[type] ( identifier[key] ) keyword[is] identifier[tuple] )): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] ( identifier[op] == literal[string] keyword[or] identifier[op] == literal[string] ) keyword[and] identifier[type] ( identifier[column] [ literal[int] ]) keyword[is] identifier[tuple] : keyword[for] ( identifier[col] , identifier[output] ) keyword[in] identifier[zip] ( identifier[column] [ literal[int] ], identifier[key] ): identifier[group_columns] = identifier[group_columns] +[[ identifier[col] , identifier[column] [ literal[int] ]]] identifier[group_ops] = identifier[group_ops] +[ identifier[op] ] identifier[group_output_columns] = identifier[group_output_columns] +[ identifier[output] ] keyword[else] : identifier[group_columns] = identifier[group_columns] +[ identifier[column] ] identifier[group_ops] = identifier[group_ops] +[ identifier[op] ] identifier[group_output_columns] = identifier[group_output_columns] +[ identifier[key] ] keyword[if] ( identifier[op] == literal[string] ): identifier[key_column] = identifier[column] [ literal[int] ] identifier[key_column_type] = identifier[self] . identifier[select_column] ( identifier[key_column] ). 
identifier[dtype] keyword[if] keyword[not] identifier[key_column_type] keyword[in] ( identifier[int] , identifier[float] , identifier[str] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[elif] identifier[val] == identifier[aggregate] . identifier[COUNT] : identifier[group_output_columns] = identifier[group_output_columns] +[ identifier[key] ] identifier[val] = identifier[aggregate] . identifier[COUNT] () ( identifier[op] , identifier[column] )= identifier[val] identifier[group_columns] = identifier[group_columns] +[ identifier[column] ] identifier[group_ops] = identifier[group_ops] +[ identifier[op] ] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] + identifier[key] ) keyword[elif] identifier[isinstance] ( identifier[operation] , identifier[list] ): keyword[for] identifier[val] keyword[in] identifier[operation] : keyword[if] identifier[type] ( identifier[val] ) keyword[is] identifier[tuple] : ( identifier[op] , identifier[column] )= identifier[val] keyword[if] ( identifier[op] == literal[string] keyword[and] identifier[self] [ identifier[column] [ literal[int] ]]. identifier[dtype] keyword[in] [ identifier[array] . identifier[array] , identifier[numpy] . identifier[ndarray] ]): identifier[op] = literal[string] keyword[if] ( identifier[op] == literal[string] keyword[and] identifier[self] [ identifier[column] [ literal[int] ]]. identifier[dtype] keyword[in] [ identifier[array] . identifier[array] , identifier[numpy] . 
identifier[ndarray] ]): identifier[op] = literal[string] keyword[if] ( identifier[op] == literal[string] keyword[or] identifier[op] == literal[string] ) keyword[and] identifier[type] ( identifier[column] [ literal[int] ]) keyword[is] identifier[tuple] : keyword[for] identifier[col] keyword[in] identifier[column] [ literal[int] ]: identifier[group_columns] = identifier[group_columns] +[[ identifier[col] , identifier[column] [ literal[int] ]]] identifier[group_ops] = identifier[group_ops] +[ identifier[op] ] identifier[group_output_columns] = identifier[group_output_columns] +[ literal[string] ] keyword[else] : identifier[group_columns] = identifier[group_columns] +[ identifier[column] ] identifier[group_ops] = identifier[group_ops] +[ identifier[op] ] identifier[group_output_columns] = identifier[group_output_columns] +[ literal[string] ] keyword[if] ( identifier[op] == literal[string] ): identifier[key_column] = identifier[column] [ literal[int] ] identifier[key_column_type] = identifier[self] . identifier[select_column] ( identifier[key_column] ). identifier[dtype] keyword[if] keyword[not] identifier[key_column_type] keyword[in] ( identifier[int] , identifier[float] , identifier[str] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[elif] identifier[val] == identifier[aggregate] . identifier[COUNT] : identifier[group_output_columns] = identifier[group_output_columns] +[ literal[string] ] identifier[val] = identifier[aggregate] . 
identifier[COUNT] () ( identifier[op] , identifier[column] )= identifier[val] identifier[group_columns] = identifier[group_columns] +[ identifier[column] ] identifier[group_ops] = identifier[group_ops] +[ identifier[op] ] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] ) keyword[for] ( identifier[cols] , identifier[op] ) keyword[in] identifier[zip] ( identifier[group_columns] , identifier[group_ops] ): keyword[for] identifier[col] keyword[in] identifier[cols] : keyword[if] keyword[not] identifier[isinstance] ( identifier[col] , identifier[str] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[op] , identifier[str] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[op] keyword[is] keyword[not] identifier[aggregate] . identifier[COUNT] ()[ literal[int] ]: keyword[for] identifier[col] keyword[in] identifier[cols] : keyword[if] identifier[col] keyword[not] keyword[in] identifier[my_column_names] : keyword[raise] identifier[KeyError] ( literal[string] + identifier[col] + literal[string] ) keyword[with] identifier[cython_context] (): keyword[return] identifier[SFrame] ( identifier[_proxy] = identifier[self] . identifier[__proxy__] . identifier[groupby_aggregate] ( identifier[key_columns_array] , identifier[group_columns] , identifier[group_output_columns] , identifier[group_ops] ))
def groupby(self, key_column_names, operations, *args): """ Perform a group on the key_column_names followed by aggregations on the columns listed in operations. The operations parameter is a dictionary that indicates which aggregation operators to use and which columns to use them on. The available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT, SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and VAR. See :mod:`~turicreate.aggregate` for more detail on the aggregators. Parameters ---------- key_column_names : string | list[string] Column(s) to group by. Key columns can be of any type other than dictionary. operations : dict, list Dictionary of columns and aggregation operations. Each key is a output column name and each value is an aggregator. This can also be a list of aggregators, in which case column names will be automatically assigned. *args All other remaining arguments will be interpreted in the same way as the operations argument. Returns ------- out_sf : SFrame A new SFrame, with a column for each groupby column and each aggregation operation. See Also -------- aggregate Notes ----- * Numeric aggregators (such as sum, mean, stdev etc.) follow the skip None policy i.e they will omit all missing values from the aggregation. As an example, `sum([None, 5, 10]) = 15` because the `None` value is skipped. * Aggregators have a default value when no values (after skipping all `None` values) are present. Default values are `None` for ['ARGMAX', 'ARGMIN', 'AVG', 'STD', 'MEAN', 'MIN', 'MAX'], `0` for ['COUNT' 'COUNT_DISTINCT', 'DISTINCT'] `[]` for 'CONCAT', 'QUANTILE', 'DISTINCT', and `{}` for 'FREQ_COUNT'. Examples -------- Suppose we have an SFrame with movie ratings by many users. 
>>> import turicreate.aggregate as agg >>> url = 'https://static.turi.com/datasets/rating_data_example.csv' >>> sf = turicreate.SFrame.read_csv(url) >>> sf +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | 25933 | 1663 | 4 | | 25934 | 1663 | 4 | | 25935 | 1663 | 4 | | 25936 | 1663 | 5 | | 25937 | 1663 | 2 | | ... | ... | ... | +---------+----------+--------+ [10000 rows x 3 columns] Compute the number of occurrences of each user. >>> user_count = sf.groupby(key_column_names='user_id', ... operations={'count': agg.COUNT()}) >>> user_count +---------+-------+ | user_id | count | +---------+-------+ | 62361 | 1 | | 30727 | 1 | | 40111 | 1 | | 50513 | 1 | | 35140 | 1 | | 42352 | 1 | | 29667 | 1 | | 46242 | 1 | | 58310 | 1 | | 64614 | 1 | | ... | ... | +---------+-------+ [9852 rows x 2 columns] Compute the mean and standard deviation of ratings per user. >>> user_rating_stats = sf.groupby(key_column_names='user_id', ... operations={ ... 'mean_rating': agg.MEAN('rating'), ... 'std_rating': agg.STD('rating') ... }) >>> user_rating_stats +---------+-------------+------------+ | user_id | mean_rating | std_rating | +---------+-------------+------------+ | 62361 | 5.0 | 0.0 | | 30727 | 4.0 | 0.0 | | 40111 | 2.0 | 0.0 | | 50513 | 4.0 | 0.0 | | 35140 | 4.0 | 0.0 | | 42352 | 5.0 | 0.0 | | 29667 | 4.0 | 0.0 | | 46242 | 5.0 | 0.0 | | 58310 | 2.0 | 0.0 | | 64614 | 2.0 | 0.0 | | ... | ... | ... | +---------+-------------+------------+ [9852 rows x 3 columns] Compute the movie with the minimum rating per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', ... operations={ ... 'worst_movies': agg.ARGMIN('rating','movie_id') ... 
}) >>> chosen_movies +---------+-------------+ | user_id | worst_movies | +---------+-------------+ | 62361 | 1663 | | 30727 | 1663 | | 40111 | 1663 | | 50513 | 1663 | | 35140 | 1663 | | 42352 | 1663 | | 29667 | 1663 | | 46242 | 1663 | | 58310 | 1663 | | 64614 | 1663 | | ... | ... | +---------+-------------+ [9852 rows x 2 columns] Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user. >>> sf['imdb-ranking'] = sf['rating'] * 10 >>> chosen_movies = sf.groupby(key_column_names='user_id', ... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')}) >>> chosen_movies +---------+------------------+------------------------+ | user_id | max_rating_movie | max_imdb_ranking_movie | +---------+------------------+------------------------+ | 62361 | 1663 | 16630 | | 30727 | 1663 | 16630 | | 40111 | 1663 | 16630 | | 50513 | 1663 | 16630 | | 35140 | 1663 | 16630 | | 42352 | 1663 | 16630 | | 29667 | 1663 | 16630 | | 46242 | 1663 | 16630 | | 58310 | 1663 | 16630 | | 64614 | 1663 | 16630 | | ... | ... | ... | +---------+------------------+------------------------+ [9852 rows x 3 columns] Compute the movie with the max rating per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', operations={'best_movies': agg.ARGMAX('rating','movie')}) Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie')}) Compute the count, mean, and standard deviation of ratings per (user, time), automatically assigning output column names. >>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000) >>> user_rating_stats = sf.groupby(['user_id', 'time'], ... [agg.COUNT(), ... agg.AVG('rating'), ... 
agg.STDV('rating')]) >>> user_rating_stats +------+---------+-------+---------------+----------------+ | time | user_id | Count | Avg of rating | Stdv of rating | +------+---------+-------+---------------+----------------+ | 2006 | 61285 | 1 | 4.0 | 0.0 | | 2000 | 36078 | 1 | 4.0 | 0.0 | | 2003 | 47158 | 1 | 3.0 | 0.0 | | 2007 | 34446 | 1 | 3.0 | 0.0 | | 2010 | 47990 | 1 | 3.0 | 0.0 | | 2003 | 42120 | 1 | 5.0 | 0.0 | | 2007 | 44940 | 1 | 4.0 | 0.0 | | 2008 | 58240 | 1 | 4.0 | 0.0 | | 2002 | 102 | 1 | 1.0 | 0.0 | | 2009 | 52708 | 1 | 3.0 | 0.0 | | ... | ... | ... | ... | ... | +------+---------+-------+---------------+----------------+ [10000 rows x 5 columns] The groupby function can take a variable length list of aggregation specifiers so if we want the count and the 0.25 and 0.75 quantiles of ratings: >>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(), ... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])}) >>> user_rating_stats +------+---------+-------+------------------------+ | time | user_id | Count | rating_quantiles | +------+---------+-------+------------------------+ | 2006 | 61285 | 1 | array('d', [4.0, 4.0]) | | 2000 | 36078 | 1 | array('d', [4.0, 4.0]) | | 2003 | 47158 | 1 | array('d', [3.0, 3.0]) | | 2007 | 34446 | 1 | array('d', [3.0, 3.0]) | | 2010 | 47990 | 1 | array('d', [3.0, 3.0]) | | 2003 | 42120 | 1 | array('d', [5.0, 5.0]) | | 2007 | 44940 | 1 | array('d', [4.0, 4.0]) | | 2008 | 58240 | 1 | array('d', [4.0, 4.0]) | | 2002 | 102 | 1 | array('d', [1.0, 1.0]) | | 2009 | 52708 | 1 | array('d', [3.0, 3.0]) | | ... | ... | ... | ... | +------+---------+-------+------------------------+ [10000 rows x 4 columns] To put all items a user rated into one list value by their star rating: >>> user_rating_stats = sf.groupby(["user_id", "rating"], ... 
{"rated_movie_ids":agg.CONCAT("movie_id")}) >>> user_rating_stats +--------+---------+----------------------+ | rating | user_id | rated_movie_ids | +--------+---------+----------------------+ | 3 | 31434 | array('d', [1663.0]) | | 5 | 25944 | array('d', [1663.0]) | | 4 | 38827 | array('d', [1663.0]) | | 4 | 51437 | array('d', [1663.0]) | | 4 | 42549 | array('d', [1663.0]) | | 4 | 49532 | array('d', [1663.0]) | | 3 | 26124 | array('d', [1663.0]) | | 4 | 46336 | array('d', [1663.0]) | | 4 | 52133 | array('d', [1663.0]) | | 5 | 62361 | array('d', [1663.0]) | | ... | ... | ... | +--------+---------+----------------------+ [9952 rows x 3 columns] To put all items and rating of a given user together into a dictionary value: >>> user_rating_stats = sf.groupby("user_id", ... {"movie_rating":agg.CONCAT("movie_id", "rating")}) >>> user_rating_stats +---------+--------------+ | user_id | movie_rating | +---------+--------------+ | 62361 | {1663: 5} | | 30727 | {1663: 4} | | 40111 | {1663: 2} | | 50513 | {1663: 4} | | 35140 | {1663: 4} | | 42352 | {1663: 5} | | 29667 | {1663: 4} | | 46242 | {1663: 5} | | 58310 | {1663: 2} | | 64614 | {1663: 2} | | ... | ... 
| +---------+--------------+ [9852 rows x 2 columns] """ # some basic checking first # make sure key_column_names is a list if isinstance(key_column_names, str): key_column_names = [key_column_names] # depends on [control=['if'], data=[]] # check that every column is a string, and is a valid column name my_column_names = self.column_names() key_columns_array = [] for column in key_column_names: if not isinstance(column, str): raise TypeError('Column name must be a string') # depends on [control=['if'], data=[]] if column not in my_column_names: raise KeyError('Column ' + column + ' does not exist in SFrame') # depends on [control=['if'], data=['column']] if self[column].dtype == dict: raise TypeError('Cannot group on a dictionary column.') # depends on [control=['if'], data=[]] key_columns_array.append(column) # depends on [control=['for'], data=['column']] group_output_columns = [] group_columns = [] group_ops = [] all_ops = [operations] + list(args) for op_entry in all_ops: # if it is not a dict, nor a list, it is just a single aggregator # element (probably COUNT). 
wrap it in a list so we can reuse the # list processing code operation = op_entry if not (isinstance(operation, list) or isinstance(operation, dict)): operation = [operation] # depends on [control=['if'], data=[]] if isinstance(operation, dict): # now sweep the dict and add to group_columns and group_ops for key in operation: val = operation[key] if type(val) is tuple: (op, column) = val if op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]: op = '__builtin__vector__avg__' # depends on [control=['if'], data=[]] if op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]: op = '__builtin__vector__sum__' # depends on [control=['if'], data=[]] if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and (type(column[0]) is tuple) != (type(key) is tuple): raise TypeError('Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.') # depends on [control=['if'], data=[]] if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple: for (col, output) in zip(column[0], key): group_columns = group_columns + [[col, column[1]]] group_ops = group_ops + [op] group_output_columns = group_output_columns + [output] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: group_columns = group_columns + [column] group_ops = group_ops + [op] group_output_columns = group_output_columns + [key] if op == '__builtin__concat__dict__': key_column = column[0] key_column_type = self.select_column(key_column).dtype if not key_column_type in (int, float, str): raise TypeError('CONCAT key column must be int, float or str type') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['tuple']] elif val == aggregate.COUNT: group_output_columns = group_output_columns + [key] val = aggregate.COUNT() (op, column) = val group_columns = group_columns + [column] group_ops = 
group_ops + [op] # depends on [control=['if'], data=['val']] else: raise TypeError('Unexpected type in aggregator definition of output column: ' + key) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]] elif isinstance(operation, list): # we will be using automatically defined column names for val in operation: if type(val) is tuple: (op, column) = val if op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]: op = '__builtin__vector__avg__' # depends on [control=['if'], data=[]] if op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]: op = '__builtin__vector__sum__' # depends on [control=['if'], data=[]] if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple: for col in column[0]: group_columns = group_columns + [[col, column[1]]] group_ops = group_ops + [op] group_output_columns = group_output_columns + [''] # depends on [control=['for'], data=['col']] # depends on [control=['if'], data=[]] else: group_columns = group_columns + [column] group_ops = group_ops + [op] group_output_columns = group_output_columns + [''] if op == '__builtin__concat__dict__': key_column = column[0] key_column_type = self.select_column(key_column).dtype if not key_column_type in (int, float, str): raise TypeError('CONCAT key column must be int, float or str type') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['tuple']] elif val == aggregate.COUNT: group_output_columns = group_output_columns + [''] val = aggregate.COUNT() (op, column) = val group_columns = group_columns + [column] group_ops = group_ops + [op] # depends on [control=['if'], data=['val']] else: raise TypeError('Unexpected type in aggregator definition.') # depends on [control=['for'], data=['val']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['op_entry']] # let's validate group_columns and group_ops 
are valid for (cols, op) in zip(group_columns, group_ops): for col in cols: if not isinstance(col, str): raise TypeError('Column name must be a string') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['col']] if not isinstance(op, str): raise TypeError('Operation type not recognized.') # depends on [control=['if'], data=[]] if op is not aggregate.COUNT()[0]: for col in cols: if col not in my_column_names: raise KeyError('Column ' + col + ' does not exist in SFrame') # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=['col']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] with cython_context(): return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns, group_output_columns, group_ops)) # depends on [control=['with'], data=[]]
def add(self, order, cells, no_validation=False):
    """Insert cells into this MOC at the given order.

    The MOC is left in an un-normalized state after insertion.
    ``cells`` is any collection of integers (or values convertible to
    integers).

    >>> m = MOC()
    >>> m.add(4, (20, 21))
    >>> m.cells
    2
    >>> m.add(5, (88, 89))
    >>> m.cells
    4

    When ``no_validation`` is set, the cell numbers are added exactly as
    given: they must already be integers in the correct range.
    """

    # Any insertion can break normalization, so clear the flag up front.
    self._normalized = False

    order = self._validate_order(order)

    if no_validation:
        # Caller guarantees the cells are valid; add them directly.
        self._orders[order].update(cells)
    else:
        # Validate (and coerce) every cell before touching the order set.
        checked = {self._validate_cell(order, cell) for cell in cells}
        self._orders[order].update(checked)
def function[add, parameter[self, order, cells, no_validation]]: constant[Add cells at a given order to the MOC. The cells are inserted into the MOC at the specified order. This leaves the MOC in an un-normalized state. The cells are given as a collection of integers (or types which can be converted to integers). >>> m = MOC() >>> m.add(4, (20, 21)) >>> m.cells 2 >>> m.add(5, (88, 89)) >>> m.cells 4 The `no_validation` option can be given to skip validation of the cell numbers. They must already be integers in the correct range. ] name[self]._normalized assign[=] constant[False] variable[order] assign[=] call[name[self]._validate_order, parameter[name[order]]] if name[no_validation] begin[:] call[call[name[self]._orders][name[order]].update, parameter[name[cells]]]
keyword[def] identifier[add] ( identifier[self] , identifier[order] , identifier[cells] , identifier[no_validation] = keyword[False] ): literal[string] identifier[self] . identifier[_normalized] = keyword[False] identifier[order] = identifier[self] . identifier[_validate_order] ( identifier[order] ) keyword[if] identifier[no_validation] : identifier[self] . identifier[_orders] [ identifier[order] ]. identifier[update] ( identifier[cells] ) keyword[else] : identifier[cell_set] = identifier[set] () keyword[for] identifier[cell] keyword[in] identifier[cells] : identifier[cell] = identifier[self] . identifier[_validate_cell] ( identifier[order] , identifier[cell] ) identifier[cell_set] . identifier[add] ( identifier[cell] ) identifier[self] . identifier[_orders] [ identifier[order] ]. identifier[update] ( identifier[cell_set] )
def add(self, order, cells, no_validation=False): """Add cells at a given order to the MOC. The cells are inserted into the MOC at the specified order. This leaves the MOC in an un-normalized state. The cells are given as a collection of integers (or types which can be converted to integers). >>> m = MOC() >>> m.add(4, (20, 21)) >>> m.cells 2 >>> m.add(5, (88, 89)) >>> m.cells 4 The `no_validation` option can be given to skip validation of the cell numbers. They must already be integers in the correct range. """ self._normalized = False order = self._validate_order(order) if no_validation: # Simply add the given cells to the set with no validation. self._orders[order].update(cells) # depends on [control=['if'], data=[]] else: # Collect validated cell numbers in a set for addition. cell_set = set() for cell in cells: cell = self._validate_cell(order, cell) cell_set.add(cell) # depends on [control=['for'], data=['cell']] self._orders[order].update(cell_set)
def _get_edgesobj(gosubdag, **kws):
    """Return the specified GoSubDag edges initialization object.

    Keyword args (kws):
      * dst_srcs_list -- edges are pruned so that only GO terms lying
        between the sets of destinations and sources are retained.
      * traverse_parent / traverse_child -- build a GoSubDag containing
        all parent and/or child terms without pruning any paths.
    """
    # Pruned-path variant takes precedence when a dst/srcs list is given.
    dst_srcs = kws.get('dst_srcs_list')
    if dst_srcs is None:
        return EdgesRelatives(
            gosubdag,
            kws.get('traverse_parent', True),
            kws.get('traverse_child', False))
    return EdgesPath(gosubdag, dst_srcs)
def function[_get_edgesobj, parameter[gosubdag]]: constant[Return specfied GoSubDag initialization object.] variable[dst_srcs_list] assign[=] call[name[kws].get, parameter[constant[dst_srcs_list], constant[None]]] if compare[name[dst_srcs_list] is_not constant[None]] begin[:] return[call[name[EdgesPath], parameter[name[gosubdag], name[dst_srcs_list]]]] return[call[name[EdgesRelatives], parameter[name[gosubdag], call[name[kws].get, parameter[constant[traverse_parent], constant[True]]], call[name[kws].get, parameter[constant[traverse_child], constant[False]]]]]]
keyword[def] identifier[_get_edgesobj] ( identifier[gosubdag] ,** identifier[kws] ): literal[string] identifier[dst_srcs_list] = identifier[kws] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[dst_srcs_list] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[EdgesPath] ( identifier[gosubdag] , identifier[dst_srcs_list] ) keyword[return] identifier[EdgesRelatives] ( identifier[gosubdag] , identifier[kws] . identifier[get] ( literal[string] , keyword[True] ), identifier[kws] . identifier[get] ( literal[string] , keyword[False] ))
def _get_edgesobj(gosubdag, **kws): """Return specfied GoSubDag initialization object.""" # Keyword args (kws): # 1. dst_srcs_list Used for edges pruned such that only GO terms # are retained which are between the sets of dst & srcs. # 2 traverse_parent & traverse_child # Used to generate a GoSubDag with all parent terms and/or # all child terms, without pruning any paths. # Call function, get_edgesobj, with: # get_edgesobj(go2obj, dst_srcs_list=...) # Or any of: # get_edgesobj(go2obj, go_sources=...) # get_edgesobj(go2obj, go_sources=..., traverse_parent=...,) # get_edgesobj(go2obj, go_sources=..., traverse_child=...,) # get_edgesobj(go2obj, go_sources=..., traverse_parent=..., traverse_child=...,) dst_srcs_list = kws.get('dst_srcs_list', None) if dst_srcs_list is not None: return EdgesPath(gosubdag, dst_srcs_list) # depends on [control=['if'], data=['dst_srcs_list']] return EdgesRelatives(gosubdag, kws.get('traverse_parent', True), kws.get('traverse_child', False))
def isVideo(self):
    """
    Is the stream labelled as a video stream.

    Returns:
        bool: True when this stream has a truthy ``codec_type``
        attribute equal to ``'video'``, False otherwise.
    """
    # Use dict.get so a stream record without a 'codec_type' entry
    # reports False instead of raising KeyError (the original indexed
    # self.__dict__ directly).
    codec_type = self.__dict__.get('codec_type')
    return bool(codec_type) and self.codec_type == 'video'
def function[isVideo, parameter[self]]: constant[ Is the stream labelled as a video stream. ] variable[val] assign[=] constant[False] if call[name[self].__dict__][constant[codec_type]] begin[:] if compare[name[self].codec_type equal[==] constant[video]] begin[:] variable[val] assign[=] constant[True] return[name[val]]
keyword[def] identifier[isVideo] ( identifier[self] ): literal[string] identifier[val] = keyword[False] keyword[if] identifier[self] . identifier[__dict__] [ literal[string] ]: keyword[if] identifier[self] . identifier[codec_type] == literal[string] : identifier[val] = keyword[True] keyword[return] identifier[val]
def isVideo(self): """ Is the stream labelled as a video stream. """ val = False if self.__dict__['codec_type']: if self.codec_type == 'video': val = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return val
def split(self, string, maxsplit=0):
    """Split string by the occurrences of pattern.

    Pieces of ``string`` between matches are collected into a list; when
    the pattern defines capture groups, the captured substrings are
    spliced into the list after each piece (mirroring ``re.split``).
    ``maxsplit`` limits the number of splits performed; 0 means no limit.
    """
    splitlist = []
    # NOTE(review): sys.maxint exists only on Python 2 — this module
    # appears to target Python 2 (Python 3 would need sys.maxsize).
    state = _State(string, 0, sys.maxint, self.flags)
    n = 0                # number of splits performed so far
    last = state.start   # end of the previous match / start of next piece
    while not maxsplit or n < maxsplit:
        state.reset()
        state.string_position = state.start
        if not state.search(self._code):
            # No further match: the trailing remainder is appended below.
            break
        if state.start == state.string_position:  # zero-width match
            if last == state.end:  # or end of string
                break
            # Advance one position so a zero-width match cannot loop forever.
            state.start += 1
            continue
        splitlist.append(string[last:state.start])
        # add groups (if any)
        if self.groups:
            match = SRE_Match(self, state)
            # TODO: Use .extend once it is implemented.
            # splitlist.extend(list(match.groups(None)))
            splitlist += (list(match.groups(None)))
        n += 1
        last = state.start = state.string_position
    # Remainder after the final split (the whole string if nothing matched).
    splitlist.append(string[last:state.end])
    return splitlist
def function[split, parameter[self, string, maxsplit]]: constant[Split string by the occurrences of pattern.] variable[splitlist] assign[=] list[[]] variable[state] assign[=] call[name[_State], parameter[name[string], constant[0], name[sys].maxint, name[self].flags]] variable[n] assign[=] constant[0] variable[last] assign[=] name[state].start while <ast.BoolOp object at 0x7da20c6c7b20> begin[:] call[name[state].reset, parameter[]] name[state].string_position assign[=] name[state].start if <ast.UnaryOp object at 0x7da1b2347340> begin[:] break if compare[name[state].start equal[==] name[state].string_position] begin[:] if compare[name[last] equal[==] name[state].end] begin[:] break <ast.AugAssign object at 0x7da1b2347700> continue call[name[splitlist].append, parameter[call[name[string]][<ast.Slice object at 0x7da1b23457e0>]]] if name[self].groups begin[:] variable[match] assign[=] call[name[SRE_Match], parameter[name[self], name[state]]] <ast.AugAssign object at 0x7da18f813ee0> <ast.AugAssign object at 0x7da18f810310> variable[last] assign[=] name[state].string_position call[name[splitlist].append, parameter[call[name[string]][<ast.Slice object at 0x7da18f810d90>]]] return[name[splitlist]]
keyword[def] identifier[split] ( identifier[self] , identifier[string] , identifier[maxsplit] = literal[int] ): literal[string] identifier[splitlist] =[] identifier[state] = identifier[_State] ( identifier[string] , literal[int] , identifier[sys] . identifier[maxint] , identifier[self] . identifier[flags] ) identifier[n] = literal[int] identifier[last] = identifier[state] . identifier[start] keyword[while] keyword[not] identifier[maxsplit] keyword[or] identifier[n] < identifier[maxsplit] : identifier[state] . identifier[reset] () identifier[state] . identifier[string_position] = identifier[state] . identifier[start] keyword[if] keyword[not] identifier[state] . identifier[search] ( identifier[self] . identifier[_code] ): keyword[break] keyword[if] identifier[state] . identifier[start] == identifier[state] . identifier[string_position] : keyword[if] identifier[last] == identifier[state] . identifier[end] : keyword[break] identifier[state] . identifier[start] += literal[int] keyword[continue] identifier[splitlist] . identifier[append] ( identifier[string] [ identifier[last] : identifier[state] . identifier[start] ]) keyword[if] identifier[self] . identifier[groups] : identifier[match] = identifier[SRE_Match] ( identifier[self] , identifier[state] ) identifier[splitlist] +=( identifier[list] ( identifier[match] . identifier[groups] ( keyword[None] ))) identifier[n] += literal[int] identifier[last] = identifier[state] . identifier[start] = identifier[state] . identifier[string_position] identifier[splitlist] . identifier[append] ( identifier[string] [ identifier[last] : identifier[state] . identifier[end] ]) keyword[return] identifier[splitlist]
def split(self, string, maxsplit=0): """Split string by the occurrences of pattern.""" splitlist = [] state = _State(string, 0, sys.maxint, self.flags) n = 0 last = state.start while not maxsplit or n < maxsplit: state.reset() state.string_position = state.start if not state.search(self._code): break # depends on [control=['if'], data=[]] if state.start == state.string_position: # zero-width match if last == state.end: # or end of string break # depends on [control=['if'], data=[]] state.start += 1 continue # depends on [control=['if'], data=[]] splitlist.append(string[last:state.start]) # add groups (if any) if self.groups: match = SRE_Match(self, state) # TODO: Use .extend once it is implemented. # splitlist.extend(list(match.groups(None))) splitlist += list(match.groups(None)) # depends on [control=['if'], data=[]] n += 1 last = state.start = state.string_position # depends on [control=['while'], data=[]] splitlist.append(string[last:state.end]) return splitlist
def create_contact(self, *args, **kwargs):
    """Creates a contact"""
    # Default contact fields; any caller-supplied kwargs override them.
    contact_data = {
        'active': True,
        'helpdesk_agent': False,
        'description': 'Freshdesk Contact',
    }
    contact_data.update(kwargs)
    # POST the contact wrapped under the 'user' key, as the API expects,
    # and build a Contact from the 'user' object in the response.
    response = self._api._post('contacts.json', data={'user': contact_data})
    return Contact(**response['user'])
def function[create_contact, parameter[self]]: constant[Creates a contact] variable[url] assign[=] constant[contacts.json] variable[contact_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b113bac0>, <ast.Constant object at 0x7da1b1138a00>, <ast.Constant object at 0x7da1b11385e0>], [<ast.Constant object at 0x7da1b1139450>, <ast.Constant object at 0x7da1b113b190>, <ast.Constant object at 0x7da1b113b430>]] call[name[contact_data].update, parameter[name[kwargs]]] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b11396c0>], [<ast.Name object at 0x7da1b1139360>]] return[call[name[Contact], parameter[]]]
keyword[def] identifier[create_contact] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[url] = literal[string] identifier[contact_data] ={ literal[string] : keyword[True] , literal[string] : keyword[False] , literal[string] : literal[string] } identifier[contact_data] . identifier[update] ( identifier[kwargs] ) identifier[payload] ={ literal[string] : identifier[contact_data] } keyword[return] identifier[Contact] (** identifier[self] . identifier[_api] . identifier[_post] ( identifier[url] , identifier[data] = identifier[payload] )[ literal[string] ])
def create_contact(self, *args, **kwargs): """Creates a contact""" url = 'contacts.json' contact_data = {'active': True, 'helpdesk_agent': False, 'description': 'Freshdesk Contact'} contact_data.update(kwargs) payload = {'user': contact_data} return Contact(**self._api._post(url, data=payload)['user'])
def enbw(wnd):
    """
    Equivalent Noise Bandwidth in bins (Processing Gain reciprocal).
    """
    # ENBW = N * sum(w[k]^2) / (sum(w[k]))^2 for an N-point window.
    power_sum = sum(sample ** 2 for sample in wnd)
    coherent_sum = sum(wnd)
    return power_sum / coherent_sum ** 2 * len(wnd)
def function[enbw, parameter[wnd]]: constant[ Equivalent Noise Bandwidth in bins (Processing Gain reciprocal). ] return[binary_operation[binary_operation[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b06d14b0>]] / binary_operation[call[name[sum], parameter[name[wnd]]] ** constant[2]]] * call[name[len], parameter[name[wnd]]]]]
keyword[def] identifier[enbw] ( identifier[wnd] ): literal[string] keyword[return] identifier[sum] ( identifier[el] ** literal[int] keyword[for] identifier[el] keyword[in] identifier[wnd] )/ identifier[sum] ( identifier[wnd] )** literal[int] * identifier[len] ( identifier[wnd] )
def enbw(wnd): """ Equivalent Noise Bandwidth in bins (Processing Gain reciprocal). """ return sum((el ** 2 for el in wnd)) / sum(wnd) ** 2 * len(wnd)
def cmd_nc(host, port, family, ssl_enable, crlf, source_ip, source_port, protocol):
    """Some kind of netcat/ncat replacement.

    The execution emulates the feeling of this popular tools.

    Example:

    \b
    $ habu.nc --crlf www.portantier.com 80
    Connected to 45.77.113.133 80
    HEAD / HTTP/1.0

    \b
    HTTP/1.0 301 Moved Permanently
    Date: Thu, 26 Jul 2018 21:10:51 GMT
    Server: OpenBSD httpd
    Connection: close
    Content-Type: text/html
    Content-Length: 443
    Location: https://www.portantier.com/
    """

    resolved = socket.getaddrinfo(host, port)

    # Map the CLI family selector (presumably '4', '6' or '46' from a
    # click option — TODO confirm) to the socket address families.
    families = { '4' : [ socket.AF_INET ], '6' : [ socket.AF_INET6 ], '46': [ socket.AF_INET, socket.AF_INET6] }

    # Pick the LAST resolved address matching the requested family.
    # Each entry looks like:
    # (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0))
    address = None
    for r in resolved:
        if r[0] in families[family]:
            address = r

    if not address:
        print('Could not resolve {} to the ip address family selected ({})'.format(host, family), file=sys.stderr)
        sys.exit(1)

    # Buffer of bytes read from stdin, not yet written to the socket.
    to_send = b''

    if not source_ip:
        # Choose a local source address that routes to the destination.
        source_ip = which_source_for(address[4][0])

    if protocol == 'tcp':
        s = socket.socket(address[0], socket.SOCK_STREAM)
    else:
        s = socket.socket(address[0], socket.SOCK_DGRAM)

    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((source_ip, source_port))

    if ssl_enable:
        # Default SSLContext(); NOTE(review): no certificate verification
        # is configured here — confirm this is intentional for a nc clone.
        ssl_context = ssl.SSLContext()
        s = ssl_context.wrap_socket(s, server_side=False)

    try:
        # connect() is also used for UDP: it fixes the peer address.
        s.connect((address[4][0], port))
        print('Connected to', address[4][0], port, file=sys.stderr)
    except Exception as e:
        print(e, file=sys.stderr)
        sys.exit(1)

    # Multiplex stdin and the socket: forward stdin lines to the peer and
    # copy peer data to stdout until the remote side closes.
    while True:

        iready, oready, eready = select.select([sys.stdin, s], [], [s])

        for i in iready:
            if i == sys.stdin:
                if crlf:
                    # Translate line endings to CRLF (e.g. for HTTP).
                    to_send += i.readline().replace('\n', '\r\n').encode()
                else:
                    to_send += i.readline().encode()
            else:
                received = s.recv(4096)
                if not received:
                    # Remote closed the connection: terminate the process.
                    sys.exit(1)
                # Raw write to stdout (bytes, no text decoding).
                os.write(sys.stdout.fileno(), received)

        iready, oready, eready = select.select([], [s], [s])

        for o in oready:
            if to_send:
                o.send(to_send)
                to_send = b''

    # NOTE(review): unreachable — the loop above only exits via sys.exit().
    s.close()
def function[cmd_nc, parameter[host, port, family, ssl_enable, crlf, source_ip, source_port, protocol]]: constant[Some kind of netcat/ncat replacement. The execution emulates the feeling of this popular tools. Example:  $ habu.nc --crlf www.portantier.com 80 Connected to 45.77.113.133 80 HEAD / HTTP/1.0  HTTP/1.0 301 Moved Permanently Date: Thu, 26 Jul 2018 21:10:51 GMT Server: OpenBSD httpd Connection: close Content-Type: text/html Content-Length: 443 Location: https://www.portantier.com/ ] variable[resolved] assign[=] call[name[socket].getaddrinfo, parameter[name[host], name[port]]] variable[families] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d8c940>, <ast.Constant object at 0x7da1b1d8f100>, <ast.Constant object at 0x7da1b1d8f0d0>], [<ast.List object at 0x7da1b1d8f250>, <ast.List object at 0x7da1b1d8d570>, <ast.List object at 0x7da1b1d8e260>]] variable[address] assign[=] constant[None] for taget[name[r]] in starred[name[resolved]] begin[:] if compare[call[name[r]][constant[0]] in call[name[families]][name[family]]] begin[:] variable[address] assign[=] name[r] if <ast.UnaryOp object at 0x7da1b1d8de70> begin[:] call[name[print], parameter[call[constant[Could not resolve {} to the ip address family selected ({})].format, parameter[name[host], name[family]]]]] call[name[sys].exit, parameter[constant[1]]] variable[to_send] assign[=] constant[b''] if <ast.UnaryOp object at 0x7da1b1d8cb80> begin[:] variable[source_ip] assign[=] call[name[which_source_for], parameter[call[call[name[address]][constant[4]]][constant[0]]]] if compare[name[protocol] equal[==] constant[tcp]] begin[:] variable[s] assign[=] call[name[socket].socket, parameter[call[name[address]][constant[0]], name[socket].SOCK_STREAM]] call[name[s].setsockopt, parameter[name[socket].SOL_SOCKET, name[socket].SO_REUSEADDR, constant[1]]] call[name[s].bind, parameter[tuple[[<ast.Name object at 0x7da1b1d8f040>, <ast.Name object at 0x7da1b1d8d930>]]]] if name[ssl_enable] begin[:] variable[ssl_context] 
assign[=] call[name[ssl].SSLContext, parameter[]] variable[s] assign[=] call[name[ssl_context].wrap_socket, parameter[name[s]]] <ast.Try object at 0x7da1b22ad900> while constant[True] begin[:] <ast.Tuple object at 0x7da2044c0550> assign[=] call[name[select].select, parameter[list[[<ast.Attribute object at 0x7da1b1d8fb20>, <ast.Name object at 0x7da1b1d8cc70>]], list[[]], list[[<ast.Name object at 0x7da1b1d8f0a0>]]]] for taget[name[i]] in starred[name[iready]] begin[:] if compare[name[i] equal[==] name[sys].stdin] begin[:] if name[crlf] begin[:] <ast.AugAssign object at 0x7da1b1d8eb00> <ast.Tuple object at 0x7da1b1d8d2a0> assign[=] call[name[select].select, parameter[list[[]], list[[<ast.Name object at 0x7da1b1d8f940>]], list[[<ast.Name object at 0x7da1b1d8fd30>]]]] for taget[name[o]] in starred[name[oready]] begin[:] if name[to_send] begin[:] call[name[o].send, parameter[name[to_send]]] variable[to_send] assign[=] constant[b''] call[name[s].close, parameter[]]
keyword[def] identifier[cmd_nc] ( identifier[host] , identifier[port] , identifier[family] , identifier[ssl_enable] , identifier[crlf] , identifier[source_ip] , identifier[source_port] , identifier[protocol] ): literal[string] identifier[resolved] = identifier[socket] . identifier[getaddrinfo] ( identifier[host] , identifier[port] ) identifier[families] ={ literal[string] :[ identifier[socket] . identifier[AF_INET] ], literal[string] :[ identifier[socket] . identifier[AF_INET6] ], literal[string] :[ identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[AF_INET6] ] } identifier[address] = keyword[None] keyword[for] identifier[r] keyword[in] identifier[resolved] : keyword[if] identifier[r] [ literal[int] ] keyword[in] identifier[families] [ identifier[family] ]: identifier[address] = identifier[r] keyword[if] keyword[not] identifier[address] : identifier[print] ( literal[string] . identifier[format] ( identifier[host] , identifier[family] ), identifier[file] = identifier[sys] . identifier[stderr] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[to_send] = literal[string] keyword[if] keyword[not] identifier[source_ip] : identifier[source_ip] = identifier[which_source_for] ( identifier[address] [ literal[int] ][ literal[int] ]) keyword[if] identifier[protocol] == literal[string] : identifier[s] = identifier[socket] . identifier[socket] ( identifier[address] [ literal[int] ], identifier[socket] . identifier[SOCK_STREAM] ) keyword[else] : identifier[s] = identifier[socket] . identifier[socket] ( identifier[address] [ literal[int] ], identifier[socket] . identifier[SOCK_DGRAM] ) identifier[s] . identifier[setsockopt] ( identifier[socket] . identifier[SOL_SOCKET] , identifier[socket] . identifier[SO_REUSEADDR] , literal[int] ) identifier[s] . identifier[bind] (( identifier[source_ip] , identifier[source_port] )) keyword[if] identifier[ssl_enable] : identifier[ssl_context] = identifier[ssl] . 
identifier[SSLContext] () identifier[s] = identifier[ssl_context] . identifier[wrap_socket] ( identifier[s] , identifier[server_side] = keyword[False] ) keyword[try] : identifier[s] . identifier[connect] (( identifier[address] [ literal[int] ][ literal[int] ], identifier[port] )) identifier[print] ( literal[string] , identifier[address] [ literal[int] ][ literal[int] ], identifier[port] , identifier[file] = identifier[sys] . identifier[stderr] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( identifier[e] , identifier[file] = identifier[sys] . identifier[stderr] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[while] keyword[True] : identifier[iready] , identifier[oready] , identifier[eready] = identifier[select] . identifier[select] ([ identifier[sys] . identifier[stdin] , identifier[s] ],[],[ identifier[s] ]) keyword[for] identifier[i] keyword[in] identifier[iready] : keyword[if] identifier[i] == identifier[sys] . identifier[stdin] : keyword[if] identifier[crlf] : identifier[to_send] += identifier[i] . identifier[readline] (). identifier[replace] ( literal[string] , literal[string] ). identifier[encode] () keyword[else] : identifier[to_send] += identifier[i] . identifier[readline] (). identifier[encode] () keyword[else] : identifier[received] = identifier[s] . identifier[recv] ( literal[int] ) keyword[if] keyword[not] identifier[received] : identifier[sys] . identifier[exit] ( literal[int] ) identifier[os] . identifier[write] ( identifier[sys] . identifier[stdout] . identifier[fileno] (), identifier[received] ) identifier[iready] , identifier[oready] , identifier[eready] = identifier[select] . identifier[select] ([],[ identifier[s] ],[ identifier[s] ]) keyword[for] identifier[o] keyword[in] identifier[oready] : keyword[if] identifier[to_send] : identifier[o] . identifier[send] ( identifier[to_send] ) identifier[to_send] = literal[string] identifier[s] . identifier[close] ()
def cmd_nc(host, port, family, ssl_enable, crlf, source_ip, source_port, protocol): """Some kind of netcat/ncat replacement. The execution emulates the feeling of this popular tools. Example: \x08 $ habu.nc --crlf www.portantier.com 80 Connected to 45.77.113.133 80 HEAD / HTTP/1.0 \x08 HTTP/1.0 301 Moved Permanently Date: Thu, 26 Jul 2018 21:10:51 GMT Server: OpenBSD httpd Connection: close Content-Type: text/html Content-Length: 443 Location: https://www.portantier.com/ """ resolved = socket.getaddrinfo(host, port) families = {'4': [socket.AF_INET], '6': [socket.AF_INET6], '46': [socket.AF_INET, socket.AF_INET6]} address = None for r in resolved: if r[0] in families[family]: address = r # (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']] if not address: print('Could not resolve {} to the ip address family selected ({})'.format(host, family), file=sys.stderr) sys.exit(1) # depends on [control=['if'], data=[]] to_send = b'' if not source_ip: source_ip = which_source_for(address[4][0]) # depends on [control=['if'], data=[]] if protocol == 'tcp': s = socket.socket(address[0], socket.SOCK_STREAM) # depends on [control=['if'], data=[]] else: s = socket.socket(address[0], socket.SOCK_DGRAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((source_ip, source_port)) if ssl_enable: ssl_context = ssl.SSLContext() s = ssl_context.wrap_socket(s, server_side=False) # depends on [control=['if'], data=[]] try: s.connect((address[4][0], port)) print('Connected to', address[4][0], port, file=sys.stderr) # depends on [control=['try'], data=[]] except Exception as e: print(e, file=sys.stderr) sys.exit(1) # depends on [control=['except'], data=['e']] while True: (iready, oready, eready) = select.select([sys.stdin, s], [], [s]) for i in iready: if i == sys.stdin: if crlf: to_send += i.readline().replace('\n', '\r\n').encode() 
# depends on [control=['if'], data=[]] else: to_send += i.readline().encode() # depends on [control=['if'], data=['i']] else: received = s.recv(4096) if not received: sys.exit(1) # depends on [control=['if'], data=[]] os.write(sys.stdout.fileno(), received) # depends on [control=['for'], data=['i']] (iready, oready, eready) = select.select([], [s], [s]) for o in oready: if to_send: o.send(to_send) to_send = b'' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['o']] # depends on [control=['while'], data=[]] s.close()
def labels(self, filters=None, params=None):
    """
    Gets the security labels from a Indicator/Group/Victim

    Args:
        filters: (optional) filters to narrow the label request.
        params: (optional) query parameters passed through to the request.

    Yields: A Security label
    """
    # Objects that cannot be updated cannot have their labels fetched;
    # report error 910 for this type.
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    # Delegate directly to the TC API client; yield from replaces the
    # original one-character loop variable and streams labels unchanged.
    yield from self.tc_requests.labels(
        self.api_type,
        self.api_sub_type,
        self.unique_id,
        owner=self.owner,
        filters=filters,
        params=params,
    )
def function[labels, parameter[self, filters, params]]: constant[ Gets the security labels from a Indicator/Group/Victim Yields: A Security label ] if <ast.UnaryOp object at 0x7da1b0ce0730> begin[:] call[name[self]._tcex.handle_error, parameter[constant[910], list[[<ast.Attribute object at 0x7da1b0ce38e0>]]]] for taget[name[l]] in starred[call[name[self].tc_requests.labels, parameter[name[self].api_type, name[self].api_sub_type, name[self].unique_id]]] begin[:] <ast.Yield object at 0x7da2044c29e0>
keyword[def] identifier[labels] ( identifier[self] , identifier[filters] = keyword[None] , identifier[params] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[can_update] (): identifier[self] . identifier[_tcex] . identifier[handle_error] ( literal[int] ,[ identifier[self] . identifier[type] ]) keyword[for] identifier[l] keyword[in] identifier[self] . identifier[tc_requests] . identifier[labels] ( identifier[self] . identifier[api_type] , identifier[self] . identifier[api_sub_type] , identifier[self] . identifier[unique_id] , identifier[owner] = identifier[self] . identifier[owner] , identifier[filters] = identifier[filters] , identifier[params] = identifier[params] , ): keyword[yield] identifier[l]
def labels(self, filters=None, params=None): """ Gets the security labels from a Indicator/Group/Victim Yields: A Security label """ if not self.can_update(): self._tcex.handle_error(910, [self.type]) # depends on [control=['if'], data=[]] for l in self.tc_requests.labels(self.api_type, self.api_sub_type, self.unique_id, owner=self.owner, filters=filters, params=params): yield l # depends on [control=['for'], data=['l']]
def reversebait(self, maskmiddle='f', k=19):
    """
    Use the freshly-baited FASTQ files to bait out sequence from the original target files. This will reduce the
    number of possibly targets against which the baited reads must be aligned

    :param maskmiddle: bbduk maskmiddle option ('f' or 't'): whether the middle base of a kmer is masked
    :param k: kmer size to use when self.kmer_size is not configured
    """
    logging.info('Performing reverse kmer baiting of targets with FASTQ files')
    # Prefer the explicitly configured kmer size; otherwise use the k argument.
    if self.kmer_size is None:
        kmer = k
    else:
        kmer = self.kmer_size
    with progressbar(self.runmetadata) as bar:
        for sample in bar:
            if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis:
                outfile = os.path.join(sample[self.analysistype].outputdir, 'baitedtargets.fa')
                # Build the bbduk.sh reverse-baiting call: the baited FASTQ reads act as the
                # reference, pulling matching sequence out of the original bait (target) file.
                sample[self.analysistype].revbbdukcmd = \
                    'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} ' \
                    'maskmiddle={mm} outm={outm}' \
                    .format(mem=self.mem,
                            ref=sample[self.analysistype].baitedfastq,
                            in1=sample[self.analysistype].baitfile,
                            kmer=kmer,
                            cpus=str(self.cpus),
                            mcf=self.cutoff,
                            mm=maskmiddle,
                            outm=outfile)
                # Run the system call (if necessary)
                if not os.path.isfile(outfile):
                    out, err = run_subprocess(sample[self.analysistype].revbbdukcmd)
                    # Log the command that was actually executed. The original code logged
                    # the forward bbdukcmd here — a copy-paste bug — so the log never
                    # recorded the reverse-baiting command.
                    write_to_logfile(sample[self.analysistype].revbbdukcmd,
                                     sample[self.analysistype].revbbdukcmd,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout, sample[self.analysistype].logerr)
                    write_to_logfile(out,
                                     err,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout, sample[self.analysistype].logerr)
                # Set the baitfile to use in the mapping steps as the newly created outfile
                sample[self.analysistype].baitfile = outfile
def function[reversebait, parameter[self, maskmiddle, k]]: constant[ Use the freshly-baited FASTQ files to bait out sequence from the original target files. This will reduce the number of possibly targets against which the baited reads must be aligned ] call[name[logging].info, parameter[constant[Performing reverse kmer baiting of targets with FASTQ files]]] if compare[name[self].kmer_size is constant[None]] begin[:] variable[kmer] assign[=] name[k] with call[name[progressbar], parameter[name[self].runmetadata]] begin[:] for taget[name[sample]] in starred[name[bar]] begin[:] if <ast.BoolOp object at 0x7da1b1ff07c0> begin[:] variable[outfile] assign[=] call[name[os].path.join, parameter[call[name[sample]][name[self].analysistype].outputdir, constant[baitedtargets.fa]]] call[name[sample]][name[self].analysistype].revbbdukcmd assign[=] call[constant[bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} maskmiddle={mm} outm={outm}].format, parameter[]] if <ast.UnaryOp object at 0x7da1b1fbbd00> begin[:] <ast.Tuple object at 0x7da1b1fba740> assign[=] call[name[run_subprocess], parameter[call[name[sample]][name[self].analysistype].revbbdukcmd]] call[name[write_to_logfile], parameter[call[name[sample]][name[self].analysistype].bbdukcmd, call[name[sample]][name[self].analysistype].bbdukcmd, name[self].logfile, name[sample].general.logout, name[sample].general.logerr, call[name[sample]][name[self].analysistype].logout, call[name[sample]][name[self].analysistype].logerr]] call[name[write_to_logfile], parameter[name[out], name[err], name[self].logfile, name[sample].general.logout, name[sample].general.logerr, call[name[sample]][name[self].analysistype].logout, call[name[sample]][name[self].analysistype].logerr]] call[name[sample]][name[self].analysistype].baitfile assign[=] name[outfile]
keyword[def] identifier[reversebait] ( identifier[self] , identifier[maskmiddle] = literal[string] , identifier[k] = literal[int] ): literal[string] identifier[logging] . identifier[info] ( literal[string] ) keyword[if] identifier[self] . identifier[kmer_size] keyword[is] keyword[None] : identifier[kmer] = identifier[k] keyword[else] : identifier[kmer] = identifier[self] . identifier[kmer_size] keyword[with] identifier[progressbar] ( identifier[self] . identifier[runmetadata] ) keyword[as] identifier[bar] : keyword[for] identifier[sample] keyword[in] identifier[bar] : keyword[if] identifier[sample] . identifier[general] . identifier[bestassemblyfile] != literal[string] keyword[and] identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[runanalysis] : identifier[outfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[outputdir] , literal[string] ) identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[revbbdukcmd] = literal[string] literal[string] . identifier[format] ( identifier[mem] = identifier[self] . identifier[mem] , identifier[ref] = identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[baitedfastq] , identifier[in1] = identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[baitfile] , identifier[kmer] = identifier[kmer] , identifier[cpus] = identifier[str] ( identifier[self] . identifier[cpus] ), identifier[mcf] = identifier[self] . identifier[cutoff] , identifier[mm] = identifier[maskmiddle] , identifier[outm] = identifier[outfile] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[outfile] ): identifier[out] , identifier[err] = identifier[run_subprocess] ( identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[revbbdukcmd] ) identifier[write_to_logfile] ( identifier[sample] [ identifier[self] . 
identifier[analysistype] ]. identifier[bbdukcmd] , identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[bbdukcmd] , identifier[self] . identifier[logfile] , identifier[sample] . identifier[general] . identifier[logout] , identifier[sample] . identifier[general] . identifier[logerr] , identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logout] , identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logerr] ) identifier[write_to_logfile] ( identifier[out] , identifier[err] , identifier[self] . identifier[logfile] , identifier[sample] . identifier[general] . identifier[logout] , identifier[sample] . identifier[general] . identifier[logerr] , identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logout] , identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[logerr] ) identifier[sample] [ identifier[self] . identifier[analysistype] ]. identifier[baitfile] = identifier[outfile]
def reversebait(self, maskmiddle='f', k=19): """ Use the freshly-baited FASTQ files to bait out sequence from the original target files. This will reduce the number of possibly targets against which the baited reads must be aligned """ logging.info('Performing reverse kmer baiting of targets with FASTQ files') if self.kmer_size is None: kmer = k # depends on [control=['if'], data=[]] else: kmer = self.kmer_size with progressbar(self.runmetadata) as bar: for sample in bar: if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis: outfile = os.path.join(sample[self.analysistype].outputdir, 'baitedtargets.fa') sample[self.analysistype].revbbdukcmd = 'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} maskmiddle={mm} outm={outm}'.format(mem=self.mem, ref=sample[self.analysistype].baitedfastq, in1=sample[self.analysistype].baitfile, kmer=kmer, cpus=str(self.cpus), mcf=self.cutoff, mm=maskmiddle, outm=outfile) # Run the system call (if necessary) if not os.path.isfile(outfile): (out, err) = run_subprocess(sample[self.analysistype].revbbdukcmd) write_to_logfile(sample[self.analysistype].bbdukcmd, sample[self.analysistype].bbdukcmd, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) # depends on [control=['if'], data=[]] # Set the baitfile to use in the mapping steps as the newly created outfile sample[self.analysistype].baitfile = outfile # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sample']] # depends on [control=['with'], data=['bar']]
def addcomment(self, invoice_increment_id, comment=None, email=False, include_comment=False):
    """
    Add comment to invoice or change its state

    :param invoice_increment_id: Invoice ID
    """
    # The Magento API expects a string comment; substitute '' for None only.
    message = '' if comment is None else comment
    result = self.call(
        'sales_order_invoice.addComment',
        [invoice_increment_id, message, email, include_comment]
    )
    return bool(result)
def function[addcomment, parameter[self, invoice_increment_id, comment, email, include_comment]]: constant[ Add comment to invoice or change its state :param invoice_increment_id: Invoice ID ] if compare[name[comment] is constant[None]] begin[:] variable[comment] assign[=] constant[] return[call[name[bool], parameter[call[name[self].call, parameter[constant[sales_order_invoice.addComment], list[[<ast.Name object at 0x7da1b04d0b50>, <ast.Name object at 0x7da1b04d3e80>, <ast.Name object at 0x7da1b04d2bf0>, <ast.Name object at 0x7da1b04d2b60>]]]]]]]
keyword[def] identifier[addcomment] ( identifier[self] , identifier[invoice_increment_id] , identifier[comment] = keyword[None] , identifier[email] = keyword[False] , identifier[include_comment] = keyword[False] ): literal[string] keyword[if] identifier[comment] keyword[is] keyword[None] : identifier[comment] = literal[string] keyword[return] identifier[bool] ( identifier[self] . identifier[call] ( literal[string] , [ identifier[invoice_increment_id] , identifier[comment] , identifier[email] , identifier[include_comment] ] ) )
def addcomment(self, invoice_increment_id, comment=None, email=False, include_comment=False): """ Add comment to invoice or change its state :param invoice_increment_id: Invoice ID """ if comment is None: comment = '' # depends on [control=['if'], data=['comment']] return bool(self.call('sales_order_invoice.addComment', [invoice_increment_id, comment, email, include_comment]))
def setup_cmd_parser(cls):
    """Returns the Git argument parser."""
    parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
                                          from_date=True, to_date=True)

    # Git-specific optional arguments get their own help group.
    git_group = parser.parser.add_argument_group('Git arguments')
    git_group.add_argument('--branches', dest='branches',
                           nargs='+', type=str, default=None,
                           help="Fetch commits only from these branches")

    # --git-path and --git-log are alternative repository sources.
    repo_source = git_group.add_mutually_exclusive_group()
    repo_source.add_argument('--git-path', dest='git_path',
                             help="Path where the Git repository will be cloned")
    repo_source.add_argument('--git-log', dest='git_log',
                             help="Path to the Git log file")

    # --latest-items and --no-update select mutually exclusive fetch modes.
    fetch_mode = git_group.add_mutually_exclusive_group()
    fetch_mode.add_argument('--latest-items', dest='latest_items',
                            action='store_true',
                            help="Fetch latest commits added to the repository")
    fetch_mode.add_argument('--no-update', dest='no_update',
                            action='store_true',
                            help="Fetch all commits without updating the repository")

    # Positional (required) argument: the repository URI.
    parser.parser.add_argument('uri',
                               help="URI of the Git log repository")

    return parser
def function[setup_cmd_parser, parameter[cls]]: constant[Returns the Git argument parser.] variable[parser] assign[=] call[name[BackendCommandArgumentParser], parameter[name[cls].BACKEND.CATEGORIES]] variable[group] assign[=] call[name[parser].parser.add_argument_group, parameter[constant[Git arguments]]] call[name[group].add_argument, parameter[constant[--branches]]] variable[exgroup] assign[=] call[name[group].add_mutually_exclusive_group, parameter[]] call[name[exgroup].add_argument, parameter[constant[--git-path]]] call[name[exgroup].add_argument, parameter[constant[--git-log]]] variable[exgroup_fetch] assign[=] call[name[group].add_mutually_exclusive_group, parameter[]] call[name[exgroup_fetch].add_argument, parameter[constant[--latest-items]]] call[name[exgroup_fetch].add_argument, parameter[constant[--no-update]]] call[name[parser].parser.add_argument, parameter[constant[uri]]] return[name[parser]]
keyword[def] identifier[setup_cmd_parser] ( identifier[cls] ): literal[string] identifier[parser] = identifier[BackendCommandArgumentParser] ( identifier[cls] . identifier[BACKEND] . identifier[CATEGORIES] , identifier[from_date] = keyword[True] , identifier[to_date] = keyword[True] ) identifier[group] = identifier[parser] . identifier[parser] . identifier[add_argument_group] ( literal[string] ) identifier[group] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[nargs] = literal[string] , identifier[type] = identifier[str] , identifier[default] = keyword[None] , identifier[help] = literal[string] ) identifier[exgroup] = identifier[group] . identifier[add_mutually_exclusive_group] () identifier[exgroup] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] ) identifier[exgroup] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] ) identifier[exgroup_fetch] = identifier[group] . identifier[add_mutually_exclusive_group] () identifier[exgroup_fetch] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[exgroup_fetch] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[parser] . identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) keyword[return] identifier[parser]
def setup_cmd_parser(cls): """Returns the Git argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, to_date=True) # Optional arguments group = parser.parser.add_argument_group('Git arguments') group.add_argument('--branches', dest='branches', nargs='+', type=str, default=None, help='Fetch commits only from these branches') # Mutual exclusive parameters exgroup = group.add_mutually_exclusive_group() exgroup.add_argument('--git-path', dest='git_path', help='Path where the Git repository will be cloned') exgroup.add_argument('--git-log', dest='git_log', help='Path to the Git log file') exgroup_fetch = group.add_mutually_exclusive_group() exgroup_fetch.add_argument('--latest-items', dest='latest_items', action='store_true', help='Fetch latest commits added to the repository') exgroup_fetch.add_argument('--no-update', dest='no_update', action='store_true', help='Fetch all commits without updating the repository') # Required arguments parser.parser.add_argument('uri', help='URI of the Git log repository') return parser
def strings_to_list_string(strings):
    '''Takes a list of strings presumably containing words and phrases,
    and returns a "list" form of those strings, like:

    >>> strings_to_list_string(('cats', 'dogs'))
    >>> 'cats and dogs'

    or

    >>> strings_to_list_string(('pizza', 'pop', 'chips'))
    >>> 'pizza, pop, and chips'

    Raises TypeError if strings is itself a string, and ValueError if
    strings is empty.
    '''
    # On Python 3 a plain isinstance(strings, str) check replaces the
    # six.string_types compatibility shim, removing the third-party
    # dependency from this function.
    if isinstance(strings, str):
        raise TypeError('strings must be an iterable of strings, not a string '
                        'itself')

    if len(strings) == 0:
        raise ValueError('strings may not be empty')
    elif len(strings) == 1:
        return strings[0]
    elif len(strings) == 2:
        # Two items: no comma, just the conjunction.
        return ' and '.join(strings)
    else:
        # Three or more items: comma-separated with an Oxford comma.
        return '{0}, and {1}'.format(', '.join(strings[:-1]), strings[-1])
def function[strings_to_list_string, parameter[strings]]: constant[Takes a list of strings presumably containing words and phrases, and returns a "list" form of those strings, like: >>> strings_to_list_string(('cats', 'dogs')) >>> 'cats and dogs' or >>> strings_to_list_string(('pizza', 'pop', 'chips')) >>> 'pizza, pop, and chips' Raises ValueError if strings is empty. ] if call[name[isinstance], parameter[name[strings], name[six].string_types]] begin[:] <ast.Raise object at 0x7da20c7c9030> if compare[call[name[len], parameter[name[strings]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da20c7c90c0>
keyword[def] identifier[strings_to_list_string] ( identifier[strings] ): literal[string] keyword[if] identifier[isinstance] ( identifier[strings] , identifier[six] . identifier[string_types] ): keyword[raise] identifier[TypeError] ( literal[string] literal[string] ) keyword[if] identifier[len] ( identifier[strings] )== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[elif] identifier[len] ( identifier[strings] )== literal[int] : keyword[return] identifier[strings] [ literal[int] ] keyword[elif] identifier[len] ( identifier[strings] )== literal[int] : keyword[return] literal[string] . identifier[join] ( identifier[strings] ) keyword[else] : keyword[return] literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[strings] [:- literal[int] ]), identifier[strings] [- literal[int] ])
def strings_to_list_string(strings): """Takes a list of strings presumably containing words and phrases, and returns a "list" form of those strings, like: >>> strings_to_list_string(('cats', 'dogs')) >>> 'cats and dogs' or >>> strings_to_list_string(('pizza', 'pop', 'chips')) >>> 'pizza, pop, and chips' Raises ValueError if strings is empty. """ if isinstance(strings, six.string_types): raise TypeError('strings must be an iterable of strings, not a string itself') # depends on [control=['if'], data=[]] if len(strings) == 0: raise ValueError('strings may not be empty') # depends on [control=['if'], data=[]] elif len(strings) == 1: return strings[0] # depends on [control=['if'], data=[]] elif len(strings) == 2: return ' and '.join(strings) # depends on [control=['if'], data=[]] else: return '{0}, and {1}'.format(', '.join(strings[:-1]), strings[-1])
async def get_mailbox(self, name: str, selected: Optional[SelectedMailbox] = None) \
        -> Tuple[MailboxInterface, Optional[SelectedMailbox]]:
    """Retrieves a :class:`~pymap.interfaces.mailbox.MailboxInterface`
    object corresponding to an existing mailbox owned by the user. Raises
    an exception if the mailbox does not yet exist.

    Args:
        name: The name of the mailbox.
        selected: If applicable, the currently selected mailbox name.

    Returns:
        A tuple of the mailbox object and the (possibly ``None``)
        selected-mailbox state.

    Raises:
        :class:`~pymap.exceptions.MailboxNotFound`

    """
    # Abstract method: concrete backends implement the lookup.
    ...
<ast.AsyncFunctionDef object at 0x7da20e955600>
keyword[async] keyword[def] identifier[get_mailbox] ( identifier[self] , identifier[name] : identifier[str] , identifier[selected] : identifier[SelectedMailbox] = keyword[None] )-> identifier[Tuple] [ identifier[MailboxInterface] , identifier[Optional] [ identifier[SelectedMailbox] ]]: literal[string] ...
async def get_mailbox(self, name: str, selected: SelectedMailbox=None) -> Tuple[MailboxInterface, Optional[SelectedMailbox]]: """Retrieves a :class:`~pymap.interfaces.mailbox.MailboxInterface` object corresponding to an existing mailbox owned by the user. Raises an exception if the mailbox does not yet exist. Args: name: The name of the mailbox. selected: If applicable, the currently selected mailbox name. Raises: :class:`~pymap.exceptions.MailboxNotFound` """ ...
def preprocess_worksheet(self, table, worksheet):
    '''
    Performs a preprocess pass of the table to attempt naive conversions
    of data and to record the initial types of each cell.

    :param table: 2D collection of raw cell values (rows of cells).
    :param worksheet: worksheet identifier; used to look up the
        configured skippable rows/columns for that sheet.
    :return: tuple of (table_conversion, flags, units) — the converted
        table (parallel to the input, row by row), the per-cell flags
        recorded during conversion, and the detected units.
    '''
    table_conversion = []  # converted cells, one inner list per input row
    flags = {}             # interpretation flags keyed during conversion
    units = {}             # unit information recorded during conversion
    for rind, row in enumerate(table):
        conversion_row = []
        table_conversion.append(conversion_row)
        # Whole row configured as skippable for this worksheet: flag it and
        # move on, leaving its conversion list empty.
        if self.skippable_rows and worksheet in self.skippable_rows and rind in self.skippable_rows[worksheet]:
            self.flag_change(flags, 'interpreted', (rind, None), worksheet, self.FLAGS['skipped-row'])
            continue
        for cind, cell in enumerate(row):
            position = (rind, cind)
            if self.skippable_columns and worksheet in self.skippable_columns and cind in self.skippable_columns[worksheet]:
                # Skippable column: store a None placeholder so row lengths
                # stay aligned, and flag the skip.
                conversion = None
                self.flag_change(flags, 'interpreted', position, worksheet, self.FLAGS['skipped-column'])
            else:
                # Do the heavy lifting in pre_process_cell
                conversion = auto_convert_cell(self, cell, position, worksheet, flags, units,
                                               parens_as_neg=self.parens_as_neg)
            conversion_row.append(conversion)
    # Give back our conversions, type labeling, and conversion flags
    return table_conversion, flags, units
def function[preprocess_worksheet, parameter[self, table, worksheet]]: constant[ Performs a preprocess pass of the table to attempt naive conversions of data and to record the initial types of each cell. ] variable[table_conversion] assign[=] list[[]] variable[flags] assign[=] dictionary[[], []] variable[units] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b14a3160>, <ast.Name object at 0x7da1b14a2650>]]] in starred[call[name[enumerate], parameter[name[table]]]] begin[:] variable[conversion_row] assign[=] list[[]] call[name[table_conversion].append, parameter[name[conversion_row]]] if <ast.BoolOp object at 0x7da1b14a0d60> begin[:] call[name[self].flag_change, parameter[name[flags], constant[interpreted], tuple[[<ast.Name object at 0x7da1b14a2d10>, <ast.Constant object at 0x7da1b14a3ca0>]], name[worksheet], call[name[self].FLAGS][constant[skipped-row]]]] continue for taget[tuple[[<ast.Name object at 0x7da1b26addb0>, <ast.Name object at 0x7da1b26ace80>]]] in starred[call[name[enumerate], parameter[name[row]]]] begin[:] variable[position] assign[=] tuple[[<ast.Name object at 0x7da1b26ac070>, <ast.Name object at 0x7da1b26afe50>]] if <ast.BoolOp object at 0x7da1b26af820> begin[:] variable[conversion] assign[=] constant[None] call[name[self].flag_change, parameter[name[flags], constant[interpreted], name[position], name[worksheet], call[name[self].FLAGS][constant[skipped-column]]]] call[name[conversion_row].append, parameter[name[conversion]]] return[tuple[[<ast.Name object at 0x7da1b14a24a0>, <ast.Name object at 0x7da1b14a2c50>, <ast.Name object at 0x7da1b14a0b50>]]]
keyword[def] identifier[preprocess_worksheet] ( identifier[self] , identifier[table] , identifier[worksheet] ): literal[string] identifier[table_conversion] =[] identifier[flags] ={} identifier[units] ={} keyword[for] identifier[rind] , identifier[row] keyword[in] identifier[enumerate] ( identifier[table] ): identifier[conversion_row] =[] identifier[table_conversion] . identifier[append] ( identifier[conversion_row] ) keyword[if] identifier[self] . identifier[skippable_rows] keyword[and] identifier[worksheet] keyword[in] identifier[self] . identifier[skippable_rows] keyword[and] identifier[rind] keyword[in] identifier[self] . identifier[skippable_rows] [ identifier[worksheet] ]: identifier[self] . identifier[flag_change] ( identifier[flags] , literal[string] ,( identifier[rind] , keyword[None] ), identifier[worksheet] , identifier[self] . identifier[FLAGS] [ literal[string] ]) keyword[continue] keyword[for] identifier[cind] , identifier[cell] keyword[in] identifier[enumerate] ( identifier[row] ): identifier[position] =( identifier[rind] , identifier[cind] ) keyword[if] identifier[self] . identifier[skippable_columns] keyword[and] identifier[worksheet] keyword[in] identifier[self] . identifier[skippable_columns] keyword[and] identifier[cind] keyword[in] identifier[self] . identifier[skippable_columns] [ identifier[worksheet] ]: identifier[conversion] = keyword[None] identifier[self] . identifier[flag_change] ( identifier[flags] , literal[string] , identifier[position] , identifier[worksheet] , identifier[self] . identifier[FLAGS] [ literal[string] ]) keyword[else] : identifier[conversion] = identifier[auto_convert_cell] ( identifier[self] , identifier[cell] , identifier[position] , identifier[worksheet] , identifier[flags] , identifier[units] , identifier[parens_as_neg] = identifier[self] . identifier[parens_as_neg] ) identifier[conversion_row] . 
identifier[append] ( identifier[conversion] ) keyword[return] identifier[table_conversion] , identifier[flags] , identifier[units]
def preprocess_worksheet(self, table, worksheet): """ Performs a preprocess pass of the table to attempt naive conversions of data and to record the initial types of each cell. """ table_conversion = [] flags = {} units = {} for (rind, row) in enumerate(table): conversion_row = [] table_conversion.append(conversion_row) if self.skippable_rows and worksheet in self.skippable_rows and (rind in self.skippable_rows[worksheet]): self.flag_change(flags, 'interpreted', (rind, None), worksheet, self.FLAGS['skipped-row']) continue # depends on [control=['if'], data=[]] for (cind, cell) in enumerate(row): position = (rind, cind) if self.skippable_columns and worksheet in self.skippable_columns and (cind in self.skippable_columns[worksheet]): conversion = None self.flag_change(flags, 'interpreted', position, worksheet, self.FLAGS['skipped-column']) # depends on [control=['if'], data=[]] else: # Do the heavy lifting in pre_process_cell conversion = auto_convert_cell(self, cell, position, worksheet, flags, units, parens_as_neg=self.parens_as_neg) conversion_row.append(conversion) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # Give back our conversions, type labeling, and conversion flags return (table_conversion, flags, units)
def clear(self):
    """
    Empty this section completely.

    Like ``dict.clear`` but also resets the scalar/section bookkeeping,
    drops all comments and the configspec. Other attributes
    (depth/main/parent) are deliberately left untouched.
    """
    dict.clear(self)
    self.scalars, self.sections = [], []
    self.comments, self.inline_comments = {}, {}
    self.configspec = None
    self.defaults, self.extra_values = [], []
def function[clear, parameter[self]]: constant[ A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected ] call[name[dict].clear, parameter[name[self]]] name[self].scalars assign[=] list[[]] name[self].sections assign[=] list[[]] name[self].comments assign[=] dictionary[[], []] name[self].inline_comments assign[=] dictionary[[], []] name[self].configspec assign[=] constant[None] name[self].defaults assign[=] list[[]] name[self].extra_values assign[=] list[[]]
keyword[def] identifier[clear] ( identifier[self] ): literal[string] identifier[dict] . identifier[clear] ( identifier[self] ) identifier[self] . identifier[scalars] =[] identifier[self] . identifier[sections] =[] identifier[self] . identifier[comments] ={} identifier[self] . identifier[inline_comments] ={} identifier[self] . identifier[configspec] = keyword[None] identifier[self] . identifier[defaults] =[] identifier[self] . identifier[extra_values] =[]
def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected """ dict.clear(self) self.scalars = [] self.sections = [] self.comments = {} self.inline_comments = {} self.configspec = None self.defaults = [] self.extra_values = []
def compute_dkl(fsamps, prior_fsamps, **kwargs):
    """
    Compute the Kullback-Leibler divergence between function samples from
    the posterior and the prior, pre-calculated at a range of x values.

    Parameters
    ----------
    fsamps: 2D numpy.array
        Posterior function samples, as computed by
        :func:`fgivenx.compute_samples`

    prior_fsamps: 2D numpy.array
        Prior function samples, as computed by
        :func:`fgivenx.compute_samples`

    parallel, tqdm_kwargs: optional
        see docstring for :func:`fgivenx.parallel.parallel_apply`.

    cache: str, optional
        File root for saving previous calculations for re-use.

    Returns
    -------
    1D numpy.array:
        Kullback-Leibler divergences at each value of x.
        `shape=(len(fsamps))`
    """
    run_parallel = kwargs.pop('parallel', False)
    cache_root = kwargs.pop('cache', '')
    tqdm_opts = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # When caching is enabled, try to reuse a previous result first.
    store = Cache(cache_root + '_dkl') if cache_root else None
    if store is not None:
        try:
            return store.check(fsamps, prior_fsamps)
        except CacheException as e:
            print(e)

    # DKL consumes (posterior, prior) sample pairs for each x value.
    paired_samples = list(zip(fsamps, prior_fsamps))
    dkls = numpy.array(parallel_apply(DKL, paired_samples,
                                      parallel=run_parallel,
                                      tqdm_kwargs=tqdm_opts))

    if store is not None:
        store.save(fsamps, prior_fsamps, dkls)

    return dkls
def function[compute_dkl, parameter[fsamps, prior_fsamps]]: constant[ Compute the Kullback Leibler divergence for function samples for posterior and prior pre-calculated at a range of x values. Parameters ---------- fsamps: 2D numpy.array Posterior function samples, as computed by :func:`fgivenx.compute_samples` prior_fsamps: 2D numpy.array Prior function samples, as computed by :func:`fgivenx.compute_samples` parallel, tqdm_kwargs: optional see docstring for :func:`fgivenx.parallel.parallel_apply`. cache: str, optional File root for saving previous calculations for re-use. Returns ------- 1D numpy.array: Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))` ] variable[parallel] assign[=] call[name[kwargs].pop, parameter[constant[parallel], constant[False]]] variable[cache] assign[=] call[name[kwargs].pop, parameter[constant[cache], constant[]]] variable[tqdm_kwargs] assign[=] call[name[kwargs].pop, parameter[constant[tqdm_kwargs], dictionary[[], []]]] if name[kwargs] begin[:] <ast.Raise object at 0x7da1b0926bc0> if name[cache] begin[:] variable[cache] assign[=] call[name[Cache], parameter[binary_operation[name[cache] + constant[_dkl]]]] <ast.Try object at 0x7da1b0926e30> variable[zip_fsamps] assign[=] call[name[list], parameter[call[name[zip], parameter[name[fsamps], name[prior_fsamps]]]]] variable[dkls] assign[=] call[name[parallel_apply], parameter[name[DKL], name[zip_fsamps]]] variable[dkls] assign[=] call[name[numpy].array, parameter[name[dkls]]] if name[cache] begin[:] call[name[cache].save, parameter[name[fsamps], name[prior_fsamps], name[dkls]]] return[name[dkls]]
keyword[def] identifier[compute_dkl] ( identifier[fsamps] , identifier[prior_fsamps] ,** identifier[kwargs] ): literal[string] identifier[parallel] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[cache] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] ) identifier[tqdm_kwargs] = identifier[kwargs] . identifier[pop] ( literal[string] ,{}) keyword[if] identifier[kwargs] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[kwargs] ) keyword[if] identifier[cache] : identifier[cache] = identifier[Cache] ( identifier[cache] + literal[string] ) keyword[try] : keyword[return] identifier[cache] . identifier[check] ( identifier[fsamps] , identifier[prior_fsamps] ) keyword[except] identifier[CacheException] keyword[as] identifier[e] : identifier[print] ( identifier[e] ) identifier[zip_fsamps] = identifier[list] ( identifier[zip] ( identifier[fsamps] , identifier[prior_fsamps] )) identifier[dkls] = identifier[parallel_apply] ( identifier[DKL] , identifier[zip_fsamps] , identifier[parallel] = identifier[parallel] , identifier[tqdm_kwargs] = identifier[tqdm_kwargs] ) identifier[dkls] = identifier[numpy] . identifier[array] ( identifier[dkls] ) keyword[if] identifier[cache] : identifier[cache] . identifier[save] ( identifier[fsamps] , identifier[prior_fsamps] , identifier[dkls] ) keyword[return] identifier[dkls]
def compute_dkl(fsamps, prior_fsamps, **kwargs): """ Compute the Kullback Leibler divergence for function samples for posterior and prior pre-calculated at a range of x values. Parameters ---------- fsamps: 2D numpy.array Posterior function samples, as computed by :func:`fgivenx.compute_samples` prior_fsamps: 2D numpy.array Prior function samples, as computed by :func:`fgivenx.compute_samples` parallel, tqdm_kwargs: optional see docstring for :func:`fgivenx.parallel.parallel_apply`. cache: str, optional File root for saving previous calculations for re-use. Returns ------- 1D numpy.array: Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))` """ parallel = kwargs.pop('parallel', False) cache = kwargs.pop('cache', '') tqdm_kwargs = kwargs.pop('tqdm_kwargs', {}) if kwargs: raise TypeError('Unexpected **kwargs: %r' % kwargs) # depends on [control=['if'], data=[]] if cache: cache = Cache(cache + '_dkl') try: return cache.check(fsamps, prior_fsamps) # depends on [control=['try'], data=[]] except CacheException as e: print(e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] zip_fsamps = list(zip(fsamps, prior_fsamps)) dkls = parallel_apply(DKL, zip_fsamps, parallel=parallel, tqdm_kwargs=tqdm_kwargs) dkls = numpy.array(dkls) if cache: cache.save(fsamps, prior_fsamps, dkls) # depends on [control=['if'], data=[]] return dkls
def harvest(lancet, config_section):
    """Construct a new Harvest client."""
    # Credentials are looked up (and validated) through lancet itself.
    url, username, password = lancet.get_credentials(
        config_section, credentials_checker
    )

    # Both getters are resolved from the "timer" configuration section.
    getters = {
        key: lancet.get_instance_from_config("timer", key, lancet)
        for key in ("project_id_getter", "task_id_getter")
    }

    client = HarvestPlatform(
        server=url,
        basic_auth=(username, password),
        project_id_getter=getters["project_id_getter"],
        task_id_getter=getters["task_id_getter"],
    )
    # Make sure the client's connection is released on shutdown.
    lancet.call_on_close(client.close)
    return client
def function[harvest, parameter[lancet, config_section]]: constant[Construct a new Harvest client.] <ast.Tuple object at 0x7da1b11c0eb0> assign[=] call[name[lancet].get_credentials, parameter[name[config_section], name[credentials_checker]]] variable[project_id_getter] assign[=] call[name[lancet].get_instance_from_config, parameter[constant[timer], constant[project_id_getter], name[lancet]]] variable[task_id_getter] assign[=] call[name[lancet].get_instance_from_config, parameter[constant[timer], constant[task_id_getter], name[lancet]]] variable[client] assign[=] call[name[HarvestPlatform], parameter[]] call[name[lancet].call_on_close, parameter[name[client].close]] return[name[client]]
keyword[def] identifier[harvest] ( identifier[lancet] , identifier[config_section] ): literal[string] identifier[url] , identifier[username] , identifier[password] = identifier[lancet] . identifier[get_credentials] ( identifier[config_section] , identifier[credentials_checker] ) identifier[project_id_getter] = identifier[lancet] . identifier[get_instance_from_config] ( literal[string] , literal[string] , identifier[lancet] ) identifier[task_id_getter] = identifier[lancet] . identifier[get_instance_from_config] ( literal[string] , literal[string] , identifier[lancet] ) identifier[client] = identifier[HarvestPlatform] ( identifier[server] = identifier[url] , identifier[basic_auth] =( identifier[username] , identifier[password] ), identifier[project_id_getter] = identifier[project_id_getter] , identifier[task_id_getter] = identifier[task_id_getter] , ) identifier[lancet] . identifier[call_on_close] ( identifier[client] . identifier[close] ) keyword[return] identifier[client]
def harvest(lancet, config_section): """Construct a new Harvest client.""" (url, username, password) = lancet.get_credentials(config_section, credentials_checker) project_id_getter = lancet.get_instance_from_config('timer', 'project_id_getter', lancet) task_id_getter = lancet.get_instance_from_config('timer', 'task_id_getter', lancet) client = HarvestPlatform(server=url, basic_auth=(username, password), project_id_getter=project_id_getter, task_id_getter=task_id_getter) lancet.call_on_close(client.close) return client
def set_data(self, adjacency_mat=None, **kwargs):
    """Set the data

    Parameters
    ----------
    adjacency_mat : ndarray | None
        The adjacency matrix.
    **kwargs : dict
        Keyword arguments to pass to the arrows.

    Raises
    ------
    ValueError
        If the adjacency matrix is not square.
    TypeError
        If an unrecognized keyword argument is passed.
    """
    if adjacency_mat is not None:
        if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
            raise ValueError("Adjacency matrix should be square.")
        self._adjacency_mat = adjacency_mat

    # Attributes applied directly to the edge (arrow) visual, after
    # translating any aliased keyword names.
    for k in self._arrow_attributes:
        if k in kwargs:
            setattr(self._edges, self._arrow_kw_trans.get(k, k),
                    kwargs.pop(k))

    # Keyword arguments stored for later application to the arrows.
    arrow_kwargs = {}
    for k in self._arrow_kwargs:
        if k in kwargs:
            arrow_kwargs[self._arrow_kw_trans.get(k, k)] = kwargs.pop(k)

    # Keyword arguments stored for later application to the node markers.
    node_kwargs = {}
    for k in self._node_kwargs:
        if k in kwargs:
            node_kwargs[self._node_kw_trans.get(k, k)] = kwargs.pop(k)

    if len(kwargs) > 0:
        # BUG FIX: the original format string used "%S", which is not a
        # valid printf conversion character and raised ValueError instead
        # of the intended TypeError listing the offending keywords.
        raise TypeError("%s.set_data() got invalid keyword arguments: %s"
                        % (self.__class__.__name__, list(kwargs.keys())))

    # The actual data is set in GraphVisual.animate_layout or
    # GraphVisual.set_final_layout
    self._arrow_data = arrow_kwargs
    self._node_data = node_kwargs

    if not self._animate:
        self.set_final_layout()
def function[set_data, parameter[self, adjacency_mat]]: constant[Set the data Parameters ---------- adjacency_mat : ndarray | None The adjacency matrix. **kwargs : dict Keyword arguments to pass to the arrows. ] if compare[name[adjacency_mat] is_not constant[None]] begin[:] if compare[call[name[adjacency_mat].shape][constant[0]] not_equal[!=] call[name[adjacency_mat].shape][constant[1]]] begin[:] <ast.Raise object at 0x7da1b0ff90c0> name[self]._adjacency_mat assign[=] name[adjacency_mat] for taget[name[k]] in starred[name[self]._arrow_attributes] begin[:] if compare[name[k] in name[kwargs]] begin[:] variable[translated] assign[=] <ast.IfExp object at 0x7da1b0ff8c10> call[name[setattr], parameter[name[self]._edges, name[translated], call[name[kwargs].pop, parameter[name[k]]]]] variable[arrow_kwargs] assign[=] dictionary[[], []] for taget[name[k]] in starred[name[self]._arrow_kwargs] begin[:] if compare[name[k] in name[kwargs]] begin[:] variable[translated] assign[=] <ast.IfExp object at 0x7da1b0ffb7c0> call[name[arrow_kwargs]][name[translated]] assign[=] call[name[kwargs].pop, parameter[name[k]]] variable[node_kwargs] assign[=] dictionary[[], []] for taget[name[k]] in starred[name[self]._node_kwargs] begin[:] if compare[name[k] in name[kwargs]] begin[:] variable[translated] assign[=] <ast.IfExp object at 0x7da1b0ff8f10> call[name[node_kwargs]][name[translated]] assign[=] call[name[kwargs].pop, parameter[name[k]]] if compare[call[name[len], parameter[name[kwargs]]] greater[>] constant[0]] begin[:] <ast.Raise object at 0x7da1b0ea0610> name[self]._arrow_data assign[=] name[arrow_kwargs] name[self]._node_data assign[=] name[node_kwargs] if <ast.UnaryOp object at 0x7da1b0ea0220> begin[:] call[name[self].set_final_layout, parameter[]]
keyword[def] identifier[set_data] ( identifier[self] , identifier[adjacency_mat] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[adjacency_mat] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[adjacency_mat] . identifier[shape] [ literal[int] ]!= identifier[adjacency_mat] . identifier[shape] [ literal[int] ]: keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[_adjacency_mat] = identifier[adjacency_mat] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_arrow_attributes] : keyword[if] identifier[k] keyword[in] identifier[kwargs] : identifier[translated] =( identifier[self] . identifier[_arrow_kw_trans] [ identifier[k] ] keyword[if] identifier[k] keyword[in] identifier[self] . identifier[_arrow_kw_trans] keyword[else] identifier[k] ) identifier[setattr] ( identifier[self] . identifier[_edges] , identifier[translated] , identifier[kwargs] . identifier[pop] ( identifier[k] )) identifier[arrow_kwargs] ={} keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_arrow_kwargs] : keyword[if] identifier[k] keyword[in] identifier[kwargs] : identifier[translated] =( identifier[self] . identifier[_arrow_kw_trans] [ identifier[k] ] keyword[if] identifier[k] keyword[in] identifier[self] . identifier[_arrow_kw_trans] keyword[else] identifier[k] ) identifier[arrow_kwargs] [ identifier[translated] ]= identifier[kwargs] . identifier[pop] ( identifier[k] ) identifier[node_kwargs] ={} keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_node_kwargs] : keyword[if] identifier[k] keyword[in] identifier[kwargs] : identifier[translated] =( identifier[self] . identifier[_node_kw_trans] [ identifier[k] ] keyword[if] identifier[k] keyword[in] identifier[self] . identifier[_node_kw_trans] keyword[else] identifier[k] ) identifier[node_kwargs] [ identifier[translated] ]= identifier[kwargs] . 
identifier[pop] ( identifier[k] ) keyword[if] identifier[len] ( identifier[kwargs] )> literal[int] : keyword[raise] identifier[TypeError] ( literal[string] %( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[list] ( identifier[kwargs] . identifier[keys] ()))) identifier[self] . identifier[_arrow_data] = identifier[arrow_kwargs] identifier[self] . identifier[_node_data] = identifier[node_kwargs] keyword[if] keyword[not] identifier[self] . identifier[_animate] : identifier[self] . identifier[set_final_layout] ()
def set_data(self, adjacency_mat=None, **kwargs): """Set the data Parameters ---------- adjacency_mat : ndarray | None The adjacency matrix. **kwargs : dict Keyword arguments to pass to the arrows. """ if adjacency_mat is not None: if adjacency_mat.shape[0] != adjacency_mat.shape[1]: raise ValueError('Adjacency matrix should be square.') # depends on [control=['if'], data=[]] self._adjacency_mat = adjacency_mat # depends on [control=['if'], data=['adjacency_mat']] for k in self._arrow_attributes: if k in kwargs: translated = self._arrow_kw_trans[k] if k in self._arrow_kw_trans else k setattr(self._edges, translated, kwargs.pop(k)) # depends on [control=['if'], data=['k', 'kwargs']] # depends on [control=['for'], data=['k']] arrow_kwargs = {} for k in self._arrow_kwargs: if k in kwargs: translated = self._arrow_kw_trans[k] if k in self._arrow_kw_trans else k arrow_kwargs[translated] = kwargs.pop(k) # depends on [control=['if'], data=['k', 'kwargs']] # depends on [control=['for'], data=['k']] node_kwargs = {} for k in self._node_kwargs: if k in kwargs: translated = self._node_kw_trans[k] if k in self._node_kw_trans else k node_kwargs[translated] = kwargs.pop(k) # depends on [control=['if'], data=['k', 'kwargs']] # depends on [control=['for'], data=['k']] if len(kwargs) > 0: raise TypeError('%s.set_data() got invalid keyword arguments: %S' % (self.__class__.__name__, list(kwargs.keys()))) # depends on [control=['if'], data=[]] # The actual data is set in GraphVisual.animate_layout or # GraphVisual.set_final_layout self._arrow_data = arrow_kwargs self._node_data = node_kwargs if not self._animate: self.set_final_layout() # depends on [control=['if'], data=[]]
def fmultiprocess(
        log, function, inputArray, poolSize=False, timeout=3600, **kwargs):
    """multiprocess pool

    **Key Arguments:**
        - ``log`` -- logger
        - ``function`` -- the function to multiprocess
        - ``inputArray`` -- the array to be iterated over
        - ``poolSize`` -- limit the number of CPU that are used in
          multiprocess job
        - ``timeout`` -- time in sec after which to raise a timeout error if
          the processes have not completed

    **Return:**
        - ``resultArray`` -- the array of results

    **Usage:**

        .. code-block:: python

            from fundamentals import multiprocess
            # DEFINE AN INPUT ARRAY
            inputArray = range(10000)
            results = multiprocess(log=log, function=functionName, poolSize=10,
                                   timeout=300, inputArray=inputArray,
                                   otherFunctionKeyword="cheese")
    """
    log.debug('starting the ``multiprocess`` function')

    # DEFINE POOL SIZE - NUMBER OF CPU CORES TO USE (BEST = ALL - 1)
    if not poolSize:
        poolSize = psutil.cpu_count()

    if poolSize:
        p = Pool(processes=poolSize)
    else:
        p = Pool()

    cpuCount = psutil.cpu_count()
    chunksize = int((len(inputArray) + 1) / (cpuCount * 3))
    if chunksize == 0:
        chunksize = 1

    # BUG FIX: inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec exposes the positional-argument names in the same
    # slot [0], so behavior is unchanged on older versions too.
    # MAP-REDUCE THE WORK OVER MULTIPLE CPU CORES
    try:
        if "log" in inspect.getfullargspec(function)[0]:
            mapfunc = partial(function, log=log, **kwargs)
        else:
            mapfunc = partial(function, **kwargs)
        resultArray = p.map_async(
            mapfunc, inputArray, chunksize=chunksize).get(timeout=timeout)
    finally:
        # BUG FIX: previously the pool leaked if ``get`` raised (e.g. on
        # timeout); always shut the workers down.
        p.close()
        p.terminate()

    log.debug('completed the ``multiprocess`` function')
    return resultArray
def function[fmultiprocess, parameter[log, function, inputArray, poolSize, timeout]]: constant[multiprocess pool **Key Arguments:** - ``log`` -- logger - ``function`` -- the function to multiprocess - ``inputArray`` -- the array to be iterated over - ``poolSize`` -- limit the number of CPU that are used in multiprocess job - ``timeout`` -- time in sec after which to raise a timeout error if the processes have not completed **Return:** - ``resultArray`` -- the array of results **Usage:** .. code-block:: python from fundamentals import multiprocess # DEFINE AN INPUT ARRAY inputArray = range(10000) results = multiprocess(log=log, function=functionName, poolSize=10, timeout=300, inputArray=inputArray, otherFunctionKeyword="cheese") ] call[name[log].debug, parameter[constant[starting the ``multiprocess`` function]]] if <ast.UnaryOp object at 0x7da20c6a9f90> begin[:] variable[poolSize] assign[=] call[name[psutil].cpu_count, parameter[]] if name[poolSize] begin[:] variable[p] assign[=] call[name[Pool], parameter[]] variable[cpuCount] assign[=] call[name[psutil].cpu_count, parameter[]] variable[chunksize] assign[=] call[name[int], parameter[binary_operation[binary_operation[call[name[len], parameter[name[inputArray]]] + constant[1]] / binary_operation[name[cpuCount] * constant[3]]]]] if compare[name[chunksize] equal[==] constant[0]] begin[:] variable[chunksize] assign[=] constant[1] if compare[constant[log] in call[call[name[inspect].getargspec, parameter[name[function]]]][constant[0]]] begin[:] variable[mapfunc] assign[=] call[name[partial], parameter[name[function]]] variable[resultArray] assign[=] call[name[p].map_async, parameter[name[mapfunc], name[inputArray]]] variable[resultArray] assign[=] call[name[resultArray].get, parameter[]] call[name[p].close, parameter[]] call[name[p].terminate, parameter[]] call[name[log].debug, parameter[constant[completed the ``multiprocess`` function]]] return[name[resultArray]]
keyword[def] identifier[fmultiprocess] ( identifier[log] , identifier[function] , identifier[inputArray] , identifier[poolSize] = keyword[False] , identifier[timeout] = literal[int] , ** identifier[kwargs] ): literal[string] identifier[log] . identifier[debug] ( literal[string] ) keyword[if] keyword[not] identifier[poolSize] : identifier[poolSize] = identifier[psutil] . identifier[cpu_count] () keyword[if] identifier[poolSize] : identifier[p] = identifier[Pool] ( identifier[processes] = identifier[poolSize] ) keyword[else] : identifier[p] = identifier[Pool] () identifier[cpuCount] = identifier[psutil] . identifier[cpu_count] () identifier[chunksize] = identifier[int] (( identifier[len] ( identifier[inputArray] )+ literal[int] )/( identifier[cpuCount] * literal[int] )) keyword[if] identifier[chunksize] == literal[int] : identifier[chunksize] = literal[int] keyword[if] literal[string] keyword[in] identifier[inspect] . identifier[getargspec] ( identifier[function] )[ literal[int] ]: identifier[mapfunc] = identifier[partial] ( identifier[function] , identifier[log] = identifier[log] ,** identifier[kwargs] ) identifier[resultArray] = identifier[p] . identifier[map_async] ( identifier[mapfunc] , identifier[inputArray] , identifier[chunksize] = identifier[chunksize] ) keyword[else] : identifier[mapfunc] = identifier[partial] ( identifier[function] ,** identifier[kwargs] ) identifier[resultArray] = identifier[p] . identifier[map_async] ( identifier[mapfunc] , identifier[inputArray] , identifier[chunksize] = identifier[chunksize] ) identifier[resultArray] = identifier[resultArray] . identifier[get] ( identifier[timeout] = identifier[timeout] ) identifier[p] . identifier[close] () identifier[p] . identifier[terminate] () identifier[log] . identifier[debug] ( literal[string] ) keyword[return] identifier[resultArray]
def fmultiprocess(log, function, inputArray, poolSize=False, timeout=3600, **kwargs): """multiprocess pool **Key Arguments:** - ``log`` -- logger - ``function`` -- the function to multiprocess - ``inputArray`` -- the array to be iterated over - ``poolSize`` -- limit the number of CPU that are used in multiprocess job - ``timeout`` -- time in sec after which to raise a timeout error if the processes have not completed **Return:** - ``resultArray`` -- the array of results **Usage:** .. code-block:: python from fundamentals import multiprocess # DEFINE AN INPUT ARRAY inputArray = range(10000) results = multiprocess(log=log, function=functionName, poolSize=10, timeout=300, inputArray=inputArray, otherFunctionKeyword="cheese") """ log.debug('starting the ``multiprocess`` function') # DEFINTE POOL SIZE - NUMBER OF CPU CORES TO USE (BEST = ALL - 1) if not poolSize: poolSize = psutil.cpu_count() # depends on [control=['if'], data=[]] if poolSize: p = Pool(processes=poolSize) # depends on [control=['if'], data=[]] else: p = Pool() cpuCount = psutil.cpu_count() chunksize = int((len(inputArray) + 1) / (cpuCount * 3)) if chunksize == 0: chunksize = 1 # depends on [control=['if'], data=['chunksize']] # MAP-REDUCE THE WORK OVER MULTIPLE CPU CORES if 'log' in inspect.getargspec(function)[0]: mapfunc = partial(function, log=log, **kwargs) resultArray = p.map_async(mapfunc, inputArray, chunksize=chunksize) # depends on [control=['if'], data=[]] else: mapfunc = partial(function, **kwargs) resultArray = p.map_async(mapfunc, inputArray, chunksize=chunksize) resultArray = resultArray.get(timeout=timeout) p.close() p.terminate() log.debug('completed the ``multiprocess`` function') return resultArray
def wildcard_allowed_principals(self, pattern=None):
    """
    Return the statements that both allow access and use wildcard
    principals. ``pattern`` optionally restricts which wildcard
    principals count as a match.
    """
    return [
        statement
        for statement in self.statements
        if statement.wildcard_principals(pattern)
        and statement.effect == "Allow"
    ]
def function[wildcard_allowed_principals, parameter[self, pattern]]: constant[ Find statements which allow wildcard principals. A pattern can be specified for the wildcard principal ] variable[wildcard_allowed] assign[=] list[[]] for taget[name[statement]] in starred[name[self].statements] begin[:] if <ast.BoolOp object at 0x7da1b0716890> begin[:] call[name[wildcard_allowed].append, parameter[name[statement]]] return[name[wildcard_allowed]]
keyword[def] identifier[wildcard_allowed_principals] ( identifier[self] , identifier[pattern] = keyword[None] ): literal[string] identifier[wildcard_allowed] =[] keyword[for] identifier[statement] keyword[in] identifier[self] . identifier[statements] : keyword[if] identifier[statement] . identifier[wildcard_principals] ( identifier[pattern] ) keyword[and] identifier[statement] . identifier[effect] == literal[string] : identifier[wildcard_allowed] . identifier[append] ( identifier[statement] ) keyword[return] identifier[wildcard_allowed]
def wildcard_allowed_principals(self, pattern=None): """ Find statements which allow wildcard principals. A pattern can be specified for the wildcard principal """ wildcard_allowed = [] for statement in self.statements: if statement.wildcard_principals(pattern) and statement.effect == 'Allow': wildcard_allowed.append(statement) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['statement']] return wildcard_allowed
def create_frameshift_effect(
        mutated_codon_index,
        sequence_from_mutated_codon,
        variant,
        transcript):
    """
    Determine frameshift effect within a coding sequence (possibly affecting
    either the start or stop codons, or anything in between).

    Parameters
    ----------
    mutated_codon_index : int
        Codon offset (starting from 0 = start codon) of first non-reference
        amino acid in the variant protein

    sequence_from_mutated_codon: Bio.Seq
        Sequence of mutated cDNA, starting from first mutated codon, until
        the end of the transcript

    variant : Variant

    transcript : transcript

    Returns
    -------
    One of ``StartLoss``, ``Silent``, ``StopLoss``, ``FrameShiftTruncation``,
    or ``FrameShift``, depending on where the shift lands and what it changes.
    """
    assert transcript.protein_sequence is not None, \
        "Expect transcript %s to have protein sequence" % transcript
    original_protein_sequence = transcript.protein_sequence
    original_protein_length = len(original_protein_sequence)
    # Translate the shifted reading frame up to the first stop codon
    # (to_stop=True); truncate=True presumably drops a trailing partial
    # codon — TODO confirm against translate()'s contract.
    mutant_protein_suffix = translate(
        nucleotide_sequence=sequence_from_mutated_codon,
        first_codon_is_start=False,
        to_stop=True,
        truncate=True)
    if mutated_codon_index == 0:
        # TODO: scan through sequence_from_mutated_codon for
        # Kozak sequence + start codon to choose the new start
        return StartLoss(variant=variant, transcript=transcript)
    # the frameshifted sequence may contain some amino acids which are
    # the same as the original protein!
    _, mutant_protein_suffix, unchanged_amino_acids = trim_shared_prefix(
        ref=original_protein_sequence[mutated_codon_index:],
        alt=mutant_protein_suffix)
    n_unchanged_amino_acids = len(unchanged_amino_acids)
    # First codon index at which the mutant protein actually diverges.
    offset_to_first_different_amino_acid = \
        mutated_codon_index + n_unchanged_amino_acids
    # miraculously, this frameshift left the protein unchanged,
    # most likely by turning one stop codon into another stop codon
    if n_unchanged_amino_acids == 0:
        aa_ref = ""
    else:
        aa_ref = original_protein_sequence[-n_unchanged_amino_acids:]
    if offset_to_first_different_amino_acid >= original_protein_length:
        # frameshift is either extending the protein or leaving it unchanged
        if len(mutant_protein_suffix) == 0:
            return Silent(
                variant=variant,
                transcript=transcript,
                aa_pos=mutated_codon_index,
                aa_ref=aa_ref)
        else:
            # When all the amino acids are the same as the original, we either
            # have the original protein or we've extended it.
            # If we've extended it, it means we must have lost our stop codon.
            return StopLoss(
                variant=variant,
                transcript=transcript,
                aa_ref=aa_ref,
                aa_alt=mutant_protein_suffix)
    # original amino acid at the mutated codon before the frameshift occurred
    aa_ref = original_protein_sequence[offset_to_first_different_amino_acid]
    # TODO: what if all the shifted amino acids were the same and the protein
    # ended up the same length? Add a Silent case?
    if len(mutant_protein_suffix) == 0:
        # if a frameshift doesn't create any new amino acids, then
        # it must immediately have hit a stop codon
        return FrameShiftTruncation(
            variant=variant,
            transcript=transcript,
            stop_codon_offset=offset_to_first_different_amino_acid)
    return FrameShift(
        variant=variant,
        transcript=transcript,
        aa_mutation_start_offset=offset_to_first_different_amino_acid,
        shifted_sequence=str(mutant_protein_suffix))
def function[create_frameshift_effect, parameter[mutated_codon_index, sequence_from_mutated_codon, variant, transcript]]: constant[ Determine frameshift effect within a coding sequence (possibly affecting either the start or stop codons, or anythign in between) Parameters ---------- mutated_codon_index : int Codon offset (starting from 0 = start codon) of first non-reference amino acid in the variant protein sequence_from_mutated_codon: Bio.Seq Sequence of mutated cDNA, starting from first mutated codon, until the end of the transcript variant : Variant transcript : transcript ] assert[compare[name[transcript].protein_sequence is_not constant[None]]] variable[original_protein_sequence] assign[=] name[transcript].protein_sequence variable[original_protein_length] assign[=] call[name[len], parameter[name[original_protein_sequence]]] variable[mutant_protein_suffix] assign[=] call[name[translate], parameter[]] if compare[name[mutated_codon_index] equal[==] constant[0]] begin[:] return[call[name[StartLoss], parameter[]]] <ast.Tuple object at 0x7da1b0467e80> assign[=] call[name[trim_shared_prefix], parameter[]] variable[n_unchanged_amino_acids] assign[=] call[name[len], parameter[name[unchanged_amino_acids]]] variable[offset_to_first_different_amino_acid] assign[=] binary_operation[name[mutated_codon_index] + name[n_unchanged_amino_acids]] if compare[name[n_unchanged_amino_acids] equal[==] constant[0]] begin[:] variable[aa_ref] assign[=] constant[] if compare[name[offset_to_first_different_amino_acid] greater_or_equal[>=] name[original_protein_length]] begin[:] if compare[call[name[len], parameter[name[mutant_protein_suffix]]] equal[==] constant[0]] begin[:] return[call[name[Silent], parameter[]]] variable[aa_ref] assign[=] call[name[original_protein_sequence]][name[offset_to_first_different_amino_acid]] if compare[call[name[len], parameter[name[mutant_protein_suffix]]] equal[==] constant[0]] begin[:] return[call[name[FrameShiftTruncation], parameter[]]] 
return[call[name[FrameShift], parameter[]]]
keyword[def] identifier[create_frameshift_effect] ( identifier[mutated_codon_index] , identifier[sequence_from_mutated_codon] , identifier[variant] , identifier[transcript] ): literal[string] keyword[assert] identifier[transcript] . identifier[protein_sequence] keyword[is] keyword[not] keyword[None] , literal[string] % identifier[transcript] identifier[original_protein_sequence] = identifier[transcript] . identifier[protein_sequence] identifier[original_protein_length] = identifier[len] ( identifier[original_protein_sequence] ) identifier[mutant_protein_suffix] = identifier[translate] ( identifier[nucleotide_sequence] = identifier[sequence_from_mutated_codon] , identifier[first_codon_is_start] = keyword[False] , identifier[to_stop] = keyword[True] , identifier[truncate] = keyword[True] ) keyword[if] identifier[mutated_codon_index] == literal[int] : keyword[return] identifier[StartLoss] ( identifier[variant] = identifier[variant] , identifier[transcript] = identifier[transcript] ) identifier[_] , identifier[mutant_protein_suffix] , identifier[unchanged_amino_acids] = identifier[trim_shared_prefix] ( identifier[ref] = identifier[original_protein_sequence] [ identifier[mutated_codon_index] :], identifier[alt] = identifier[mutant_protein_suffix] ) identifier[n_unchanged_amino_acids] = identifier[len] ( identifier[unchanged_amino_acids] ) identifier[offset_to_first_different_amino_acid] = identifier[mutated_codon_index] + identifier[n_unchanged_amino_acids] keyword[if] identifier[n_unchanged_amino_acids] == literal[int] : identifier[aa_ref] = literal[string] keyword[else] : identifier[aa_ref] = identifier[original_protein_sequence] [- identifier[n_unchanged_amino_acids] :] keyword[if] identifier[offset_to_first_different_amino_acid] >= identifier[original_protein_length] : keyword[if] identifier[len] ( identifier[mutant_protein_suffix] )== literal[int] : keyword[return] identifier[Silent] ( identifier[variant] = identifier[variant] , identifier[transcript] = 
identifier[transcript] , identifier[aa_pos] = identifier[mutated_codon_index] , identifier[aa_ref] = identifier[aa_ref] ) keyword[else] : keyword[return] identifier[StopLoss] ( identifier[variant] = identifier[variant] , identifier[transcript] = identifier[transcript] , identifier[aa_ref] = identifier[aa_ref] , identifier[aa_alt] = identifier[mutant_protein_suffix] ) identifier[aa_ref] = identifier[original_protein_sequence] [ identifier[offset_to_first_different_amino_acid] ] keyword[if] identifier[len] ( identifier[mutant_protein_suffix] )== literal[int] : keyword[return] identifier[FrameShiftTruncation] ( identifier[variant] = identifier[variant] , identifier[transcript] = identifier[transcript] , identifier[stop_codon_offset] = identifier[offset_to_first_different_amino_acid] ) keyword[return] identifier[FrameShift] ( identifier[variant] = identifier[variant] , identifier[transcript] = identifier[transcript] , identifier[aa_mutation_start_offset] = identifier[offset_to_first_different_amino_acid] , identifier[shifted_sequence] = identifier[str] ( identifier[mutant_protein_suffix] ))
def create_frameshift_effect(mutated_codon_index, sequence_from_mutated_codon, variant, transcript): """ Determine frameshift effect within a coding sequence (possibly affecting either the start or stop codons, or anythign in between) Parameters ---------- mutated_codon_index : int Codon offset (starting from 0 = start codon) of first non-reference amino acid in the variant protein sequence_from_mutated_codon: Bio.Seq Sequence of mutated cDNA, starting from first mutated codon, until the end of the transcript variant : Variant transcript : transcript """ assert transcript.protein_sequence is not None, 'Expect transcript %s to have protein sequence' % transcript original_protein_sequence = transcript.protein_sequence original_protein_length = len(original_protein_sequence) mutant_protein_suffix = translate(nucleotide_sequence=sequence_from_mutated_codon, first_codon_is_start=False, to_stop=True, truncate=True) if mutated_codon_index == 0: # TODO: scan through sequence_from_mutated_codon for # Kozak sequence + start codon to choose the new start return StartLoss(variant=variant, transcript=transcript) # depends on [control=['if'], data=[]] # the frameshifted sequence may contain some amino acids which are # the same as the original protein! 
(_, mutant_protein_suffix, unchanged_amino_acids) = trim_shared_prefix(ref=original_protein_sequence[mutated_codon_index:], alt=mutant_protein_suffix) n_unchanged_amino_acids = len(unchanged_amino_acids) offset_to_first_different_amino_acid = mutated_codon_index + n_unchanged_amino_acids # miraculously, this frameshift left the protein unchanged, # most likely by turning one stop codon into another stop codon if n_unchanged_amino_acids == 0: aa_ref = '' # depends on [control=['if'], data=[]] else: aa_ref = original_protein_sequence[-n_unchanged_amino_acids:] if offset_to_first_different_amino_acid >= original_protein_length: # frameshift is either extending the protein or leaving it unchanged if len(mutant_protein_suffix) == 0: return Silent(variant=variant, transcript=transcript, aa_pos=mutated_codon_index, aa_ref=aa_ref) # depends on [control=['if'], data=[]] else: # When all the amino acids are the same as the original, we either # have the original protein or we've extended it. # If we've extended it, it means we must have lost our stop codon. return StopLoss(variant=variant, transcript=transcript, aa_ref=aa_ref, aa_alt=mutant_protein_suffix) # depends on [control=['if'], data=[]] # original amino acid at the mutated codon before the frameshift occurred aa_ref = original_protein_sequence[offset_to_first_different_amino_acid] # TODO: what if all the shifted amino acids were the same and the protein # ended up the same length? Add a Silent case? if len(mutant_protein_suffix) == 0: # if a frameshift doesn't create any new amino acids, then # it must immediately have hit a stop codon return FrameShiftTruncation(variant=variant, transcript=transcript, stop_codon_offset=offset_to_first_different_amino_acid) # depends on [control=['if'], data=[]] return FrameShift(variant=variant, transcript=transcript, aa_mutation_start_offset=offset_to_first_different_amino_acid, shifted_sequence=str(mutant_protein_suffix))
def create():
    """Handle the "Add room" form POST from the homepage.

    Redirects to the newly created (or already existing) room, or back to
    the room list when no name was submitted.
    """
    requested_name = request.form.get("name")
    # No name supplied: nothing to create, go back to the room list.
    if not requested_name:
        return redirect(url_for('rooms'))
    room, _created = get_or_create(ChatRoom, name=requested_name)
    return redirect(url_for('room', slug=room.slug))
def function[create, parameter[]]: constant[ Handles post from the "Add room" form on the homepage, and redirects to the new room. ] variable[name] assign[=] call[name[request].form.get, parameter[constant[name]]] if name[name] begin[:] <ast.Tuple object at 0x7da18c4ce3b0> assign[=] call[name[get_or_create], parameter[name[ChatRoom]]] return[call[name[redirect], parameter[call[name[url_for], parameter[constant[room]]]]]] return[call[name[redirect], parameter[call[name[url_for], parameter[constant[rooms]]]]]]
keyword[def] identifier[create] (): literal[string] identifier[name] = identifier[request] . identifier[form] . identifier[get] ( literal[string] ) keyword[if] identifier[name] : identifier[room] , identifier[created] = identifier[get_or_create] ( identifier[ChatRoom] , identifier[name] = identifier[name] ) keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] , identifier[slug] = identifier[room] . identifier[slug] )) keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] ))
def create(): """ Handles post from the "Add room" form on the homepage, and redirects to the new room. """ name = request.form.get('name') if name: (room, created) = get_or_create(ChatRoom, name=name) return redirect(url_for('room', slug=room.slug)) # depends on [control=['if'], data=[]] return redirect(url_for('rooms'))
def get_assessment_taken_lookup_session(self):
    """Gets the ``OsidSession`` associated with the assessment taken lookup service.

    return: (osid.assessment.AssessmentTakenLookupSession) - an
            ``AssessmentTakenLookupSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_assessment_taken_lookup()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_taken_lookup()`` is ``true``.*
    """
    if self.supports_assessment_taken_lookup():
        # pylint: disable=no-member
        return sessions.AssessmentTakenLookupSession(runtime=self._runtime)
    raise errors.Unimplemented()
def function[get_assessment_taken_lookup_session, parameter[self]]: constant[Gets the ``OsidSession`` associated with the assessment taken lookup service. return: (osid.assessment.AssessmentTakenLookupSession) - an ``AssessmentTakenLookupSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_taken_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_taken_lookup()`` is ``true``.* ] if <ast.UnaryOp object at 0x7da20c7c9ff0> begin[:] <ast.Raise object at 0x7da20c7cb580> return[call[name[sessions].AssessmentTakenLookupSession, parameter[]]]
keyword[def] identifier[get_assessment_taken_lookup_session] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[supports_assessment_taken_lookup] (): keyword[raise] identifier[errors] . identifier[Unimplemented] () keyword[return] identifier[sessions] . identifier[AssessmentTakenLookupSession] ( identifier[runtime] = identifier[self] . identifier[_runtime] )
def get_assessment_taken_lookup_session(self): """Gets the ``OsidSession`` associated with the assessment taken lookup service. return: (osid.assessment.AssessmentTakenLookupSession) - an ``AssessmentTakenLookupSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_taken_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_taken_lookup()`` is ``true``.* """ if not self.supports_assessment_taken_lookup(): raise errors.Unimplemented() # depends on [control=['if'], data=[]] # pylint: disable=no-member return sessions.AssessmentTakenLookupSession(runtime=self._runtime)
def low(self, fun, low):
    """
    Pass the cloud function and low data structure to run
    """
    target = getattr(self, fun)
    # Let salt split the low chunk into positional and keyword arguments
    # matching the target function's signature.
    call_spec = salt.utils.args.format_call(target, low)
    args = call_spec.get('args', ())
    kwargs = call_spec.get('kwargs', {})
    return target(*args, **kwargs)
def function[low, parameter[self, fun, low]]: constant[ Pass the cloud function and low data structure to run ] variable[l_fun] assign[=] call[name[getattr], parameter[name[self], name[fun]]] variable[f_call] assign[=] call[name[salt].utils.args.format_call, parameter[name[l_fun], name[low]]] return[call[name[l_fun], parameter[<ast.Starred object at 0x7da1b208ec50>]]]
keyword[def] identifier[low] ( identifier[self] , identifier[fun] , identifier[low] ): literal[string] identifier[l_fun] = identifier[getattr] ( identifier[self] , identifier[fun] ) identifier[f_call] = identifier[salt] . identifier[utils] . identifier[args] . identifier[format_call] ( identifier[l_fun] , identifier[low] ) keyword[return] identifier[l_fun] (* identifier[f_call] . identifier[get] ( literal[string] ,()),** identifier[f_call] . identifier[get] ( literal[string] ,{}))
def low(self, fun, low): """ Pass the cloud function and low data structure to run """ l_fun = getattr(self, fun) f_call = salt.utils.args.format_call(l_fun, low) return l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {}))
def set_topic_config(self, topic, value, kafka_version=(0, 10, )):
    """Set configuration information for specified topic.

    :topic : topic whose configuration needs to be changed
    :value : config value with which the topic needs to be updated
        with. This would be of the form key=value. Example
        'cleanup.policy=compact'
    :kafka_version : tuple kafka version the brokers are running on.
        Defaults to (0, 10, x). Kafka version 9 and kafka 10 support
        this feature.
    """
    config_data = dump_json(value)
    try:
        # Write the new config value for the topic.
        updated = self.set(
            "/config/topics/{topic}".format(topic=topic),
            config_data
        )
        # Notify brokers of the change; the change-node layout differs
        # between kafka 9 and kafka 10.
        minor = kafka_version[1]
        assert minor in (9, 10), "Feature supported with kafka 9 and kafka 10"
        if minor == 9:
            # https://github.com/apache/kafka/blob/0.9.0.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L334
            payload = {
                "version": 1,
                "entity_type": "topics",
                "entity_name": topic
            }
        else:
            # kafka 10
            # https://github.com/apache/kafka/blob/0.10.2.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L574
            payload = {
                "version": 2,
                "entity_path": "topics/" + topic,
            }
        self.create(
            '/config/changes/config_change_',
            dump_json(payload),
            sequence=True
        )
    except NoNodeError as e:
        _log.error(
            "topic {topic} not found.".format(topic=topic)
        )
        raise e
    return updated
def function[set_topic_config, parameter[self, topic, value, kafka_version]]: constant[Set configuration information for specified topic. :topic : topic whose configuration needs to be changed :value : config value with which the topic needs to be updated with. This would be of the form key=value. Example 'cleanup.policy=compact' :kafka_version :tuple kafka version the brokers are running on. Defaults to (0, 10, x). Kafka version 9 and kafka 10 support this feature. ] variable[config_data] assign[=] call[name[dump_json], parameter[name[value]]] <ast.Try object at 0x7da1b0789ff0> return[name[return_value]]
keyword[def] identifier[set_topic_config] ( identifier[self] , identifier[topic] , identifier[value] , identifier[kafka_version] =( literal[int] , literal[int] ,)): literal[string] identifier[config_data] = identifier[dump_json] ( identifier[value] ) keyword[try] : identifier[return_value] = identifier[self] . identifier[set] ( literal[string] . identifier[format] ( identifier[topic] = identifier[topic] ), identifier[config_data] ) identifier[version] = identifier[kafka_version] [ literal[int] ] keyword[assert] identifier[version] keyword[in] ( literal[int] , literal[int] ), literal[string] keyword[if] identifier[version] == literal[int] : identifier[change_node] = identifier[dump_json] ({ literal[string] : literal[int] , literal[string] : literal[string] , literal[string] : identifier[topic] }) keyword[else] : identifier[change_node] = identifier[dump_json] ({ literal[string] : literal[int] , literal[string] : literal[string] + identifier[topic] , }) identifier[self] . identifier[create] ( literal[string] , identifier[change_node] , identifier[sequence] = keyword[True] ) keyword[except] identifier[NoNodeError] keyword[as] identifier[e] : identifier[_log] . identifier[error] ( literal[string] . identifier[format] ( identifier[topic] = identifier[topic] ) ) keyword[raise] identifier[e] keyword[return] identifier[return_value]
def set_topic_config(self, topic, value, kafka_version=(0, 10)): """Set configuration information for specified topic. :topic : topic whose configuration needs to be changed :value : config value with which the topic needs to be updated with. This would be of the form key=value. Example 'cleanup.policy=compact' :kafka_version :tuple kafka version the brokers are running on. Defaults to (0, 10, x). Kafka version 9 and kafka 10 support this feature. """ config_data = dump_json(value) try: # Change value return_value = self.set('/config/topics/{topic}'.format(topic=topic), config_data) # Create change version = kafka_version[1] # this feature is supported in kafka 9 and kafka 10 assert version in (9, 10), 'Feature supported with kafka 9 and kafka 10' if version == 9: # https://github.com/apache/kafka/blob/0.9.0.1/ # core/src/main/scala/kafka/admin/AdminUtils.scala#L334 change_node = dump_json({'version': 1, 'entity_type': 'topics', 'entity_name': topic}) # depends on [control=['if'], data=[]] else: # kafka 10 # https://github.com/apache/kafka/blob/0.10.2.1/ # core/src/main/scala/kafka/admin/AdminUtils.scala#L574 change_node = dump_json({'version': 2, 'entity_path': 'topics/' + topic}) self.create('/config/changes/config_change_', change_node, sequence=True) # depends on [control=['try'], data=[]] except NoNodeError as e: _log.error('topic {topic} not found.'.format(topic=topic)) raise e # depends on [control=['except'], data=['e']] return return_value
def pretrain_procedure(self, layer_objs, layer_graphs, set_params_func,
                       train_set, validation_set=None):
    """Perform unsupervised pretraining of the model.

    :param layer_objs: list of model objects (autoencoders or rbms)
    :param layer_graphs: list of model tf.Graph objects
    :param set_params_func: function used to set the parameters after
        pretraining
    :param train_set: training set
    :param validation_set: validation set
    :return: return data encoded by the last layer
    """
    # Each layer is trained on the previous layer's encoded output.
    encoded_train = train_set
    encoded_valid = validation_set
    for index, layer in enumerate(layer_objs):
        print('Training layer {}...'.format(index + 1))
        encoded_train, encoded_valid = self._pretrain_layer_and_gen_feed(
            layer, set_params_func, encoded_train, encoded_valid,
            layer_graphs[index])
    return encoded_train, encoded_valid
def function[pretrain_procedure, parameter[self, layer_objs, layer_graphs, set_params_func, train_set, validation_set]]: constant[Perform unsupervised pretraining of the model. :param layer_objs: list of model objects (autoencoders or rbms) :param layer_graphs: list of model tf.Graph objects :param set_params_func: function used to set the parameters after pretraining :param train_set: training set :param validation_set: validation set :return: return data encoded by the last layer ] variable[next_train] assign[=] name[train_set] variable[next_valid] assign[=] name[validation_set] for taget[tuple[[<ast.Name object at 0x7da1b26afc40>, <ast.Name object at 0x7da1b26aee60>]]] in starred[call[name[enumerate], parameter[name[layer_objs]]]] begin[:] call[name[print], parameter[call[constant[Training layer {}...].format, parameter[binary_operation[name[l] + constant[1]]]]]] <ast.Tuple object at 0x7da1b26af760> assign[=] call[name[self]._pretrain_layer_and_gen_feed, parameter[name[layer_obj], name[set_params_func], name[next_train], name[next_valid], call[name[layer_graphs]][name[l]]]] return[tuple[[<ast.Name object at 0x7da20c992170>, <ast.Name object at 0x7da20c9922f0>]]]
keyword[def] identifier[pretrain_procedure] ( identifier[self] , identifier[layer_objs] , identifier[layer_graphs] , identifier[set_params_func] , identifier[train_set] , identifier[validation_set] = keyword[None] ): literal[string] identifier[next_train] = identifier[train_set] identifier[next_valid] = identifier[validation_set] keyword[for] identifier[l] , identifier[layer_obj] keyword[in] identifier[enumerate] ( identifier[layer_objs] ): identifier[print] ( literal[string] . identifier[format] ( identifier[l] + literal[int] )) identifier[next_train] , identifier[next_valid] = identifier[self] . identifier[_pretrain_layer_and_gen_feed] ( identifier[layer_obj] , identifier[set_params_func] , identifier[next_train] , identifier[next_valid] , identifier[layer_graphs] [ identifier[l] ]) keyword[return] identifier[next_train] , identifier[next_valid]
def pretrain_procedure(self, layer_objs, layer_graphs, set_params_func, train_set, validation_set=None): """Perform unsupervised pretraining of the model. :param layer_objs: list of model objects (autoencoders or rbms) :param layer_graphs: list of model tf.Graph objects :param set_params_func: function used to set the parameters after pretraining :param train_set: training set :param validation_set: validation set :return: return data encoded by the last layer """ next_train = train_set next_valid = validation_set for (l, layer_obj) in enumerate(layer_objs): print('Training layer {}...'.format(l + 1)) (next_train, next_valid) = self._pretrain_layer_and_gen_feed(layer_obj, set_params_func, next_train, next_valid, layer_graphs[l]) # depends on [control=['for'], data=[]] return (next_train, next_valid)
def _style_to_basic_html_attributes(self, element, style_content, force=False): """given an element and styles like 'background-color:red; font-family:Arial' turn some of that into HTML attributes. like 'bgcolor', etc. Note, the style_content can contain pseudoclasses like: '{color:red; border:1px solid green} :visited{border:1px solid green}' """ if style_content.count("}") and style_content.count("{") == style_content.count( "}" ): style_content = style_content.split("}")[0][1:] attributes = OrderedDict() for key, value in [ x.split(":") for x in style_content.split(";") if len(x.split(":")) == 2 ]: key = key.strip() if key == "text-align": attributes["align"] = value.strip() elif key == "vertical-align": attributes["valign"] = value.strip() elif key == "background-color" and "transparent" not in value.lower(): # Only add the 'bgcolor' attribute if the value does not # contain the word "transparent"; before we add it possibly # correct the 3-digit color code to its 6-digit equivalent # ("abc" to "aabbcc") so IBM Notes copes. attributes["bgcolor"] = self.six_color(value.strip()) elif key == "width" or key == "height": value = value.strip() if value.endswith("px"): value = value[:-2] attributes[key] = value for key, value in attributes.items(): if ( key in element.attrib and not force or key in self.disable_basic_attributes ): # already set, don't dare to overwrite continue element.attrib[key] = value
def function[_style_to_basic_html_attributes, parameter[self, element, style_content, force]]: constant[given an element and styles like 'background-color:red; font-family:Arial' turn some of that into HTML attributes. like 'bgcolor', etc. Note, the style_content can contain pseudoclasses like: '{color:red; border:1px solid green} :visited{border:1px solid green}' ] if <ast.BoolOp object at 0x7da1b12c9360> begin[:] variable[style_content] assign[=] call[call[call[name[style_content].split, parameter[constant[}]]]][constant[0]]][<ast.Slice object at 0x7da1b12c8c40>] variable[attributes] assign[=] call[name[OrderedDict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b12c8d30>, <ast.Name object at 0x7da1b12c8760>]]] in starred[<ast.ListComp object at 0x7da1b12cacb0>] begin[:] variable[key] assign[=] call[name[key].strip, parameter[]] if compare[name[key] equal[==] constant[text-align]] begin[:] call[name[attributes]][constant[align]] assign[=] call[name[value].strip, parameter[]] for taget[tuple[[<ast.Name object at 0x7da20c991360>, <ast.Name object at 0x7da20c9929e0>]]] in starred[call[name[attributes].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da20c990d60> begin[:] continue call[name[element].attrib][name[key]] assign[=] name[value]
keyword[def] identifier[_style_to_basic_html_attributes] ( identifier[self] , identifier[element] , identifier[style_content] , identifier[force] = keyword[False] ): literal[string] keyword[if] identifier[style_content] . identifier[count] ( literal[string] ) keyword[and] identifier[style_content] . identifier[count] ( literal[string] )== identifier[style_content] . identifier[count] ( literal[string] ): identifier[style_content] = identifier[style_content] . identifier[split] ( literal[string] )[ literal[int] ][ literal[int] :] identifier[attributes] = identifier[OrderedDict] () keyword[for] identifier[key] , identifier[value] keyword[in] [ identifier[x] . identifier[split] ( literal[string] ) keyword[for] identifier[x] keyword[in] identifier[style_content] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[x] . identifier[split] ( literal[string] ))== literal[int] ]: identifier[key] = identifier[key] . identifier[strip] () keyword[if] identifier[key] == literal[string] : identifier[attributes] [ literal[string] ]= identifier[value] . identifier[strip] () keyword[elif] identifier[key] == literal[string] : identifier[attributes] [ literal[string] ]= identifier[value] . identifier[strip] () keyword[elif] identifier[key] == literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[value] . identifier[lower] (): identifier[attributes] [ literal[string] ]= identifier[self] . identifier[six_color] ( identifier[value] . identifier[strip] ()) keyword[elif] identifier[key] == literal[string] keyword[or] identifier[key] == literal[string] : identifier[value] = identifier[value] . identifier[strip] () keyword[if] identifier[value] . identifier[endswith] ( literal[string] ): identifier[value] = identifier[value] [:- literal[int] ] identifier[attributes] [ identifier[key] ]= identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[attributes] . 
identifier[items] (): keyword[if] ( identifier[key] keyword[in] identifier[element] . identifier[attrib] keyword[and] keyword[not] identifier[force] keyword[or] identifier[key] keyword[in] identifier[self] . identifier[disable_basic_attributes] ): keyword[continue] identifier[element] . identifier[attrib] [ identifier[key] ]= identifier[value]
def _style_to_basic_html_attributes(self, element, style_content, force=False): """given an element and styles like 'background-color:red; font-family:Arial' turn some of that into HTML attributes. like 'bgcolor', etc. Note, the style_content can contain pseudoclasses like: '{color:red; border:1px solid green} :visited{border:1px solid green}' """ if style_content.count('}') and style_content.count('{') == style_content.count('}'): style_content = style_content.split('}')[0][1:] # depends on [control=['if'], data=[]] attributes = OrderedDict() for (key, value) in [x.split(':') for x in style_content.split(';') if len(x.split(':')) == 2]: key = key.strip() if key == 'text-align': attributes['align'] = value.strip() # depends on [control=['if'], data=[]] elif key == 'vertical-align': attributes['valign'] = value.strip() # depends on [control=['if'], data=[]] elif key == 'background-color' and 'transparent' not in value.lower(): # Only add the 'bgcolor' attribute if the value does not # contain the word "transparent"; before we add it possibly # correct the 3-digit color code to its 6-digit equivalent # ("abc" to "aabbcc") so IBM Notes copes. attributes['bgcolor'] = self.six_color(value.strip()) # depends on [control=['if'], data=[]] elif key == 'width' or key == 'height': value = value.strip() if value.endswith('px'): value = value[:-2] # depends on [control=['if'], data=[]] attributes[key] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for (key, value) in attributes.items(): if key in element.attrib and (not force) or key in self.disable_basic_attributes: # already set, don't dare to overwrite continue # depends on [control=['if'], data=[]] element.attrib[key] = value # depends on [control=['for'], data=[]]
def render_variable(env, raw, cookiecutter_dict):
    """Render the next prompted variable through Jinja2.

    Inside the prompting taken from the cookiecutter.json file, this renders
    the next variable. For example, if a project_name is
    "Peanut Butter Cookie", the repo_name could be rendered with:
    `{{ cookiecutter.project_name.replace(" ", "_") }}`.
    This is then presented to the user as the default.

    :param Environment env: A Jinja2 Environment object.
    :param str raw: The next value to be prompted for by the user.
    :param dict cookiecutter_dict: The current context as it's gradually
        being populated with variables.
    :return: The rendered value for the default variable.
    """
    if raw is None:
        return None
    if isinstance(raw, dict):
        # Render both keys and values recursively.
        return {
            render_variable(env, key, cookiecutter_dict):
                render_variable(env, val, cookiecutter_dict)
            for key, val in raw.items()
        }
    if isinstance(raw, list):
        return [render_variable(env, item, cookiecutter_dict) for item in raw]
    # NOTE(review): `basestring` is Python 2 only — presumably aliased for
    # Python 3 elsewhere in this codebase; confirm before porting.
    if not isinstance(raw, basestring):
        raw = str(raw)
    return env.from_string(raw).render(cookiecutter=cookiecutter_dict)
def function[render_variable, parameter[env, raw, cookiecutter_dict]]: constant[Inside the prompting taken from the cookiecutter.json file, this renders the next variable. For example, if a project_name is "Peanut Butter Cookie", the repo_name could be be rendered with: `{{ cookiecutter.project_name.replace(" ", "_") }}`. This is then presented to the user as the default. :param Environment env: A Jinja2 Environment object. :param str raw: The next value to be prompted for by the user. :param dict cookiecutter_dict: The current context as it's gradually being populated with variables. :return: The rendered value for the default variable. ] if compare[name[raw] is constant[None]] begin[:] return[constant[None]] variable[template] assign[=] call[name[env].from_string, parameter[name[raw]]] variable[rendered_template] assign[=] call[name[template].render, parameter[]] return[name[rendered_template]]
keyword[def] identifier[render_variable] ( identifier[env] , identifier[raw] , identifier[cookiecutter_dict] ): literal[string] keyword[if] identifier[raw] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[elif] identifier[isinstance] ( identifier[raw] , identifier[dict] ): keyword[return] { identifier[render_variable] ( identifier[env] , identifier[k] , identifier[cookiecutter_dict] ): identifier[render_variable] ( identifier[env] , identifier[v] , identifier[cookiecutter_dict] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[raw] . identifier[items] () } keyword[elif] identifier[isinstance] ( identifier[raw] , identifier[list] ): keyword[return] [ identifier[render_variable] ( identifier[env] , identifier[v] , identifier[cookiecutter_dict] ) keyword[for] identifier[v] keyword[in] identifier[raw] ] keyword[elif] keyword[not] identifier[isinstance] ( identifier[raw] , identifier[basestring] ): identifier[raw] = identifier[str] ( identifier[raw] ) identifier[template] = identifier[env] . identifier[from_string] ( identifier[raw] ) identifier[rendered_template] = identifier[template] . identifier[render] ( identifier[cookiecutter] = identifier[cookiecutter_dict] ) keyword[return] identifier[rendered_template]
def render_variable(env, raw, cookiecutter_dict): """Inside the prompting taken from the cookiecutter.json file, this renders the next variable. For example, if a project_name is "Peanut Butter Cookie", the repo_name could be be rendered with: `{{ cookiecutter.project_name.replace(" ", "_") }}`. This is then presented to the user as the default. :param Environment env: A Jinja2 Environment object. :param str raw: The next value to be prompted for by the user. :param dict cookiecutter_dict: The current context as it's gradually being populated with variables. :return: The rendered value for the default variable. """ if raw is None: return None # depends on [control=['if'], data=[]] elif isinstance(raw, dict): return {render_variable(env, k, cookiecutter_dict): render_variable(env, v, cookiecutter_dict) for (k, v) in raw.items()} # depends on [control=['if'], data=[]] elif isinstance(raw, list): return [render_variable(env, v, cookiecutter_dict) for v in raw] # depends on [control=['if'], data=[]] elif not isinstance(raw, basestring): raw = str(raw) # depends on [control=['if'], data=[]] template = env.from_string(raw) rendered_template = template.render(cookiecutter=cookiecutter_dict) return rendered_template
def obj(x): """Six-hump camelback function""" x1 = x[0] x2 = x[1] f = (4 - 2.1*(x1*x1) + (x1*x1*x1*x1)/3.0)*(x1*x1) + x1*x2 + (-4 + 4*(x2*x2))*(x2*x2) return f
def function[obj, parameter[x]]: constant[Six-hump camelback function] variable[x1] assign[=] call[name[x]][constant[0]] variable[x2] assign[=] call[name[x]][constant[1]] variable[f] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[4] - binary_operation[constant[2.1] * binary_operation[name[x1] * name[x1]]]] + binary_operation[binary_operation[binary_operation[binary_operation[name[x1] * name[x1]] * name[x1]] * name[x1]] / constant[3.0]]] * binary_operation[name[x1] * name[x1]]] + binary_operation[name[x1] * name[x2]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0e32e90> + binary_operation[constant[4] * binary_operation[name[x2] * name[x2]]]] * binary_operation[name[x2] * name[x2]]]] return[name[f]]
keyword[def] identifier[obj] ( identifier[x] ): literal[string] identifier[x1] = identifier[x] [ literal[int] ] identifier[x2] = identifier[x] [ literal[int] ] identifier[f] =( literal[int] - literal[int] *( identifier[x1] * identifier[x1] )+( identifier[x1] * identifier[x1] * identifier[x1] * identifier[x1] )/ literal[int] )*( identifier[x1] * identifier[x1] )+ identifier[x1] * identifier[x2] +(- literal[int] + literal[int] *( identifier[x2] * identifier[x2] ))*( identifier[x2] * identifier[x2] ) keyword[return] identifier[f]
def obj(x): """Six-hump camelback function""" x1 = x[0] x2 = x[1] f = (4 - 2.1 * (x1 * x1) + x1 * x1 * x1 * x1 / 3.0) * (x1 * x1) + x1 * x2 + (-4 + 4 * (x2 * x2)) * (x2 * x2) return f
def find_actual_cause(self, mechanism, purviews=False): """Return the actual cause of a mechanism.""" return self.find_causal_link(Direction.CAUSE, mechanism, purviews)
def function[find_actual_cause, parameter[self, mechanism, purviews]]: constant[Return the actual cause of a mechanism.] return[call[name[self].find_causal_link, parameter[name[Direction].CAUSE, name[mechanism], name[purviews]]]]
keyword[def] identifier[find_actual_cause] ( identifier[self] , identifier[mechanism] , identifier[purviews] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[find_causal_link] ( identifier[Direction] . identifier[CAUSE] , identifier[mechanism] , identifier[purviews] )
def find_actual_cause(self, mechanism, purviews=False): """Return the actual cause of a mechanism.""" return self.find_causal_link(Direction.CAUSE, mechanism, purviews)
def get_oct(self): """Return the octal notation of the address/netmask.""" return _convert(self._ip_dec, notation=IP_OCT, inotation=IP_DEC, _check=False, _isnm=self._isnm)
def function[get_oct, parameter[self]]: constant[Return the octal notation of the address/netmask.] return[call[name[_convert], parameter[name[self]._ip_dec]]]
keyword[def] identifier[get_oct] ( identifier[self] ): literal[string] keyword[return] identifier[_convert] ( identifier[self] . identifier[_ip_dec] , identifier[notation] = identifier[IP_OCT] , identifier[inotation] = identifier[IP_DEC] , identifier[_check] = keyword[False] , identifier[_isnm] = identifier[self] . identifier[_isnm] )
def get_oct(self): """Return the octal notation of the address/netmask.""" return _convert(self._ip_dec, notation=IP_OCT, inotation=IP_DEC, _check=False, _isnm=self._isnm)
def rollback(self, revision=None, annotations=None): """ Performs a rollback of the Deployment. If the 'revision' parameter is omitted, we fetch the Deployment's system-generated annotation containing the current revision, and revert to the version immediately preceding the current version. :param revision: The revision to rollback to. :param annotations: Annotations we'd like to update. :return: self """ rollback = DeploymentRollback() rollback.name = self.name rollback_config = RollbackConfig() # to the specified revision if revision is not None: rollback_config.revision = revision # to the revision immediately preceding the current revision else: current_revision = int(self.get_annotation(self.REVISION_ANNOTATION)) rev = max(current_revision - 1, 0) rollback_config.revision = rev rollback.rollback_to = rollback_config if annotations is not None: rollback.updated_annotations = annotations url = '{base}/{name}/rollback'.format(base=self.base_url, name=self.name) state = self.request( method='POST', url=url, data=rollback.serialize()) if not state.get('success'): status = state.get('status', '') reason = state.get('data', dict()).get('message', None) message = 'K8sDeployment: ROLLBACK failed : HTTP {0} : {1}'.format(status, reason) raise BadRequestException(message) time.sleep(0.2) self._wait_for_desired_replicas() self.get() return self
def function[rollback, parameter[self, revision, annotations]]: constant[ Performs a rollback of the Deployment. If the 'revision' parameter is omitted, we fetch the Deployment's system-generated annotation containing the current revision, and revert to the version immediately preceding the current version. :param revision: The revision to rollback to. :param annotations: Annotations we'd like to update. :return: self ] variable[rollback] assign[=] call[name[DeploymentRollback], parameter[]] name[rollback].name assign[=] name[self].name variable[rollback_config] assign[=] call[name[RollbackConfig], parameter[]] if compare[name[revision] is_not constant[None]] begin[:] name[rollback_config].revision assign[=] name[revision] name[rollback].rollback_to assign[=] name[rollback_config] if compare[name[annotations] is_not constant[None]] begin[:] name[rollback].updated_annotations assign[=] name[annotations] variable[url] assign[=] call[constant[{base}/{name}/rollback].format, parameter[]] variable[state] assign[=] call[name[self].request, parameter[]] if <ast.UnaryOp object at 0x7da204346bc0> begin[:] variable[status] assign[=] call[name[state].get, parameter[constant[status], constant[]]] variable[reason] assign[=] call[call[name[state].get, parameter[constant[data], call[name[dict], parameter[]]]].get, parameter[constant[message], constant[None]]] variable[message] assign[=] call[constant[K8sDeployment: ROLLBACK failed : HTTP {0} : {1}].format, parameter[name[status], name[reason]]] <ast.Raise object at 0x7da20c6e7d60> call[name[time].sleep, parameter[constant[0.2]]] call[name[self]._wait_for_desired_replicas, parameter[]] call[name[self].get, parameter[]] return[name[self]]
keyword[def] identifier[rollback] ( identifier[self] , identifier[revision] = keyword[None] , identifier[annotations] = keyword[None] ): literal[string] identifier[rollback] = identifier[DeploymentRollback] () identifier[rollback] . identifier[name] = identifier[self] . identifier[name] identifier[rollback_config] = identifier[RollbackConfig] () keyword[if] identifier[revision] keyword[is] keyword[not] keyword[None] : identifier[rollback_config] . identifier[revision] = identifier[revision] keyword[else] : identifier[current_revision] = identifier[int] ( identifier[self] . identifier[get_annotation] ( identifier[self] . identifier[REVISION_ANNOTATION] )) identifier[rev] = identifier[max] ( identifier[current_revision] - literal[int] , literal[int] ) identifier[rollback_config] . identifier[revision] = identifier[rev] identifier[rollback] . identifier[rollback_to] = identifier[rollback_config] keyword[if] identifier[annotations] keyword[is] keyword[not] keyword[None] : identifier[rollback] . identifier[updated_annotations] = identifier[annotations] identifier[url] = literal[string] . identifier[format] ( identifier[base] = identifier[self] . identifier[base_url] , identifier[name] = identifier[self] . identifier[name] ) identifier[state] = identifier[self] . identifier[request] ( identifier[method] = literal[string] , identifier[url] = identifier[url] , identifier[data] = identifier[rollback] . identifier[serialize] ()) keyword[if] keyword[not] identifier[state] . identifier[get] ( literal[string] ): identifier[status] = identifier[state] . identifier[get] ( literal[string] , literal[string] ) identifier[reason] = identifier[state] . identifier[get] ( literal[string] , identifier[dict] ()). identifier[get] ( literal[string] , keyword[None] ) identifier[message] = literal[string] . identifier[format] ( identifier[status] , identifier[reason] ) keyword[raise] identifier[BadRequestException] ( identifier[message] ) identifier[time] . 
identifier[sleep] ( literal[int] ) identifier[self] . identifier[_wait_for_desired_replicas] () identifier[self] . identifier[get] () keyword[return] identifier[self]
def rollback(self, revision=None, annotations=None): """ Performs a rollback of the Deployment. If the 'revision' parameter is omitted, we fetch the Deployment's system-generated annotation containing the current revision, and revert to the version immediately preceding the current version. :param revision: The revision to rollback to. :param annotations: Annotations we'd like to update. :return: self """ rollback = DeploymentRollback() rollback.name = self.name rollback_config = RollbackConfig() # to the specified revision if revision is not None: rollback_config.revision = revision # depends on [control=['if'], data=['revision']] else: # to the revision immediately preceding the current revision current_revision = int(self.get_annotation(self.REVISION_ANNOTATION)) rev = max(current_revision - 1, 0) rollback_config.revision = rev rollback.rollback_to = rollback_config if annotations is not None: rollback.updated_annotations = annotations # depends on [control=['if'], data=['annotations']] url = '{base}/{name}/rollback'.format(base=self.base_url, name=self.name) state = self.request(method='POST', url=url, data=rollback.serialize()) if not state.get('success'): status = state.get('status', '') reason = state.get('data', dict()).get('message', None) message = 'K8sDeployment: ROLLBACK failed : HTTP {0} : {1}'.format(status, reason) raise BadRequestException(message) # depends on [control=['if'], data=[]] time.sleep(0.2) self._wait_for_desired_replicas() self.get() return self
def request_writes(self, offset, data): """Request any available writes given new incoming data. You call this method by providing new data along with the offset associated with the data. If that new data unlocks any contiguous writes that can now be submitted, this method will return all applicable writes. This is done with 1 method call so you don't have to make two method calls (put(), get()) which acquires a lock each method call. """ if offset < self._next_offset: # This is a request for a write that we've already # seen. This can happen in the event of a retry # where if we retry at at offset N/2, we'll requeue # offsets 0-N/2 again. return [] writes = [] if offset in self._pending_offsets: # We've already queued this offset so this request is # a duplicate. In this case we should ignore # this request and prefer what's already queued. return [] heapq.heappush(self._writes, (offset, data)) self._pending_offsets.add(offset) while self._writes and self._writes[0][0] == self._next_offset: next_write = heapq.heappop(self._writes) writes.append({'offset': next_write[0], 'data': next_write[1]}) self._pending_offsets.remove(next_write[0]) self._next_offset += len(next_write[1]) return writes
def function[request_writes, parameter[self, offset, data]]: constant[Request any available writes given new incoming data. You call this method by providing new data along with the offset associated with the data. If that new data unlocks any contiguous writes that can now be submitted, this method will return all applicable writes. This is done with 1 method call so you don't have to make two method calls (put(), get()) which acquires a lock each method call. ] if compare[name[offset] less[<] name[self]._next_offset] begin[:] return[list[[]]] variable[writes] assign[=] list[[]] if compare[name[offset] in name[self]._pending_offsets] begin[:] return[list[[]]] call[name[heapq].heappush, parameter[name[self]._writes, tuple[[<ast.Name object at 0x7da1b26af640>, <ast.Name object at 0x7da1b26ad270>]]]] call[name[self]._pending_offsets.add, parameter[name[offset]]] while <ast.BoolOp object at 0x7da1b26ac2e0> begin[:] variable[next_write] assign[=] call[name[heapq].heappop, parameter[name[self]._writes]] call[name[writes].append, parameter[dictionary[[<ast.Constant object at 0x7da18c4cef50>, <ast.Constant object at 0x7da18c4cd2d0>], [<ast.Subscript object at 0x7da18c4cd1e0>, <ast.Subscript object at 0x7da18c4cc490>]]]] call[name[self]._pending_offsets.remove, parameter[call[name[next_write]][constant[0]]]] <ast.AugAssign object at 0x7da18c4cc880> return[name[writes]]
keyword[def] identifier[request_writes] ( identifier[self] , identifier[offset] , identifier[data] ): literal[string] keyword[if] identifier[offset] < identifier[self] . identifier[_next_offset] : keyword[return] [] identifier[writes] =[] keyword[if] identifier[offset] keyword[in] identifier[self] . identifier[_pending_offsets] : keyword[return] [] identifier[heapq] . identifier[heappush] ( identifier[self] . identifier[_writes] ,( identifier[offset] , identifier[data] )) identifier[self] . identifier[_pending_offsets] . identifier[add] ( identifier[offset] ) keyword[while] identifier[self] . identifier[_writes] keyword[and] identifier[self] . identifier[_writes] [ literal[int] ][ literal[int] ]== identifier[self] . identifier[_next_offset] : identifier[next_write] = identifier[heapq] . identifier[heappop] ( identifier[self] . identifier[_writes] ) identifier[writes] . identifier[append] ({ literal[string] : identifier[next_write] [ literal[int] ], literal[string] : identifier[next_write] [ literal[int] ]}) identifier[self] . identifier[_pending_offsets] . identifier[remove] ( identifier[next_write] [ literal[int] ]) identifier[self] . identifier[_next_offset] += identifier[len] ( identifier[next_write] [ literal[int] ]) keyword[return] identifier[writes]
def request_writes(self, offset, data): """Request any available writes given new incoming data. You call this method by providing new data along with the offset associated with the data. If that new data unlocks any contiguous writes that can now be submitted, this method will return all applicable writes. This is done with 1 method call so you don't have to make two method calls (put(), get()) which acquires a lock each method call. """ if offset < self._next_offset: # This is a request for a write that we've already # seen. This can happen in the event of a retry # where if we retry at at offset N/2, we'll requeue # offsets 0-N/2 again. return [] # depends on [control=['if'], data=[]] writes = [] if offset in self._pending_offsets: # We've already queued this offset so this request is # a duplicate. In this case we should ignore # this request and prefer what's already queued. return [] # depends on [control=['if'], data=[]] heapq.heappush(self._writes, (offset, data)) self._pending_offsets.add(offset) while self._writes and self._writes[0][0] == self._next_offset: next_write = heapq.heappop(self._writes) writes.append({'offset': next_write[0], 'data': next_write[1]}) self._pending_offsets.remove(next_write[0]) self._next_offset += len(next_write[1]) # depends on [control=['while'], data=[]] return writes
def get_sql_insert(table: str, fieldlist: Sequence[str], delims: Tuple[str, str] = ("", "")) -> str: """Returns ?-marked SQL for an INSERT statement.""" return ( "INSERT INTO " + delimit(table, delims) + " (" + ",".join([delimit(x, delims) for x in fieldlist]) + ") VALUES (" + ",".join(["?"] * len(fieldlist)) + ")" )
def function[get_sql_insert, parameter[table, fieldlist, delims]]: constant[Returns ?-marked SQL for an INSERT statement.] return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[INSERT INTO ] + call[name[delimit], parameter[name[table], name[delims]]]] + constant[ (]] + call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b173e8f0>]]] + constant[) VALUES (]] + call[constant[,].join, parameter[binary_operation[list[[<ast.Constant object at 0x7da1b173c910>]] * call[name[len], parameter[name[fieldlist]]]]]]] + constant[)]]]
keyword[def] identifier[get_sql_insert] ( identifier[table] : identifier[str] , identifier[fieldlist] : identifier[Sequence] [ identifier[str] ], identifier[delims] : identifier[Tuple] [ identifier[str] , identifier[str] ]=( literal[string] , literal[string] ))-> identifier[str] : literal[string] keyword[return] ( literal[string] + identifier[delimit] ( identifier[table] , identifier[delims] )+ literal[string] + literal[string] . identifier[join] ([ identifier[delimit] ( identifier[x] , identifier[delims] ) keyword[for] identifier[x] keyword[in] identifier[fieldlist] ])+ literal[string] + literal[string] . identifier[join] ([ literal[string] ]* identifier[len] ( identifier[fieldlist] ))+ literal[string] )
def get_sql_insert(table: str, fieldlist: Sequence[str], delims: Tuple[str, str]=('', '')) -> str: """Returns ?-marked SQL for an INSERT statement.""" return 'INSERT INTO ' + delimit(table, delims) + ' (' + ','.join([delimit(x, delims) for x in fieldlist]) + ') VALUES (' + ','.join(['?'] * len(fieldlist)) + ')'
def get_serial_number(self): """ Return the serial number of this certificate. :return: The serial number. :rtype: int """ asn1_serial = _lib.X509_get_serialNumber(self._x509) bignum_serial = _lib.ASN1_INTEGER_to_BN(asn1_serial, _ffi.NULL) try: hex_serial = _lib.BN_bn2hex(bignum_serial) try: hexstring_serial = _ffi.string(hex_serial) serial = int(hexstring_serial, 16) return serial finally: _lib.OPENSSL_free(hex_serial) finally: _lib.BN_free(bignum_serial)
def function[get_serial_number, parameter[self]]: constant[ Return the serial number of this certificate. :return: The serial number. :rtype: int ] variable[asn1_serial] assign[=] call[name[_lib].X509_get_serialNumber, parameter[name[self]._x509]] variable[bignum_serial] assign[=] call[name[_lib].ASN1_INTEGER_to_BN, parameter[name[asn1_serial], name[_ffi].NULL]] <ast.Try object at 0x7da1b0240be0>
keyword[def] identifier[get_serial_number] ( identifier[self] ): literal[string] identifier[asn1_serial] = identifier[_lib] . identifier[X509_get_serialNumber] ( identifier[self] . identifier[_x509] ) identifier[bignum_serial] = identifier[_lib] . identifier[ASN1_INTEGER_to_BN] ( identifier[asn1_serial] , identifier[_ffi] . identifier[NULL] ) keyword[try] : identifier[hex_serial] = identifier[_lib] . identifier[BN_bn2hex] ( identifier[bignum_serial] ) keyword[try] : identifier[hexstring_serial] = identifier[_ffi] . identifier[string] ( identifier[hex_serial] ) identifier[serial] = identifier[int] ( identifier[hexstring_serial] , literal[int] ) keyword[return] identifier[serial] keyword[finally] : identifier[_lib] . identifier[OPENSSL_free] ( identifier[hex_serial] ) keyword[finally] : identifier[_lib] . identifier[BN_free] ( identifier[bignum_serial] )
def get_serial_number(self): """ Return the serial number of this certificate. :return: The serial number. :rtype: int """ asn1_serial = _lib.X509_get_serialNumber(self._x509) bignum_serial = _lib.ASN1_INTEGER_to_BN(asn1_serial, _ffi.NULL) try: hex_serial = _lib.BN_bn2hex(bignum_serial) try: hexstring_serial = _ffi.string(hex_serial) serial = int(hexstring_serial, 16) return serial # depends on [control=['try'], data=[]] finally: _lib.OPENSSL_free(hex_serial) # depends on [control=['try'], data=[]] finally: _lib.BN_free(bignum_serial)
def in_memory(self, value): """Add or remove self from global memory. :param bool value: if True(False) ensure self is(is not) in memory. """ self_class = self.__class__ memory = Annotation.__ANNOTATIONS_IN_MEMORY__ if value: annotations_memory = memory.setdefault(self_class, set()) annotations_memory.add(self) else: if self_class in memory: annotations_memory = memory[self_class] while self in annotations_memory: annotations_memory.remove(self) if not annotations_memory: del memory[self_class]
def function[in_memory, parameter[self, value]]: constant[Add or remove self from global memory. :param bool value: if True(False) ensure self is(is not) in memory. ] variable[self_class] assign[=] name[self].__class__ variable[memory] assign[=] name[Annotation].__ANNOTATIONS_IN_MEMORY__ if name[value] begin[:] variable[annotations_memory] assign[=] call[name[memory].setdefault, parameter[name[self_class], call[name[set], parameter[]]]] call[name[annotations_memory].add, parameter[name[self]]]
keyword[def] identifier[in_memory] ( identifier[self] , identifier[value] ): literal[string] identifier[self_class] = identifier[self] . identifier[__class__] identifier[memory] = identifier[Annotation] . identifier[__ANNOTATIONS_IN_MEMORY__] keyword[if] identifier[value] : identifier[annotations_memory] = identifier[memory] . identifier[setdefault] ( identifier[self_class] , identifier[set] ()) identifier[annotations_memory] . identifier[add] ( identifier[self] ) keyword[else] : keyword[if] identifier[self_class] keyword[in] identifier[memory] : identifier[annotations_memory] = identifier[memory] [ identifier[self_class] ] keyword[while] identifier[self] keyword[in] identifier[annotations_memory] : identifier[annotations_memory] . identifier[remove] ( identifier[self] ) keyword[if] keyword[not] identifier[annotations_memory] : keyword[del] identifier[memory] [ identifier[self_class] ]
def in_memory(self, value): """Add or remove self from global memory. :param bool value: if True(False) ensure self is(is not) in memory. """ self_class = self.__class__ memory = Annotation.__ANNOTATIONS_IN_MEMORY__ if value: annotations_memory = memory.setdefault(self_class, set()) annotations_memory.add(self) # depends on [control=['if'], data=[]] elif self_class in memory: annotations_memory = memory[self_class] while self in annotations_memory: annotations_memory.remove(self) # depends on [control=['while'], data=['self', 'annotations_memory']] if not annotations_memory: del memory[self_class] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['self_class', 'memory']]
def auto_message(self, args): """Try guess the message by the args passed args: a set of args passed on the wrapper __call__ in the definition above. if the object already have some message (defined in __init__), we don't change that. If the first arg is a function, so is decorated without argument, use the func name as the message. If not self.message anyway, use the default_message global, another else use the default self.message already """ if any(args) and callable(args[0]) and not self.message: return args[0].__name__ elif not self.message: return self.default_message else: return self.message
def function[auto_message, parameter[self, args]]: constant[Try guess the message by the args passed args: a set of args passed on the wrapper __call__ in the definition above. if the object already have some message (defined in __init__), we don't change that. If the first arg is a function, so is decorated without argument, use the func name as the message. If not self.message anyway, use the default_message global, another else use the default self.message already ] if <ast.BoolOp object at 0x7da2041d9720> begin[:] return[call[name[args]][constant[0]].__name__]
keyword[def] identifier[auto_message] ( identifier[self] , identifier[args] ): literal[string] keyword[if] identifier[any] ( identifier[args] ) keyword[and] identifier[callable] ( identifier[args] [ literal[int] ]) keyword[and] keyword[not] identifier[self] . identifier[message] : keyword[return] identifier[args] [ literal[int] ]. identifier[__name__] keyword[elif] keyword[not] identifier[self] . identifier[message] : keyword[return] identifier[self] . identifier[default_message] keyword[else] : keyword[return] identifier[self] . identifier[message]
def auto_message(self, args): """Try guess the message by the args passed args: a set of args passed on the wrapper __call__ in the definition above. if the object already have some message (defined in __init__), we don't change that. If the first arg is a function, so is decorated without argument, use the func name as the message. If not self.message anyway, use the default_message global, another else use the default self.message already """ if any(args) and callable(args[0]) and (not self.message): return args[0].__name__ # depends on [control=['if'], data=[]] elif not self.message: return self.default_message # depends on [control=['if'], data=[]] else: return self.message
def count(self): """ Returns the total number of objects, across all pages. """ try: return self.object_list.count() except (AttributeError, TypeError): # AttributeError if object_list has no count() method. # TypeError if object_list.count() requires arguments # (i.e. is of type list). return len(self.object_list)
def function[count, parameter[self]]: constant[ Returns the total number of objects, across all pages. ] <ast.Try object at 0x7da20e9b1e10>
keyword[def] identifier[count] ( identifier[self] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[object_list] . identifier[count] () keyword[except] ( identifier[AttributeError] , identifier[TypeError] ): keyword[return] identifier[len] ( identifier[self] . identifier[object_list] )
def count(self): """ Returns the total number of objects, across all pages. """ try: return self.object_list.count() # depends on [control=['try'], data=[]] except (AttributeError, TypeError): # AttributeError if object_list has no count() method. # TypeError if object_list.count() requires arguments # (i.e. is of type list). return len(self.object_list) # depends on [control=['except'], data=[]]
def on_page_layout_choice(self, event): """Page layout choice event handler""" width, height = self.paper_sizes_points[event.GetString()] self.page_width_text_ctrl.SetValue(str(width / 72.0)) self.page_height_text_ctrl.SetValue(str(height / 72.0)) event.Skip()
def function[on_page_layout_choice, parameter[self, event]]: constant[Page layout choice event handler] <ast.Tuple object at 0x7da1b1544c40> assign[=] call[name[self].paper_sizes_points][call[name[event].GetString, parameter[]]] call[name[self].page_width_text_ctrl.SetValue, parameter[call[name[str], parameter[binary_operation[name[width] / constant[72.0]]]]]] call[name[self].page_height_text_ctrl.SetValue, parameter[call[name[str], parameter[binary_operation[name[height] / constant[72.0]]]]]] call[name[event].Skip, parameter[]]
keyword[def] identifier[on_page_layout_choice] ( identifier[self] , identifier[event] ): literal[string] identifier[width] , identifier[height] = identifier[self] . identifier[paper_sizes_points] [ identifier[event] . identifier[GetString] ()] identifier[self] . identifier[page_width_text_ctrl] . identifier[SetValue] ( identifier[str] ( identifier[width] / literal[int] )) identifier[self] . identifier[page_height_text_ctrl] . identifier[SetValue] ( identifier[str] ( identifier[height] / literal[int] )) identifier[event] . identifier[Skip] ()
def on_page_layout_choice(self, event): """Page layout choice event handler""" (width, height) = self.paper_sizes_points[event.GetString()] self.page_width_text_ctrl.SetValue(str(width / 72.0)) self.page_height_text_ctrl.SetValue(str(height / 72.0)) event.Skip()
def getKeywordList(self, kw): """ Return lists of all attribute values for all active chips in the ``imageObject``. """ kwlist = [] for chip in range(1,self._numchips+1,1): sci_chip = self._image[self.scienceExt,chip] if sci_chip.group_member: kwlist.append(sci_chip.__dict__[kw]) return kwlist
def function[getKeywordList, parameter[self, kw]]: constant[ Return lists of all attribute values for all active chips in the ``imageObject``. ] variable[kwlist] assign[=] list[[]] for taget[name[chip]] in starred[call[name[range], parameter[constant[1], binary_operation[name[self]._numchips + constant[1]], constant[1]]]] begin[:] variable[sci_chip] assign[=] call[name[self]._image][tuple[[<ast.Attribute object at 0x7da1b1c220e0>, <ast.Name object at 0x7da1b1c22d70>]]] if name[sci_chip].group_member begin[:] call[name[kwlist].append, parameter[call[name[sci_chip].__dict__][name[kw]]]] return[name[kwlist]]
keyword[def] identifier[getKeywordList] ( identifier[self] , identifier[kw] ): literal[string] identifier[kwlist] =[] keyword[for] identifier[chip] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[_numchips] + literal[int] , literal[int] ): identifier[sci_chip] = identifier[self] . identifier[_image] [ identifier[self] . identifier[scienceExt] , identifier[chip] ] keyword[if] identifier[sci_chip] . identifier[group_member] : identifier[kwlist] . identifier[append] ( identifier[sci_chip] . identifier[__dict__] [ identifier[kw] ]) keyword[return] identifier[kwlist]
def getKeywordList(self, kw): """ Return lists of all attribute values for all active chips in the ``imageObject``. """ kwlist = [] for chip in range(1, self._numchips + 1, 1): sci_chip = self._image[self.scienceExt, chip] if sci_chip.group_member: kwlist.append(sci_chip.__dict__[kw]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chip']] return kwlist
def mjd2gmst(mjd): """Convert Modfied Juian Date (JD = 2400000.5) to GMST Taken from P.T. Walace routines. """ tu = (mjd - MJD0) / (100*DPY) st = math.fmod(mjd, 1.0) * D2PI + (24110.54841 + (8640184.812866 + (0.093104 - 6.2e-6 * tu) * tu) * tu) * DS2R w = math.fmod(st, D2PI) if w >= 0.0: return w else: return w + D2PI
def function[mjd2gmst, parameter[mjd]]: constant[Convert Modfied Juian Date (JD = 2400000.5) to GMST Taken from P.T. Walace routines. ] variable[tu] assign[=] binary_operation[binary_operation[name[mjd] - name[MJD0]] / binary_operation[constant[100] * name[DPY]]] variable[st] assign[=] binary_operation[binary_operation[call[name[math].fmod, parameter[name[mjd], constant[1.0]]] * name[D2PI]] + binary_operation[binary_operation[constant[24110.54841] + binary_operation[binary_operation[constant[8640184.812866] + binary_operation[binary_operation[constant[0.093104] - binary_operation[constant[6.2e-06] * name[tu]]] * name[tu]]] * name[tu]]] * name[DS2R]]] variable[w] assign[=] call[name[math].fmod, parameter[name[st], name[D2PI]]] if compare[name[w] greater_or_equal[>=] constant[0.0]] begin[:] return[name[w]]
keyword[def] identifier[mjd2gmst] ( identifier[mjd] ): literal[string] identifier[tu] =( identifier[mjd] - identifier[MJD0] )/( literal[int] * identifier[DPY] ) identifier[st] = identifier[math] . identifier[fmod] ( identifier[mjd] , literal[int] )* identifier[D2PI] +( literal[int] +( literal[int] +( literal[int] - literal[int] * identifier[tu] )* identifier[tu] )* identifier[tu] )* identifier[DS2R] identifier[w] = identifier[math] . identifier[fmod] ( identifier[st] , identifier[D2PI] ) keyword[if] identifier[w] >= literal[int] : keyword[return] identifier[w] keyword[else] : keyword[return] identifier[w] + identifier[D2PI]
def mjd2gmst(mjd): """Convert Modfied Juian Date (JD = 2400000.5) to GMST Taken from P.T. Walace routines. """ tu = (mjd - MJD0) / (100 * DPY) st = math.fmod(mjd, 1.0) * D2PI + (24110.54841 + (8640184.812866 + (0.093104 - 6.2e-06 * tu) * tu) * tu) * DS2R w = math.fmod(st, D2PI) if w >= 0.0: return w # depends on [control=['if'], data=['w']] else: return w + D2PI
def validate_raw_manifest_format(raw_manifest: str) -> None: """ Raise a ValidationError if a manifest ... - is not tightly packed (i.e. no linebreaks or extra whitespace) - does not have alphabetically sorted keys - has duplicate keys - is not UTF-8 encoded - has a trailing newline """ try: manifest_dict = json.loads(raw_manifest, encoding="UTF-8") except json.JSONDecodeError as err: raise json.JSONDecodeError( "Failed to load package data. File is not a valid JSON document.", err.doc, err.pos, ) compact_manifest = json.dumps(manifest_dict, sort_keys=True, separators=(",", ":")) if raw_manifest != compact_manifest: raise ValidationError( "The manifest appears to be malformed. Please ensure that it conforms to the " "EthPM-Spec for document format. " "http://ethpm.github.io/ethpm-spec/package-spec.html#document-format " )
def function[validate_raw_manifest_format, parameter[raw_manifest]]: constant[ Raise a ValidationError if a manifest ... - is not tightly packed (i.e. no linebreaks or extra whitespace) - does not have alphabetically sorted keys - has duplicate keys - is not UTF-8 encoded - has a trailing newline ] <ast.Try object at 0x7da2044c3940> variable[compact_manifest] assign[=] call[name[json].dumps, parameter[name[manifest_dict]]] if compare[name[raw_manifest] not_equal[!=] name[compact_manifest]] begin[:] <ast.Raise object at 0x7da2044c1780>
keyword[def] identifier[validate_raw_manifest_format] ( identifier[raw_manifest] : identifier[str] )-> keyword[None] : literal[string] keyword[try] : identifier[manifest_dict] = identifier[json] . identifier[loads] ( identifier[raw_manifest] , identifier[encoding] = literal[string] ) keyword[except] identifier[json] . identifier[JSONDecodeError] keyword[as] identifier[err] : keyword[raise] identifier[json] . identifier[JSONDecodeError] ( literal[string] , identifier[err] . identifier[doc] , identifier[err] . identifier[pos] , ) identifier[compact_manifest] = identifier[json] . identifier[dumps] ( identifier[manifest_dict] , identifier[sort_keys] = keyword[True] , identifier[separators] =( literal[string] , literal[string] )) keyword[if] identifier[raw_manifest] != identifier[compact_manifest] : keyword[raise] identifier[ValidationError] ( literal[string] literal[string] literal[string] )
def validate_raw_manifest_format(raw_manifest: str) -> None: """ Raise a ValidationError if a manifest ... - is not tightly packed (i.e. no linebreaks or extra whitespace) - does not have alphabetically sorted keys - has duplicate keys - is not UTF-8 encoded - has a trailing newline """ try: manifest_dict = json.loads(raw_manifest, encoding='UTF-8') # depends on [control=['try'], data=[]] except json.JSONDecodeError as err: raise json.JSONDecodeError('Failed to load package data. File is not a valid JSON document.', err.doc, err.pos) # depends on [control=['except'], data=['err']] compact_manifest = json.dumps(manifest_dict, sort_keys=True, separators=(',', ':')) if raw_manifest != compact_manifest: raise ValidationError('The manifest appears to be malformed. Please ensure that it conforms to the EthPM-Spec for document format. http://ethpm.github.io/ethpm-spec/package-spec.html#document-format ') # depends on [control=['if'], data=[]]
def _extend_settings(settings, configurator_config, prefix=None): """ Extend settings dictionary with content of yaml's configurator key. .. note:: This methods changes multilayered subkeys defined within **configurator** into dotted keys in settings dictionary: .. code-block:: yaml configurator: sqlalchemy: url: mysql://user:password@host/dbname will result in **sqlalchemy.url**: mysql://user:password@host/dbname key value in settings dictionary. :param dict settings: settings dictionary :param dict configurator_config: yml defined settings :param str prefix: prefix for settings dict key """ for key in configurator_config: settings_key = '.'.join([prefix, key]) if prefix else key if hasattr(configurator_config[key], 'keys') and\ hasattr(configurator_config[key], '__getitem__'): _extend_settings( settings, configurator_config[key], prefix=settings_key ) else: settings[settings_key] = configurator_config[key]
def function[_extend_settings, parameter[settings, configurator_config, prefix]]: constant[ Extend settings dictionary with content of yaml's configurator key. .. note:: This methods changes multilayered subkeys defined within **configurator** into dotted keys in settings dictionary: .. code-block:: yaml configurator: sqlalchemy: url: mysql://user:password@host/dbname will result in **sqlalchemy.url**: mysql://user:password@host/dbname key value in settings dictionary. :param dict settings: settings dictionary :param dict configurator_config: yml defined settings :param str prefix: prefix for settings dict key ] for taget[name[key]] in starred[name[configurator_config]] begin[:] variable[settings_key] assign[=] <ast.IfExp object at 0x7da18fe939a0> if <ast.BoolOp object at 0x7da18fe93c40> begin[:] call[name[_extend_settings], parameter[name[settings], call[name[configurator_config]][name[key]]]]
keyword[def] identifier[_extend_settings] ( identifier[settings] , identifier[configurator_config] , identifier[prefix] = keyword[None] ): literal[string] keyword[for] identifier[key] keyword[in] identifier[configurator_config] : identifier[settings_key] = literal[string] . identifier[join] ([ identifier[prefix] , identifier[key] ]) keyword[if] identifier[prefix] keyword[else] identifier[key] keyword[if] identifier[hasattr] ( identifier[configurator_config] [ identifier[key] ], literal[string] ) keyword[and] identifier[hasattr] ( identifier[configurator_config] [ identifier[key] ], literal[string] ): identifier[_extend_settings] ( identifier[settings] , identifier[configurator_config] [ identifier[key] ], identifier[prefix] = identifier[settings_key] ) keyword[else] : identifier[settings] [ identifier[settings_key] ]= identifier[configurator_config] [ identifier[key] ]
def _extend_settings(settings, configurator_config, prefix=None): """ Extend settings dictionary with content of yaml's configurator key. .. note:: This methods changes multilayered subkeys defined within **configurator** into dotted keys in settings dictionary: .. code-block:: yaml configurator: sqlalchemy: url: mysql://user:password@host/dbname will result in **sqlalchemy.url**: mysql://user:password@host/dbname key value in settings dictionary. :param dict settings: settings dictionary :param dict configurator_config: yml defined settings :param str prefix: prefix for settings dict key """ for key in configurator_config: settings_key = '.'.join([prefix, key]) if prefix else key if hasattr(configurator_config[key], 'keys') and hasattr(configurator_config[key], '__getitem__'): _extend_settings(settings, configurator_config[key], prefix=settings_key) # depends on [control=['if'], data=[]] else: settings[settings_key] = configurator_config[key] # depends on [control=['for'], data=['key']]
def _sample_3d(self, n, seed=None): """Specialized inversion sampler for 3D.""" seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d') u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0) z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype) # TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could # be bisected for bounded sampling runtime (i.e. not rejection sampling). # [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/ # The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa # We must protect against both kappa and z being zero. safe_conc = tf.where(self.concentration > 0, self.concentration, tf.ones_like(self.concentration)) safe_z = tf.where(z > 0, z, tf.ones_like(z)) safe_u = 1 + tf.reduce_logsumexp( input_tensor=[ tf.math.log(safe_z), tf.math.log1p(-safe_z) - 2 * safe_conc ], axis=0) / safe_conc # Limit of the above expression as kappa->0 is 2*z-1 u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u, 2 * z - 1) # Limit of the expression as z->0 is -1. u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u) if not self._allow_nan_stats: u = tf.debugging.check_numerics(u, 'u in _sample_3d') return u[..., tf.newaxis]
def function[_sample_3d, parameter[self, n, seed]]: constant[Specialized inversion sampler for 3D.] variable[seed] assign[=] call[name[seed_stream].SeedStream, parameter[name[seed]]] variable[u_shape] assign[=] call[name[tf].concat, parameter[list[[<ast.List object at 0x7da1b0211840>, <ast.Call object at 0x7da1b0211e40>]]]] variable[z] assign[=] call[name[tf].random.uniform, parameter[name[u_shape]]] variable[safe_conc] assign[=] call[name[tf].where, parameter[compare[name[self].concentration greater[>] constant[0]], name[self].concentration, call[name[tf].ones_like, parameter[name[self].concentration]]]] variable[safe_z] assign[=] call[name[tf].where, parameter[compare[name[z] greater[>] constant[0]], name[z], call[name[tf].ones_like, parameter[name[z]]]]] variable[safe_u] assign[=] binary_operation[constant[1] + binary_operation[call[name[tf].reduce_logsumexp, parameter[]] / name[safe_conc]]] variable[u] assign[=] call[name[tf].where, parameter[compare[name[self].concentration greater[>] call[name[tf].zeros_like, parameter[name[safe_u]]]], name[safe_u], binary_operation[binary_operation[constant[2] * name[z]] - constant[1]]]] variable[u] assign[=] call[name[tf].where, parameter[call[name[tf].equal, parameter[name[z], constant[0]]], <ast.UnaryOp object at 0x7da1b03e03d0>, name[u]]] if <ast.UnaryOp object at 0x7da1b0322650> begin[:] variable[u] assign[=] call[name[tf].debugging.check_numerics, parameter[name[u], constant[u in _sample_3d]]] return[call[name[u]][tuple[[<ast.Constant object at 0x7da1b03235e0>, <ast.Attribute object at 0x7da1b0321270>]]]]
keyword[def] identifier[_sample_3d] ( identifier[self] , identifier[n] , identifier[seed] = keyword[None] ): literal[string] identifier[seed] = identifier[seed_stream] . identifier[SeedStream] ( identifier[seed] , identifier[salt] = literal[string] ) identifier[u_shape] = identifier[tf] . identifier[concat] ([[ identifier[n] ], identifier[self] . identifier[_batch_shape_tensor] ()], identifier[axis] = literal[int] ) identifier[z] = identifier[tf] . identifier[random] . identifier[uniform] ( identifier[u_shape] , identifier[seed] = identifier[seed] (), identifier[dtype] = identifier[self] . identifier[dtype] ) identifier[safe_conc] = identifier[tf] . identifier[where] ( identifier[self] . identifier[concentration] > literal[int] , identifier[self] . identifier[concentration] , identifier[tf] . identifier[ones_like] ( identifier[self] . identifier[concentration] )) identifier[safe_z] = identifier[tf] . identifier[where] ( identifier[z] > literal[int] , identifier[z] , identifier[tf] . identifier[ones_like] ( identifier[z] )) identifier[safe_u] = literal[int] + identifier[tf] . identifier[reduce_logsumexp] ( identifier[input_tensor] =[ identifier[tf] . identifier[math] . identifier[log] ( identifier[safe_z] ), identifier[tf] . identifier[math] . identifier[log1p] (- identifier[safe_z] )- literal[int] * identifier[safe_conc] ], identifier[axis] = literal[int] )/ identifier[safe_conc] identifier[u] = identifier[tf] . identifier[where] ( identifier[self] . identifier[concentration] > identifier[tf] . identifier[zeros_like] ( identifier[safe_u] ), identifier[safe_u] , literal[int] * identifier[z] - literal[int] ) identifier[u] = identifier[tf] . identifier[where] ( identifier[tf] . identifier[equal] ( identifier[z] , literal[int] ),- identifier[tf] . identifier[ones_like] ( identifier[u] ), identifier[u] ) keyword[if] keyword[not] identifier[self] . identifier[_allow_nan_stats] : identifier[u] = identifier[tf] . identifier[debugging] . 
identifier[check_numerics] ( identifier[u] , literal[string] ) keyword[return] identifier[u] [..., identifier[tf] . identifier[newaxis] ]
def _sample_3d(self, n, seed=None): """Specialized inversion sampler for 3D.""" seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d') u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0) z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype) # TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could # be bisected for bounded sampling runtime (i.e. not rejection sampling). # [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/ # The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa # We must protect against both kappa and z being zero. safe_conc = tf.where(self.concentration > 0, self.concentration, tf.ones_like(self.concentration)) safe_z = tf.where(z > 0, z, tf.ones_like(z)) safe_u = 1 + tf.reduce_logsumexp(input_tensor=[tf.math.log(safe_z), tf.math.log1p(-safe_z) - 2 * safe_conc], axis=0) / safe_conc # Limit of the above expression as kappa->0 is 2*z-1 u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u, 2 * z - 1) # Limit of the expression as z->0 is -1. u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u) if not self._allow_nan_stats: u = tf.debugging.check_numerics(u, 'u in _sample_3d') # depends on [control=['if'], data=[]] return u[..., tf.newaxis]
def t_INITIAL_SHARP(self, t): r'\#' if self.find_column(t) == 1: t.lexer.begin('preproc') else: self.t_INITIAL_preproc_error(t)
def function[t_INITIAL_SHARP, parameter[self, t]]: constant[\#] if compare[call[name[self].find_column, parameter[name[t]]] equal[==] constant[1]] begin[:] call[name[t].lexer.begin, parameter[constant[preproc]]]
keyword[def] identifier[t_INITIAL_SHARP] ( identifier[self] , identifier[t] ): literal[string] keyword[if] identifier[self] . identifier[find_column] ( identifier[t] )== literal[int] : identifier[t] . identifier[lexer] . identifier[begin] ( literal[string] ) keyword[else] : identifier[self] . identifier[t_INITIAL_preproc_error] ( identifier[t] )
def t_INITIAL_SHARP(self, t): """\\#""" if self.find_column(t) == 1: t.lexer.begin('preproc') # depends on [control=['if'], data=[]] else: self.t_INITIAL_preproc_error(t)
def add_to_inventory(self): """Adds lb IPs to stack inventory""" if self.lb_attrs: self.lb_attrs = self.consul.lb_details( self.lb_attrs[A.loadbalancer.ID] ) host = self.lb_attrs['virtualIps'][0]['address'] self.stack.add_lb_secgroup(self.name, [host], self.backend_port) self.stack.add_host( host, [self.name], self.lb_attrs )
def function[add_to_inventory, parameter[self]]: constant[Adds lb IPs to stack inventory] if name[self].lb_attrs begin[:] name[self].lb_attrs assign[=] call[name[self].consul.lb_details, parameter[call[name[self].lb_attrs][name[A].loadbalancer.ID]]] variable[host] assign[=] call[call[call[name[self].lb_attrs][constant[virtualIps]]][constant[0]]][constant[address]] call[name[self].stack.add_lb_secgroup, parameter[name[self].name, list[[<ast.Name object at 0x7da18ede66e0>]], name[self].backend_port]] call[name[self].stack.add_host, parameter[name[host], list[[<ast.Attribute object at 0x7da18ede5090>]], name[self].lb_attrs]]
keyword[def] identifier[add_to_inventory] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[lb_attrs] : identifier[self] . identifier[lb_attrs] = identifier[self] . identifier[consul] . identifier[lb_details] ( identifier[self] . identifier[lb_attrs] [ identifier[A] . identifier[loadbalancer] . identifier[ID] ] ) identifier[host] = identifier[self] . identifier[lb_attrs] [ literal[string] ][ literal[int] ][ literal[string] ] identifier[self] . identifier[stack] . identifier[add_lb_secgroup] ( identifier[self] . identifier[name] ,[ identifier[host] ], identifier[self] . identifier[backend_port] ) identifier[self] . identifier[stack] . identifier[add_host] ( identifier[host] , [ identifier[self] . identifier[name] ], identifier[self] . identifier[lb_attrs] )
def add_to_inventory(self): """Adds lb IPs to stack inventory""" if self.lb_attrs: self.lb_attrs = self.consul.lb_details(self.lb_attrs[A.loadbalancer.ID]) host = self.lb_attrs['virtualIps'][0]['address'] self.stack.add_lb_secgroup(self.name, [host], self.backend_port) self.stack.add_host(host, [self.name], self.lb_attrs) # depends on [control=['if'], data=[]]
def dict2str(self, d: Dict, joiner: str) -> str: """ Convert dict to str as input for tokenizer Args: d (dict): dict for converting joiner (str): join the elements using this string to separate them. Returns: the value of the dict as a string """ result = str() for key in d: result = result + str(key) + " : " if isinstance(d[key], list): result = result + self.list2str(d[key], joiner) + joiner elif isinstance(d[key], dict): result = result + self.dict2str(d[key], joiner) + joiner elif d[key]: result = result + str(d[key]) + joiner return result
def function[dict2str, parameter[self, d, joiner]]: constant[ Convert dict to str as input for tokenizer Args: d (dict): dict for converting joiner (str): join the elements using this string to separate them. Returns: the value of the dict as a string ] variable[result] assign[=] call[name[str], parameter[]] for taget[name[key]] in starred[name[d]] begin[:] variable[result] assign[=] binary_operation[binary_operation[name[result] + call[name[str], parameter[name[key]]]] + constant[ : ]] if call[name[isinstance], parameter[call[name[d]][name[key]], name[list]]] begin[:] variable[result] assign[=] binary_operation[binary_operation[name[result] + call[name[self].list2str, parameter[call[name[d]][name[key]], name[joiner]]]] + name[joiner]] return[name[result]]
keyword[def] identifier[dict2str] ( identifier[self] , identifier[d] : identifier[Dict] , identifier[joiner] : identifier[str] )-> identifier[str] : literal[string] identifier[result] = identifier[str] () keyword[for] identifier[key] keyword[in] identifier[d] : identifier[result] = identifier[result] + identifier[str] ( identifier[key] )+ literal[string] keyword[if] identifier[isinstance] ( identifier[d] [ identifier[key] ], identifier[list] ): identifier[result] = identifier[result] + identifier[self] . identifier[list2str] ( identifier[d] [ identifier[key] ], identifier[joiner] )+ identifier[joiner] keyword[elif] identifier[isinstance] ( identifier[d] [ identifier[key] ], identifier[dict] ): identifier[result] = identifier[result] + identifier[self] . identifier[dict2str] ( identifier[d] [ identifier[key] ], identifier[joiner] )+ identifier[joiner] keyword[elif] identifier[d] [ identifier[key] ]: identifier[result] = identifier[result] + identifier[str] ( identifier[d] [ identifier[key] ])+ identifier[joiner] keyword[return] identifier[result]
def dict2str(self, d: Dict, joiner: str) -> str: """ Convert dict to str as input for tokenizer Args: d (dict): dict for converting joiner (str): join the elements using this string to separate them. Returns: the value of the dict as a string """ result = str() for key in d: result = result + str(key) + ' : ' if isinstance(d[key], list): result = result + self.list2str(d[key], joiner) + joiner # depends on [control=['if'], data=[]] elif isinstance(d[key], dict): result = result + self.dict2str(d[key], joiner) + joiner # depends on [control=['if'], data=[]] elif d[key]: result = result + str(d[key]) + joiner # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] return result
def DeserializeUnsigned(self, reader): """ Deserialize object. Args: reader (neo.IO.BinaryReader): Raises: Exception: if transaction type is incorrect. """ txtype = reader.ReadByte() if txtype != int.from_bytes(self.Type, 'little'): raise Exception('incorrect type {}, wanted {}'.format(txtype, int.from_bytes(self.Type, 'little'))) self.DeserializeUnsignedWithoutType(reader)
def function[DeserializeUnsigned, parameter[self, reader]]: constant[ Deserialize object. Args: reader (neo.IO.BinaryReader): Raises: Exception: if transaction type is incorrect. ] variable[txtype] assign[=] call[name[reader].ReadByte, parameter[]] if compare[name[txtype] not_equal[!=] call[name[int].from_bytes, parameter[name[self].Type, constant[little]]]] begin[:] <ast.Raise object at 0x7da204620400> call[name[self].DeserializeUnsignedWithoutType, parameter[name[reader]]]
keyword[def] identifier[DeserializeUnsigned] ( identifier[self] , identifier[reader] ): literal[string] identifier[txtype] = identifier[reader] . identifier[ReadByte] () keyword[if] identifier[txtype] != identifier[int] . identifier[from_bytes] ( identifier[self] . identifier[Type] , literal[string] ): keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[txtype] , identifier[int] . identifier[from_bytes] ( identifier[self] . identifier[Type] , literal[string] ))) identifier[self] . identifier[DeserializeUnsignedWithoutType] ( identifier[reader] )
def DeserializeUnsigned(self, reader): """ Deserialize object. Args: reader (neo.IO.BinaryReader): Raises: Exception: if transaction type is incorrect. """ txtype = reader.ReadByte() if txtype != int.from_bytes(self.Type, 'little'): raise Exception('incorrect type {}, wanted {}'.format(txtype, int.from_bytes(self.Type, 'little'))) # depends on [control=['if'], data=['txtype']] self.DeserializeUnsignedWithoutType(reader)