code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def treble(self):
    """int: The speaker's treble EQ. An integer between -10 and 10."""
    # Query the rendering-control service for the master channel's treble.
    result = self.renderingControl.GetTreble([
        ('InstanceID', 0),
        ('Channel', 'Master'),
    ])
    return int(result['CurrentTreble'])
int: The speaker's treble EQ. An integer between -10 and 10.
Below is the the instruction that describes the task: ### Input: int: The speaker's treble EQ. An integer between -10 and 10. ### Response: def treble(self): """int: The speaker's treble EQ. An integer between -10 and 10. """ response = self.renderingControl.GetTreble([ ('InstanceID', 0), ('Channel', 'Master'), ]) treble = response['CurrentTreble'] return int(treble)
def logstop(self):
    """Fully stop logging and close log file.

    In order to start logging again, a new logstart() call needs to be
    made, possibly (though not necessarily) with a new filename, mode
    and other options.
    """
    if self.logfile is not None:
        self.logfile.close()
        self.logfile = None
    else:
        # The original used a Python-2-only print statement, which is a
        # SyntaxError on Python 3; the call form behaves identically on both.
        print("Logging hadn't been started.")
    self.log_active = False
Fully stop logging and close log file. In order to start logging again, a new logstart() call needs to be made, possibly (though not necessarily) with a new filename, mode and other options.
Below is the the instruction that describes the task: ### Input: Fully stop logging and close log file. In order to start logging again, a new logstart() call needs to be made, possibly (though not necessarily) with a new filename, mode and other options. ### Response: def logstop(self): """Fully stop logging and close log file. In order to start logging again, a new logstart() call needs to be made, possibly (though not necessarily) with a new filename, mode and other options.""" if self.logfile is not None: self.logfile.close() self.logfile = None else: print "Logging hadn't been started." self.log_active = False
def get_label_at_address(self, address, offset = None): """ Creates a label from the given memory address. If the address belongs to the module, the label is made relative to it's base address. @type address: int @param address: Memory address. @type offset: None or int @param offset: (Optional) Offset value. @rtype: str @return: Label pointing to the given address. """ # Add the offset to the address. if offset: address = address + offset # Make the label relative to the base address if no match is found. module = self.get_name() function = None offset = address - self.get_base() # Make the label relative to the entrypoint if no other match is found. # Skip if the entry point is unknown. start = self.get_entry_point() if start and start <= address: function = "start" offset = address - start # Enumerate exported functions and debug symbols, # then find the closest match, if possible. try: symbol = self.get_symbol_at_address(address) if symbol: (SymbolName, SymbolAddress, SymbolSize) = symbol new_offset = address - SymbolAddress if new_offset <= offset: function = SymbolName offset = new_offset except WindowsError: pass # Parse the label and return it. return _ModuleContainer.parse_label(module, function, offset)
Creates a label from the given memory address. If the address belongs to the module, the label is made relative to it's base address. @type address: int @param address: Memory address. @type offset: None or int @param offset: (Optional) Offset value. @rtype: str @return: Label pointing to the given address.
Below is the the instruction that describes the task: ### Input: Creates a label from the given memory address. If the address belongs to the module, the label is made relative to it's base address. @type address: int @param address: Memory address. @type offset: None or int @param offset: (Optional) Offset value. @rtype: str @return: Label pointing to the given address. ### Response: def get_label_at_address(self, address, offset = None): """ Creates a label from the given memory address. If the address belongs to the module, the label is made relative to it's base address. @type address: int @param address: Memory address. @type offset: None or int @param offset: (Optional) Offset value. @rtype: str @return: Label pointing to the given address. """ # Add the offset to the address. if offset: address = address + offset # Make the label relative to the base address if no match is found. module = self.get_name() function = None offset = address - self.get_base() # Make the label relative to the entrypoint if no other match is found. # Skip if the entry point is unknown. start = self.get_entry_point() if start and start <= address: function = "start" offset = address - start # Enumerate exported functions and debug symbols, # then find the closest match, if possible. try: symbol = self.get_symbol_at_address(address) if symbol: (SymbolName, SymbolAddress, SymbolSize) = symbol new_offset = address - SymbolAddress if new_offset <= offset: function = SymbolName offset = new_offset except WindowsError: pass # Parse the label and return it. return _ModuleContainer.parse_label(module, function, offset)
def user(ctx, username, password):
    """[GROUP] User management operations"""
    # Stash the credentials on the click context for subcommands to use.
    for key, value in (('username', username), ('password', password)):
        ctx.obj[key] = value
[GROUP] User management operations
Below is the the instruction that describes the task: ### Input: [GROUP] User management operations ### Response: def user(ctx, username, password): """[GROUP] User management operations""" ctx.obj['username'] = username ctx.obj['password'] = password
def args_range(min_value, max_value, *args):
    """Check that every argument falls within [min_value, max_value].

    Raises ValueError on the first out-of-range argument; arguments are
    first checked for null-ness via ``not_null``.
    """
    not_null(*args)
    for value in args:
        if not (min_value <= value <= max_value):
            raise ValueError("Argument must be between {0} and {1}!".format(min_value, max_value))
检查参数范围
Below is the the instruction that describes the task: ### Input: 检查参数范围 ### Response: def args_range(min_value, max_value, *args): """ 检查参数范围 """ not_null(*args) if not all(map(lambda v: min_value <= v <= max_value, args)): raise ValueError("Argument must be between {0} and {1}!".format(min_value, max_value))
def valid(number):
    """Return True if the number string passes the Luhn checksum.

    The number string passed to the function must contain only numeric
    characters, otherwise behavior is undefined.
    """
    digits = [ord(ch) - ord('0') for ch in number]

    # Digits at odd positions from the right contribute as-is.
    total = sum(digits[-1::-2])

    # Digits at even positions from the right are doubled; 9 is
    # subtracted from any doubled value greater than 9.
    for d in digits[-2::-2]:
        d *= 2
        if d > 9:
            d -= 9
        total += d

    return total % 10 == 0
Returns true if the number string is luhn valid, and false otherwise. The number string passed to the function must contain only numeric characters otherwise behavior is undefined.
Below is the the instruction that describes the task: ### Input: Returns true if the number string is luhn valid, and false otherwise. The number string passed to the function must contain only numeric characters otherwise behavior is undefined. ### Response: def valid(number): """ Returns true if the number string is luhn valid, and false otherwise. The number string passed to the function must contain only numeric characters otherwise behavior is undefined. """ checksum = 0 number_len = len(number) offset = ord('0') i = number_len - 1 while i >= 0: n = ord(number[i]) - offset checksum += n i -= 2 i = number_len - 2 while i >= 0: n = ord(number[i]) - offset n *= 2 if n > 9: n -= 9 checksum += n i -= 2 return checksum%10 == 0
def bokeh_shot_chart(data, x="LOC_X", y="LOC_Y", fill_color="#1f77b4",
                     scatter_size=10, fill_alpha=0.4, line_alpha=0.4,
                     court_line_color='gray', court_line_width=1,
                     hover_tool=False, tooltips=None, **kwargs):
    # TODO: Settings for hover tooltip
    """
    Returns a figure with both FGA and basketball court lines drawn onto it.

    This function expects data to be a ColumnDataSource with the x and y
    values named "LOC_X" and "LOC_Y".  Otherwise specify x and y.

    Parameters
    ----------
    data : DataFrame
        The DataFrame that contains the shot chart data.
    x, y : str, optional
        The x and y coordinates of the shots taken.
    fill_color : str, optional
        The fill color of the shots. Can be a Hex value.
    scatter_size : int, optional
        The size of the dots for the scatter plot.
    fill_alpha : float, optional
        Alpha value for the shots. Must be a floating point value between
        0 (transparent) to 1 (opaque).
    line_alpha : float, optional
        Alpha value for the outer lines of the plotted shots. Must be a
        floating point value between 0 (transparent) to 1 (opaque).
    court_line_color : str, optional
        The color of the court lines. Can be a Hex value.
    court_line_width : float, optional
        The linewidth of the court lines in pixels.
    hover_tool : boolean, optional
        If ``True``, creates hover tooltip for the plot.
    tooltips : List of tuples, optional
        Provides the information for the hover tooltip.

    Returns
    -------
    fig : Figure
        The Figure object with the shot chart plotted on it.
    """
    source = ColumnDataSource(data)

    # y-range is inverted ([422.5, -47.5]) so the hoop ends up at the top
    # of the figure; axes are suppressed since the court drawing replaces
    # them.
    fig = figure(width=700, height=658, x_range=[-250, 250],
                 y_range=[422.5, -47.5], min_border=0, x_axis_type=None,
                 y_axis_type=None, outline_line_color="black", **kwargs)

    fig.scatter(x, y, source=source, size=scatter_size, color=fill_color,
                alpha=fill_alpha, line_alpha=line_alpha)

    bokeh_draw_court(fig, line_color=court_line_color,
                     line_width=court_line_width)

    if hover_tool:
        # The scatter renderer is the first one added to the figure, so
        # restrict the hover tool to renderers[0].
        hover = HoverTool(renderers=[fig.renderers[0]], tooltips=tooltips)
        fig.add_tools(hover)

    return fig
Returns a figure with both FGA and basketball court lines drawn onto it. This function expects data to be a ColumnDataSource with the x and y values named "LOC_X" and "LOC_Y". Otherwise specify x and y. Parameters ---------- data : DataFrame The DataFrame that contains the shot chart data. x, y : str, optional The x and y coordinates of the shots taken. fill_color : str, optional The fill color of the shots. Can be a a Hex value. scatter_size : int, optional The size of the dots for the scatter plot. fill_alpha : float, optional Alpha value for the shots. Must be a floating point value between 0 (transparent) to 1 (opaque). line_alpha : float, optiona Alpha value for the outer lines of the plotted shots. Must be a floating point value between 0 (transparent) to 1 (opaque). court_line_color : str, optional The color of the court lines. Can be a a Hex value. court_line_width : float, optional The linewidth the of the court lines in pixels. hover_tool : boolean, optional If ``True``, creates hover tooltip for the plot. tooltips : List of tuples, optional Provides the information for the the hover tooltip. Returns ------- fig : Figure The Figure object with the shot chart plotted on it.
Below is the the instruction that describes the task: ### Input: Returns a figure with both FGA and basketball court lines drawn onto it. This function expects data to be a ColumnDataSource with the x and y values named "LOC_X" and "LOC_Y". Otherwise specify x and y. Parameters ---------- data : DataFrame The DataFrame that contains the shot chart data. x, y : str, optional The x and y coordinates of the shots taken. fill_color : str, optional The fill color of the shots. Can be a a Hex value. scatter_size : int, optional The size of the dots for the scatter plot. fill_alpha : float, optional Alpha value for the shots. Must be a floating point value between 0 (transparent) to 1 (opaque). line_alpha : float, optiona Alpha value for the outer lines of the plotted shots. Must be a floating point value between 0 (transparent) to 1 (opaque). court_line_color : str, optional The color of the court lines. Can be a a Hex value. court_line_width : float, optional The linewidth the of the court lines in pixels. hover_tool : boolean, optional If ``True``, creates hover tooltip for the plot. tooltips : List of tuples, optional Provides the information for the the hover tooltip. Returns ------- fig : Figure The Figure object with the shot chart plotted on it. ### Response: def bokeh_shot_chart(data, x="LOC_X", y="LOC_Y", fill_color="#1f77b4", scatter_size=10, fill_alpha=0.4, line_alpha=0.4, court_line_color='gray', court_line_width=1, hover_tool=False, tooltips=None, **kwargs): # TODO: Settings for hover tooltip """ Returns a figure with both FGA and basketball court lines drawn onto it. This function expects data to be a ColumnDataSource with the x and y values named "LOC_X" and "LOC_Y". Otherwise specify x and y. Parameters ---------- data : DataFrame The DataFrame that contains the shot chart data. x, y : str, optional The x and y coordinates of the shots taken. fill_color : str, optional The fill color of the shots. Can be a a Hex value. 
scatter_size : int, optional The size of the dots for the scatter plot. fill_alpha : float, optional Alpha value for the shots. Must be a floating point value between 0 (transparent) to 1 (opaque). line_alpha : float, optiona Alpha value for the outer lines of the plotted shots. Must be a floating point value between 0 (transparent) to 1 (opaque). court_line_color : str, optional The color of the court lines. Can be a a Hex value. court_line_width : float, optional The linewidth the of the court lines in pixels. hover_tool : boolean, optional If ``True``, creates hover tooltip for the plot. tooltips : List of tuples, optional Provides the information for the the hover tooltip. Returns ------- fig : Figure The Figure object with the shot chart plotted on it. """ source = ColumnDataSource(data) fig = figure(width=700, height=658, x_range=[-250, 250], y_range=[422.5, -47.5], min_border=0, x_axis_type=None, y_axis_type=None, outline_line_color="black", **kwargs) fig.scatter(x, y, source=source, size=scatter_size, color=fill_color, alpha=fill_alpha, line_alpha=line_alpha) bokeh_draw_court(fig, line_color=court_line_color, line_width=court_line_width) if hover_tool: hover = HoverTool(renderers=[fig.renderers[0]], tooltips=tooltips) fig.add_tools(hover) return fig
def reverseCommit(self):
    """
    Remove the inserted character(s).
    """
    cursor = self.qteWidget.textCursor()

    # An embedded image counts as exactly one character no matter how
    # long its underlying HTML is; pasted text spans len(self.data)
    # characters.
    if self.isImage:
        numChars = 1
    else:
        numChars = len(self.data)

    # Place the cursor just to the right of the inserted data, then
    # delete backwards one character at a time.
    cursor.setPosition(self.selStart + numChars, QtGui.QTextCursor.MoveAnchor)
    for _ in range(numChars):
        cursor.deletePreviousChar()

    # Restore whatever text was selected before the paste (may be none).
    cursor.insertHtml(self.selText)
    self.qteWidget.setTextCursor(cursor)
Remove the inserted character(s).
Below is the the instruction that describes the task: ### Input: Remove the inserted character(s). ### Response: def reverseCommit(self): """ Remove the inserted character(s). """ # Move the cursor to the right of the text to delete. tc = self.qteWidget.textCursor() # Delete as many characters as necessary. For an image that would # be exactly 1 even though the HTML code to embed that image is usually # longer. For text, it would be as many characters as the pasted text # was long. if self.isImage: dataLen = 1 else: dataLen = len(self.data) tc.setPosition(self.selStart + dataLen, QtGui.QTextCursor.MoveAnchor) for ii in range(dataLen): tc.deletePreviousChar() # Add the previously selected text (this may be none). tc.insertHtml(self.selText) self.qteWidget.setTextCursor(tc)
def add_decoherence_noise(prog, T1=30e-6, T2=30e-6, gate_time_1q=50e-9,
                          gate_time_2q=150e-09, ro_fidelity=0.95):
    """
    Add generic damping and dephasing noise to a program.

    This high-level function is provided as a convenience to investigate the
    effects of a generic noise model on a program. For more fine-grained
    control, please investigate the other methods available in the
    ``pyquil.noise`` module.

    In an attempt to closely model the QPU, noisy versions of RX(+-pi/2) and
    CZ are provided; I and parametric RZ are noiseless, and other gates are
    not allowed. To use this function, you need to compile your program to
    this native gate set.

    The default noise parameters

    - T1 = 30 us
    - T2 = 30 us
    - 1q gate time = 50 ns
    - 2q gate time = 150 ns

    are currently typical for near-term devices.

    This function will define new gates and add Kraus noise to these gates.
    It will translate the input program to use the noisy version of the
    gates.

    :param prog: A pyquil program consisting of I, RZ, CZ, and RX(+-pi/2)
        instructions
    :param Union[Dict[int,float],float] T1: The T1 amplitude damping time
        either globally or in a dictionary indexed by qubit id. By default,
        this is 30 us.
    :param Union[Dict[int,float],float] T2: The T2 dephasing time either
        globally or in a dictionary indexed by qubit id. By default, this is
        also 30 us.
    :param float gate_time_1q: The duration of the one-qubit gates, namely
        RX(+pi/2) and RX(-pi/2). By default, this is 50 ns.
    :param float gate_time_2q: The duration of the two-qubit gates, namely
        CZ. By default, this is 150 ns.
    :param Union[Dict[int,float],float] ro_fidelity: The readout assignment
        fidelity :math:`F = (p(0|0) + p(1|1))/2` either globally or in a
        dictionary indexed by qubit id.
    :return: A new program with noisy operators.
    """
    # Build the noise model from the gates actually used by the program,
    # then rewrite the program against the noisy gate definitions.
    noisy_model = _decoherence_noise_model(
        _get_program_gates(prog),
        T1=T1,
        T2=T2,
        gate_time_1q=gate_time_1q,
        gate_time_2q=gate_time_2q,
        ro_fidelity=ro_fidelity,
    )
    return apply_noise_model(prog, noisy_model)
Add generic damping and dephasing noise to a program. This high-level function is provided as a convenience to investigate the effects of a generic noise model on a program. For more fine-grained control, please investigate the other methods available in the ``pyquil.noise`` module. In an attempt to closely model the QPU, noisy versions of RX(+-pi/2) and CZ are provided; I and parametric RZ are noiseless, and other gates are not allowed. To use this function, you need to compile your program to this native gate set. The default noise parameters - T1 = 30 us - T2 = 30 us - 1q gate time = 50 ns - 2q gate time = 150 ns are currently typical for near-term devices. This function will define new gates and add Kraus noise to these gates. It will translate the input program to use the noisy version of the gates. :param prog: A pyquil program consisting of I, RZ, CZ, and RX(+-pi/2) instructions :param Union[Dict[int,float],float] T1: The T1 amplitude damping time either globally or in a dictionary indexed by qubit id. By default, this is 30 us. :param Union[Dict[int,float],float] T2: The T2 dephasing time either globally or in a dictionary indexed by qubit id. By default, this is also 30 us. :param float gate_time_1q: The duration of the one-qubit gates, namely RX(+pi/2) and RX(-pi/2). By default, this is 50 ns. :param float gate_time_2q: The duration of the two-qubit gates, namely CZ. By default, this is 150 ns. :param Union[Dict[int,float],float] ro_fidelity: The readout assignment fidelity :math:`F = (p(0|0) + p(1|1))/2` either globally or in a dictionary indexed by qubit id. :return: A new program with noisy operators.
Below is the the instruction that describes the task: ### Input: Add generic damping and dephasing noise to a program. This high-level function is provided as a convenience to investigate the effects of a generic noise model on a program. For more fine-grained control, please investigate the other methods available in the ``pyquil.noise`` module. In an attempt to closely model the QPU, noisy versions of RX(+-pi/2) and CZ are provided; I and parametric RZ are noiseless, and other gates are not allowed. To use this function, you need to compile your program to this native gate set. The default noise parameters - T1 = 30 us - T2 = 30 us - 1q gate time = 50 ns - 2q gate time = 150 ns are currently typical for near-term devices. This function will define new gates and add Kraus noise to these gates. It will translate the input program to use the noisy version of the gates. :param prog: A pyquil program consisting of I, RZ, CZ, and RX(+-pi/2) instructions :param Union[Dict[int,float],float] T1: The T1 amplitude damping time either globally or in a dictionary indexed by qubit id. By default, this is 30 us. :param Union[Dict[int,float],float] T2: The T2 dephasing time either globally or in a dictionary indexed by qubit id. By default, this is also 30 us. :param float gate_time_1q: The duration of the one-qubit gates, namely RX(+pi/2) and RX(-pi/2). By default, this is 50 ns. :param float gate_time_2q: The duration of the two-qubit gates, namely CZ. By default, this is 150 ns. :param Union[Dict[int,float],float] ro_fidelity: The readout assignment fidelity :math:`F = (p(0|0) + p(1|1))/2` either globally or in a dictionary indexed by qubit id. :return: A new program with noisy operators. ### Response: def add_decoherence_noise(prog, T1=30e-6, T2=30e-6, gate_time_1q=50e-9, gate_time_2q=150e-09, ro_fidelity=0.95): """ Add generic damping and dephasing noise to a program. 
This high-level function is provided as a convenience to investigate the effects of a generic noise model on a program. For more fine-grained control, please investigate the other methods available in the ``pyquil.noise`` module. In an attempt to closely model the QPU, noisy versions of RX(+-pi/2) and CZ are provided; I and parametric RZ are noiseless, and other gates are not allowed. To use this function, you need to compile your program to this native gate set. The default noise parameters - T1 = 30 us - T2 = 30 us - 1q gate time = 50 ns - 2q gate time = 150 ns are currently typical for near-term devices. This function will define new gates and add Kraus noise to these gates. It will translate the input program to use the noisy version of the gates. :param prog: A pyquil program consisting of I, RZ, CZ, and RX(+-pi/2) instructions :param Union[Dict[int,float],float] T1: The T1 amplitude damping time either globally or in a dictionary indexed by qubit id. By default, this is 30 us. :param Union[Dict[int,float],float] T2: The T2 dephasing time either globally or in a dictionary indexed by qubit id. By default, this is also 30 us. :param float gate_time_1q: The duration of the one-qubit gates, namely RX(+pi/2) and RX(-pi/2). By default, this is 50 ns. :param float gate_time_2q: The duration of the two-qubit gates, namely CZ. By default, this is 150 ns. :param Union[Dict[int,float],float] ro_fidelity: The readout assignment fidelity :math:`F = (p(0|0) + p(1|1))/2` either globally or in a dictionary indexed by qubit id. :return: A new program with noisy operators. """ gates = _get_program_gates(prog) noise_model = _decoherence_noise_model( gates, T1=T1, T2=T2, gate_time_1q=gate_time_1q, gate_time_2q=gate_time_2q, ro_fidelity=ro_fidelity ) return apply_noise_model(prog, noise_model)
def _configure_frozen_scoop(kwargs):
    """Wrapper function that configures a frozen SCOOP set up.

    Deletes of data if necessary.
    """
    def _delete_old_scoop_rev_data(old_scoop_rev):
        # Purge shared variables left over from a previous SCOOP
        # "revolution" so stale data is not reused.
        if old_scoop_rev is not None:
            try:
                elements = shared.elements
                for key in elements:
                    var_dict = elements[key]
                    if old_scoop_rev in var_dict:
                        del var_dict[old_scoop_rev]
                logging.getLogger('pypet.scoop').debug('Deleted old SCOOP data from '
                                                       'revolution `%s`.' % old_scoop_rev)
            except AttributeError:
                logging.getLogger('pypet.scoop').error('Could not delete old SCOOP data from '
                                                       'revolution `%s`.' % old_scoop_rev)

    scoop_rev = kwargs.pop('scoop_rev')

    # Check if we need to reconfigure SCOOP
    # (the previously applied revolution is cached on the function object).
    try:
        old_scoop_rev = _frozen_scoop_single_run.kwargs['scoop_rev']
        configured = old_scoop_rev == scoop_rev
    except (AttributeError, KeyError):
        old_scoop_rev = None
        configured = False

    if not configured:
        # NOTE(review): 424.2 appears to be an arbitrary timeout in
        # seconds for fetching the shared constant — confirm against
        # pypet's SCOOP documentation.
        _frozen_scoop_single_run.kwargs = shared.getConst(scoop_rev, timeout=424.2)
        frozen_kwargs = _frozen_scoop_single_run.kwargs
        frozen_kwargs['scoop_rev'] = scoop_rev
        frozen_kwargs['traj'].v_full_copy = frozen_kwargs['full_copy']
        if not scoop.IS_ORIGIN:
            # Only worker processes get re-niced; the origin keeps its
            # priority.
            _configure_niceness(frozen_kwargs)
        _configure_logging(frozen_kwargs, extract=False)
        _delete_old_scoop_rev_data(old_scoop_rev)
        logging.getLogger('pypet.scoop').info('Configured Worker %s' % str(scoop.worker))
Wrapper function that configures a frozen SCOOP set up. Deletes of data if necessary.
Below is the the instruction that describes the task: ### Input: Wrapper function that configures a frozen SCOOP set up. Deletes of data if necessary. ### Response: def _configure_frozen_scoop(kwargs): """Wrapper function that configures a frozen SCOOP set up. Deletes of data if necessary. """ def _delete_old_scoop_rev_data(old_scoop_rev): if old_scoop_rev is not None: try: elements = shared.elements for key in elements: var_dict = elements[key] if old_scoop_rev in var_dict: del var_dict[old_scoop_rev] logging.getLogger('pypet.scoop').debug('Deleted old SCOOP data from ' 'revolution `%s`.' % old_scoop_rev) except AttributeError: logging.getLogger('pypet.scoop').error('Could not delete old SCOOP data from ' 'revolution `%s`.' % old_scoop_rev) scoop_rev = kwargs.pop('scoop_rev') # Check if we need to reconfigure SCOOP try: old_scoop_rev = _frozen_scoop_single_run.kwargs['scoop_rev'] configured = old_scoop_rev == scoop_rev except (AttributeError, KeyError): old_scoop_rev = None configured = False if not configured: _frozen_scoop_single_run.kwargs = shared.getConst(scoop_rev, timeout=424.2) frozen_kwargs = _frozen_scoop_single_run.kwargs frozen_kwargs['scoop_rev'] = scoop_rev frozen_kwargs['traj'].v_full_copy = frozen_kwargs['full_copy'] if not scoop.IS_ORIGIN: _configure_niceness(frozen_kwargs) _configure_logging(frozen_kwargs, extract=False) _delete_old_scoop_rev_data(old_scoop_rev) logging.getLogger('pypet.scoop').info('Configured Worker %s' % str(scoop.worker))
def atlas_rank_peers_by_data_availability(peer_list=None, peer_table=None, local_inv=None, con=None, path=None):
    """
    Get a ranking of peers to contact for a zonefile.
    Peers are ranked by the number of zonefiles they have which we don't
    have.

    This is used to select neighbors.

    :param peer_list: optional explicit list of peer hostports; defaults
        to every peer in the peer table.
    :param peer_table: the peer table to lock and consult.
    :param local_inv: our own zonefile inventory; computed from the
        database if not given.
    :param con, path: database connection/path for inventory queries.
    :return: list of peer hostports, most-available first.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_list is None:
            # Snapshot the current peer set.  list() works on both
            # Python 2 and 3; the previous `ptbl.keys()[:]` breaks on
            # Python 3, where keys() returns a non-sliceable view.
            peer_list = list(ptbl.keys())

        if local_inv is None:
            # what's my inventory?
            inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
            local_inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path)

        peer_availability_ranking = []    # (availability score, peer hostport)
        for peer_hostport in peer_list:
            peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

            # ignore peers that we don't have an inventory for
            if len(peer_inv) == 0:
                continue

            availability_score = atlas_inventory_count_missing(local_inv, peer_inv)
            peer_availability_ranking.append((availability_score, peer_hostport))

        # sort on availability, highest score first
        peer_availability_ranking.sort()
        peer_availability_ranking.reverse()

        return [peer_hp for _, peer_hp in peer_availability_ranking]
Get a ranking of peers to contact for a zonefile. Peers are ranked by the number of zonefiles they have which we don't have. This is used to select neighbors.
Below is the the instruction that describes the task: ### Input: Get a ranking of peers to contact for a zonefile. Peers are ranked by the number of zonefiles they have which we don't have. This is used to select neighbors. ### Response: def atlas_rank_peers_by_data_availability( peer_list=None, peer_table=None, local_inv=None, con=None, path=None ): """ Get a ranking of peers to contact for a zonefile. Peers are ranked by the number of zonefiles they have which we don't have. This is used to select neighbors. """ with AtlasPeerTableLocked(peer_table) as ptbl: if peer_list is None: peer_list = ptbl.keys()[:] if local_inv is None: # what's my inventory? inv_len = atlasdb_zonefile_inv_length( con=con, path=path ) local_inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path ) peer_availability_ranking = [] # (health score, peer hostport) for peer_hostport in peer_list: peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl ) # ignore peers that we don't have an inventory for if len(peer_inv) == 0: continue availability_score = atlas_inventory_count_missing( local_inv, peer_inv ) peer_availability_ranking.append( (availability_score, peer_hostport) ) # sort on availability peer_availability_ranking.sort() peer_availability_ranking.reverse() return [peer_hp for _, peer_hp in peer_availability_ranking]
def get_upcoming_events(self):
    """
    Get upcoming PythonKC meetup events.

    Returns
    -------
    List of ``pythonkc_meetups.types.MeetupEvent``, ordered by event
    time, ascending.

    Exceptions
    ----------
    * PythonKCMeetupsBadJson
    * PythonKCMeetupsBadResponse
    * PythonKCMeetupsMeetupDown
    * PythonKCMeetupsNotJson
    * PythonKCMeetupsRateLimitExceeded
    """
    # Build the query string with our API key and the group name.
    params = {'key': self._api_key, 'group_urlname': GROUP_URLNAME}
    request_url = '{0}?{1}'.format(EVENTS_URL, urllib.urlencode(params))

    response = self._http_get_json(request_url)
    return [parse_event(event) for event in response['results']]
Get upcoming PythonKC meetup events. Returns ------- List of ``pythonkc_meetups.types.MeetupEvent``, ordered by event time, ascending. Exceptions ---------- * PythonKCMeetupsBadJson * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsNotJson * PythonKCMeetupsRateLimitExceeded
Below is the the instruction that describes the task: ### Input: Get upcoming PythonKC meetup events. Returns ------- List of ``pythonkc_meetups.types.MeetupEvent``, ordered by event time, ascending. Exceptions ---------- * PythonKCMeetupsBadJson * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsNotJson * PythonKCMeetupsRateLimitExceeded ### Response: def get_upcoming_events(self): """ Get upcoming PythonKC meetup events. Returns ------- List of ``pythonkc_meetups.types.MeetupEvent``, ordered by event time, ascending. Exceptions ---------- * PythonKCMeetupsBadJson * PythonKCMeetupsBadResponse * PythonKCMeetupsMeetupDown * PythonKCMeetupsNotJson * PythonKCMeetupsRateLimitExceeded """ query = urllib.urlencode({'key': self._api_key, 'group_urlname': GROUP_URLNAME}) url = '{0}?{1}'.format(EVENTS_URL, query) data = self._http_get_json(url) events = data['results'] return [parse_event(event) for event in events]
def nanmean(values, axis=None, skipna=True, mask=None):
    """
    Compute the mean of the element along an axis ignoring NaNs

    Parameters
    ----------
    values : ndarray
    axis: int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> import pandas.core.nanops as nanops
    >>> s = pd.Series([1, 2, np.nan])
    >>> nanops.nanmean(s)
    1.5
    """
    values, mask, dtype, dtype_max, _ = _get_values(
        values, skipna, 0, mask=mask)
    dtype_sum = dtype_max
    dtype_count = np.float64
    # Integer/datetime/timedelta inputs are summed in float64 — presumably
    # to make the subsequent division meaningful and avoid overflow; float
    # inputs keep their own precision for both sum and count.
    if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or
            is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)):
        dtype_sum = np.float64
    elif is_float_dtype(dtype):
        dtype_sum = dtype
        dtype_count = dtype
    count = _get_counts(mask, axis, dtype=dtype_count)
    the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))

    if axis is not None and getattr(the_sum, 'ndim', False):
        # Array result: divide elementwise, then NaN out positions that
        # had no valid observations.
        with np.errstate(all="ignore"):
            # suppress division by zero warnings
            the_mean = the_sum / count
        ct_mask = count == 0
        if ct_mask.any():
            the_mean[ct_mask] = np.nan
    else:
        # Scalar result: NaN when there were no valid observations.
        the_mean = the_sum / count if count > 0 else np.nan

    return _wrap_results(the_mean, dtype)
Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5
Below is the the instruction that describes the task: ### Input: Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5 ### Response: def nanmean(values, axis=None, skipna=True, mask=None): """ Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5 """ values, mask, dtype, dtype_max, _ = _get_values( values, skipna, 0, mask=mask) dtype_sum = dtype_max dtype_count = np.float64 if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)): dtype_sum = np.float64 elif is_float_dtype(dtype): dtype_sum = dtype dtype_count = dtype count = _get_counts(mask, axis, dtype=dtype_count) the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) if axis is not None and getattr(the_sum, 'ndim', False): with np.errstate(all="ignore"): # suppress division by zero warnings the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): the_mean[ct_mask] = np.nan else: the_mean = the_sum / count if count > 0 else np.nan return _wrap_results(the_mean, dtype)
def update_product(product_id, **kwargs): """ Update a Product with new information """ content = update_product_raw(product_id, **kwargs) if content: return utils.format_json(content)
Update a Product with new information
Below is the the instruction that describes the task: ### Input: Update a Product with new information ### Response: def update_product(product_id, **kwargs): """ Update a Product with new information """ content = update_product_raw(product_id, **kwargs) if content: return utils.format_json(content)
def is_entailed_by(self, other): """ Means merging other with self does not produce any new information. """ if not set(self.include.keys()).issubset(set(other.include.keys())): return False if not self.exclude.isuperset(other.exclude): return False if not self.prototype.is_entailed_by(other.prototype): return False return True
Means merging other with self does not produce any new information.
Below is the the instruction that describes the task: ### Input: Means merging other with self does not produce any new information. ### Response: def is_entailed_by(self, other): """ Means merging other with self does not produce any new information. """ if not set(self.include.keys()).issubset(set(other.include.keys())): return False if not self.exclude.isuperset(other.exclude): return False if not self.prototype.is_entailed_by(other.prototype): return False return True
def filter_not_t(func): """ Transformation for Sequence.filter_not :param func: filter_not function :return: transformation """ return Transformation('filter_not({0})'.format(name(func)), partial(six.moves.filterfalse, func), {ExecutionStrategies.PARALLEL})
Transformation for Sequence.filter_not :param func: filter_not function :return: transformation
Below is the the instruction that describes the task: ### Input: Transformation for Sequence.filter_not :param func: filter_not function :return: transformation ### Response: def filter_not_t(func): """ Transformation for Sequence.filter_not :param func: filter_not function :return: transformation """ return Transformation('filter_not({0})'.format(name(func)), partial(six.moves.filterfalse, func), {ExecutionStrategies.PARALLEL})
def get_data(latitude=52.091579, longitude=5.119734, usexml=False): """Get buienradar xml data and return results.""" if usexml: log.info("Getting buienradar XML data for latitude=%s, longitude=%s", latitude, longitude) return get_xml_data(latitude, longitude) else: log.info("Getting buienradar JSON data for latitude=%s, longitude=%s", latitude, longitude) return get_json_data(latitude, longitude)
Get buienradar xml data and return results.
Below is the the instruction that describes the task: ### Input: Get buienradar xml data and return results. ### Response: def get_data(latitude=52.091579, longitude=5.119734, usexml=False): """Get buienradar xml data and return results.""" if usexml: log.info("Getting buienradar XML data for latitude=%s, longitude=%s", latitude, longitude) return get_xml_data(latitude, longitude) else: log.info("Getting buienradar JSON data for latitude=%s, longitude=%s", latitude, longitude) return get_json_data(latitude, longitude)
def _set_sharing_keys(self, keys): """ Set the keys to share or unshare Parameters ---------- keys: string or iterable of strings The iterable may contain formatoptions that shall be shared (or unshared), or group names of formatoptions to share all formatoptions of that group (see the :attr:`fmt_groups` property). If None, all formatoptions of this plotter are inserted. Returns ------- set The set of formatoptions to share (or unshare)""" if isinstance(keys, str): keys = {keys} keys = set(self) if keys is None else set(keys) fmto_groups = self._fmto_groups keys.update(chain(*(map(lambda fmto: fmto.key, fmto_groups[key]) for key in keys.intersection(fmto_groups)))) keys.difference_update(fmto_groups) return keys
Set the keys to share or unshare Parameters ---------- keys: string or iterable of strings The iterable may contain formatoptions that shall be shared (or unshared), or group names of formatoptions to share all formatoptions of that group (see the :attr:`fmt_groups` property). If None, all formatoptions of this plotter are inserted. Returns ------- set The set of formatoptions to share (or unshare)
Below is the the instruction that describes the task: ### Input: Set the keys to share or unshare Parameters ---------- keys: string or iterable of strings The iterable may contain formatoptions that shall be shared (or unshared), or group names of formatoptions to share all formatoptions of that group (see the :attr:`fmt_groups` property). If None, all formatoptions of this plotter are inserted. Returns ------- set The set of formatoptions to share (or unshare) ### Response: def _set_sharing_keys(self, keys): """ Set the keys to share or unshare Parameters ---------- keys: string or iterable of strings The iterable may contain formatoptions that shall be shared (or unshared), or group names of formatoptions to share all formatoptions of that group (see the :attr:`fmt_groups` property). If None, all formatoptions of this plotter are inserted. Returns ------- set The set of formatoptions to share (or unshare)""" if isinstance(keys, str): keys = {keys} keys = set(self) if keys is None else set(keys) fmto_groups = self._fmto_groups keys.update(chain(*(map(lambda fmto: fmto.key, fmto_groups[key]) for key in keys.intersection(fmto_groups)))) keys.difference_update(fmto_groups) return keys
def get_all_kernel_specs_for_envs(self): """Returns the dict of name -> kernel_spec for all environments""" data = self._get_env_data() return {name: data[name][1] for name in data}
Returns the dict of name -> kernel_spec for all environments
Below is the the instruction that describes the task: ### Input: Returns the dict of name -> kernel_spec for all environments ### Response: def get_all_kernel_specs_for_envs(self): """Returns the dict of name -> kernel_spec for all environments""" data = self._get_env_data() return {name: data[name][1] for name in data}
def _driver_signing_reg_reverse_conversion(cls, val, **kwargs): ''' converts the string value seen in the GUI to the correct registry value for secedit ''' if val is not None: if val.upper() == 'SILENTLY SUCCEED': return ','.join(['3', '0']) elif val.upper() == 'WARN BUT ALLOW INSTALLATION': return ','.join(['3', chr(1)]) elif val.upper() == 'DO NOT ALLOW INSTALLATION': return ','.join(['3', chr(2)]) else: return 'Invalid Value' else: return 'Not Defined'
converts the string value seen in the GUI to the correct registry value for secedit
Below is the the instruction that describes the task: ### Input: converts the string value seen in the GUI to the correct registry value for secedit ### Response: def _driver_signing_reg_reverse_conversion(cls, val, **kwargs): ''' converts the string value seen in the GUI to the correct registry value for secedit ''' if val is not None: if val.upper() == 'SILENTLY SUCCEED': return ','.join(['3', '0']) elif val.upper() == 'WARN BUT ALLOW INSTALLATION': return ','.join(['3', chr(1)]) elif val.upper() == 'DO NOT ALLOW INSTALLATION': return ','.join(['3', chr(2)]) else: return 'Invalid Value' else: return 'Not Defined'
def account_lists(self, id): """ Get all of the logged-in users lists which the specified user is a member of. Returns a list of `list dicts`_. """ id = self.__unpack_id(id) params = self.__generate_params(locals(), ['id']) url = '/api/v1/accounts/{0}/lists'.format(str(id)) return self.__api_request('GET', url, params)
Get all of the logged-in users lists which the specified user is a member of. Returns a list of `list dicts`_.
Below is the the instruction that describes the task: ### Input: Get all of the logged-in users lists which the specified user is a member of. Returns a list of `list dicts`_. ### Response: def account_lists(self, id): """ Get all of the logged-in users lists which the specified user is a member of. Returns a list of `list dicts`_. """ id = self.__unpack_id(id) params = self.__generate_params(locals(), ['id']) url = '/api/v1/accounts/{0}/lists'.format(str(id)) return self.__api_request('GET', url, params)
async def begin(request: web.Request) -> web.Response: """ Begin a session """ if None is not session_from_request(request): LOG.warning("begin: requested with active session") return web.json_response( data={'message': 'An update session is already active on this robot', 'error': 'session-already-active'}, status=409) session = UpdateSession( config.config_from_request(request).download_storage_path) request.app[SESSION_VARNAME] = session return web.json_response( data={'token': session.token}, status=201)
Begin a session
Below is the the instruction that describes the task: ### Input: Begin a session ### Response: async def begin(request: web.Request) -> web.Response: """ Begin a session """ if None is not session_from_request(request): LOG.warning("begin: requested with active session") return web.json_response( data={'message': 'An update session is already active on this robot', 'error': 'session-already-active'}, status=409) session = UpdateSession( config.config_from_request(request).download_storage_path) request.app[SESSION_VARNAME] = session return web.json_response( data={'token': session.token}, status=201)
def fromProfileName(cls, name): """Return a `SessionAPI` from a given configuration profile name. :see: `ProfileStore`. """ with profiles.ProfileStore.open() as config: return cls.fromProfile(config.load(name))
Return a `SessionAPI` from a given configuration profile name. :see: `ProfileStore`.
Below is the the instruction that describes the task: ### Input: Return a `SessionAPI` from a given configuration profile name. :see: `ProfileStore`. ### Response: def fromProfileName(cls, name): """Return a `SessionAPI` from a given configuration profile name. :see: `ProfileStore`. """ with profiles.ProfileStore.open() as config: return cls.fromProfile(config.load(name))
def find_field(browser, field, value): """Locate an input field of a given value This first looks for the value as the id of the element, then the name of the element, then a label for the element. """ return find_field_by_id(browser, field, value) + \ find_field_by_name(browser, field, value) + \ find_field_by_label(browser, field, value)
Locate an input field of a given value This first looks for the value as the id of the element, then the name of the element, then a label for the element.
Below is the the instruction that describes the task: ### Input: Locate an input field of a given value This first looks for the value as the id of the element, then the name of the element, then a label for the element. ### Response: def find_field(browser, field, value): """Locate an input field of a given value This first looks for the value as the id of the element, then the name of the element, then a label for the element. """ return find_field_by_id(browser, field, value) + \ find_field_by_name(browser, field, value) + \ find_field_by_label(browser, field, value)
def _FormatReturnOrExitToken(self, token_data): """Formats a return or exit token as a dictionary of values. Args: token_data (bsm_token_data_exit|bsm_token_data_return32| bsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or AUT_RETURN64 token data. Returns: dict[str, str]: token values. """ error_string = bsmtoken.BSM_ERRORS.get(token_data.status, 'UNKNOWN') return { 'error': error_string, 'token_status': token_data.status, 'call_status': token_data.return_value}
Formats a return or exit token as a dictionary of values. Args: token_data (bsm_token_data_exit|bsm_token_data_return32| bsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or AUT_RETURN64 token data. Returns: dict[str, str]: token values.
Below is the the instruction that describes the task: ### Input: Formats a return or exit token as a dictionary of values. Args: token_data (bsm_token_data_exit|bsm_token_data_return32| bsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or AUT_RETURN64 token data. Returns: dict[str, str]: token values. ### Response: def _FormatReturnOrExitToken(self, token_data): """Formats a return or exit token as a dictionary of values. Args: token_data (bsm_token_data_exit|bsm_token_data_return32| bsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or AUT_RETURN64 token data. Returns: dict[str, str]: token values. """ error_string = bsmtoken.BSM_ERRORS.get(token_data.status, 'UNKNOWN') return { 'error': error_string, 'token_status': token_data.status, 'call_status': token_data.return_value}
def open(self, mode='read'): """Open the file.""" if self.file: self.close() raise 'Close file before opening.' if mode == 'write': self.file = open(self.path, 'w') elif mode == 'overwrite': # Delete file if exist. self.file = open(self.path, 'w+') else: # Open for reading. self.file = open(self.path, 'r') self._csv = csv.DictWriter(self.file, fieldnames=self.fields, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL, extrasaction='ignore') if self.file.tell() == 0: self._csv.writeheader()
Open the file.
Below is the the instruction that describes the task: ### Input: Open the file. ### Response: def open(self, mode='read'): """Open the file.""" if self.file: self.close() raise 'Close file before opening.' if mode == 'write': self.file = open(self.path, 'w') elif mode == 'overwrite': # Delete file if exist. self.file = open(self.path, 'w+') else: # Open for reading. self.file = open(self.path, 'r') self._csv = csv.DictWriter(self.file, fieldnames=self.fields, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL, extrasaction='ignore') if self.file.tell() == 0: self._csv.writeheader()
def zhuyin_to_pinyin(s, accented=True): """Convert all Zhuyin syllables in *s* to Pinyin. If *accented* is ``True``, diacritics are added to the Pinyin syllables. If it's ``False``, numbers are used to indicate tone. """ if accented: function = _zhuyin_syllable_to_accented else: function = _zhuyin_syllable_to_numbered return _convert(s, zhon.zhuyin.syllable, function)
Convert all Zhuyin syllables in *s* to Pinyin. If *accented* is ``True``, diacritics are added to the Pinyin syllables. If it's ``False``, numbers are used to indicate tone.
Below is the the instruction that describes the task: ### Input: Convert all Zhuyin syllables in *s* to Pinyin. If *accented* is ``True``, diacritics are added to the Pinyin syllables. If it's ``False``, numbers are used to indicate tone. ### Response: def zhuyin_to_pinyin(s, accented=True): """Convert all Zhuyin syllables in *s* to Pinyin. If *accented* is ``True``, diacritics are added to the Pinyin syllables. If it's ``False``, numbers are used to indicate tone. """ if accented: function = _zhuyin_syllable_to_accented else: function = _zhuyin_syllable_to_numbered return _convert(s, zhon.zhuyin.syllable, function)
def _TerminateProcess(self, process): """Terminate a process. Args: process (MultiProcessBaseProcess): process to terminate. """ pid = process.pid logger.warning('Terminating process: (PID: {0:d}).'.format(pid)) process.terminate() # Wait for the process to exit. process.join(timeout=self._PROCESS_JOIN_TIMEOUT) if process.is_alive(): logger.warning('Killing process: (PID: {0:d}).'.format(pid)) self._KillProcess(pid)
Terminate a process. Args: process (MultiProcessBaseProcess): process to terminate.
Below is the the instruction that describes the task: ### Input: Terminate a process. Args: process (MultiProcessBaseProcess): process to terminate. ### Response: def _TerminateProcess(self, process): """Terminate a process. Args: process (MultiProcessBaseProcess): process to terminate. """ pid = process.pid logger.warning('Terminating process: (PID: {0:d}).'.format(pid)) process.terminate() # Wait for the process to exit. process.join(timeout=self._PROCESS_JOIN_TIMEOUT) if process.is_alive(): logger.warning('Killing process: (PID: {0:d}).'.format(pid)) self._KillProcess(pid)
def IsEnabled(self, *args, **kwargs): "check if all menu items are enabled" for i in range(self.GetMenuItemCount()): it = self.FindItemByPosition(i) if not it.IsEnabled(): return False return True
check if all menu items are enabled
Below is the the instruction that describes the task: ### Input: check if all menu items are enabled ### Response: def IsEnabled(self, *args, **kwargs): "check if all menu items are enabled" for i in range(self.GetMenuItemCount()): it = self.FindItemByPosition(i) if not it.IsEnabled(): return False return True
def significant_control(self, num, entity_id, entity_type='individual', **kwargs): """Get details of a specific entity with significant control. Args: num (str, int): Company number to search on. entity_id (str, int): Entity id to request details for entity_type (str, int): What type of entity to search for. Defaults to 'individual'. Other possible opetions are 'corporate' (for corporate entitys), 'legal' (for legal persons), 'statements' (for a person with significant control statement) and 'secure' (for a super secure person). kwargs (dict): additional keywords passed into requests.session.get *params* keyword. """ # Dict mapping entity_type strings to url strings entities = {'individual': 'individual', 'corporate': 'corporate-entity', 'legal': 'legal-person', 'statements': 'persons-with-significant-control-statements', 'secure': 'super-secure'} # Make sure correct entity_type supplied try: entity = entities[entity_type] except KeyError as e: msg = ("Wrong entity_type supplied. Please choose from " + "individual, corporate, legal, statements or secure") raise Exception(msg) from e # Construct the request and return the result baseuri = (self._BASE_URI + 'company/{}/persons-with-significant-control/'.format(num) + '{}/{}'.format(entity, entity_id)) res = self.session.get(baseuri, params=kwargs) self.handle_http_error(res) return res
Get details of a specific entity with significant control. Args: num (str, int): Company number to search on. entity_id (str, int): Entity id to request details for entity_type (str, int): What type of entity to search for. Defaults to 'individual'. Other possible opetions are 'corporate' (for corporate entitys), 'legal' (for legal persons), 'statements' (for a person with significant control statement) and 'secure' (for a super secure person). kwargs (dict): additional keywords passed into requests.session.get *params* keyword.
Below is the the instruction that describes the task: ### Input: Get details of a specific entity with significant control. Args: num (str, int): Company number to search on. entity_id (str, int): Entity id to request details for entity_type (str, int): What type of entity to search for. Defaults to 'individual'. Other possible opetions are 'corporate' (for corporate entitys), 'legal' (for legal persons), 'statements' (for a person with significant control statement) and 'secure' (for a super secure person). kwargs (dict): additional keywords passed into requests.session.get *params* keyword. ### Response: def significant_control(self, num, entity_id, entity_type='individual', **kwargs): """Get details of a specific entity with significant control. Args: num (str, int): Company number to search on. entity_id (str, int): Entity id to request details for entity_type (str, int): What type of entity to search for. Defaults to 'individual'. Other possible opetions are 'corporate' (for corporate entitys), 'legal' (for legal persons), 'statements' (for a person with significant control statement) and 'secure' (for a super secure person). kwargs (dict): additional keywords passed into requests.session.get *params* keyword. """ # Dict mapping entity_type strings to url strings entities = {'individual': 'individual', 'corporate': 'corporate-entity', 'legal': 'legal-person', 'statements': 'persons-with-significant-control-statements', 'secure': 'super-secure'} # Make sure correct entity_type supplied try: entity = entities[entity_type] except KeyError as e: msg = ("Wrong entity_type supplied. Please choose from " + "individual, corporate, legal, statements or secure") raise Exception(msg) from e # Construct the request and return the result baseuri = (self._BASE_URI + 'company/{}/persons-with-significant-control/'.format(num) + '{}/{}'.format(entity, entity_id)) res = self.session.get(baseuri, params=kwargs) self.handle_http_error(res) return res
def find_reactions_with_identical_genes(model): """ Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean-up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports on reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear duplicates based on their gene-protein-reaction associations. """ duplicates = dict() for rxn_a, rxn_b in combinations(model.reactions, 2): if not (rxn_a.genes and rxn_b.genes): continue if rxn_a.genes == rxn_b.genes: # This works because the `genes` are frozen sets. identifiers = rxn_a.genes duplicates.setdefault(identifiers, set()).update( [rxn_a.id, rxn_b.id]) # Transform the object for JSON compatibility num_duplicated = set() duplicated = {} for key in duplicates: # Object keys must be strings in JSON. new_key = ",".join(sorted(g.id for g in key)) duplicated[new_key] = rxns = list(duplicates[key]) num_duplicated.update(rxns) return duplicated, len(num_duplicated)
Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean-up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports on reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear duplicates based on their gene-protein-reaction associations.
Below is the the instruction that describes the task: ### Input: Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean-up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports on reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear duplicates based on their gene-protein-reaction associations. ### Response: def find_reactions_with_identical_genes(model): """ Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean-up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports on reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear duplicates based on their gene-protein-reaction associations. """ duplicates = dict() for rxn_a, rxn_b in combinations(model.reactions, 2): if not (rxn_a.genes and rxn_b.genes): continue if rxn_a.genes == rxn_b.genes: # This works because the `genes` are frozen sets. identifiers = rxn_a.genes duplicates.setdefault(identifiers, set()).update( [rxn_a.id, rxn_b.id]) # Transform the object for JSON compatibility num_duplicated = set() duplicated = {} for key in duplicates: # Object keys must be strings in JSON. 
new_key = ",".join(sorted(g.id for g in key)) duplicated[new_key] = rxns = list(duplicates[key]) num_duplicated.update(rxns) return duplicated, len(num_duplicated)
def _relativePath(self, fullPath): """ Return fullPath relative to Store directory. Return fullPath if fullPath is not inside directory. Return None if fullPath is outside our scope. """ if fullPath is None: return None assert fullPath.startswith("/"), fullPath path = os.path.relpath(fullPath, self.userPath) if not path.startswith("../"): return path elif self.ignoreExtraVolumes: return None else: return fullPath
Return fullPath relative to Store directory. Return fullPath if fullPath is not inside directory. Return None if fullPath is outside our scope.
Below is the the instruction that describes the task: ### Input: Return fullPath relative to Store directory. Return fullPath if fullPath is not inside directory. Return None if fullPath is outside our scope. ### Response: def _relativePath(self, fullPath): """ Return fullPath relative to Store directory. Return fullPath if fullPath is not inside directory. Return None if fullPath is outside our scope. """ if fullPath is None: return None assert fullPath.startswith("/"), fullPath path = os.path.relpath(fullPath, self.userPath) if not path.startswith("../"): return path elif self.ignoreExtraVolumes: return None else: return fullPath
def _parse_image_name(self, image, retry=True): '''starting with an image string in either of the following formats: job_id|collection job_id|collection|job_name Parse the job_name, job_id, and collection uri from it. If the user provides the first option, we use the job_name set by the client (default is build). Parameters ========== image: the string to parse, with values separated by | retry: the client can call itself recursively once, providing the default job_name if the user doesn't. ''' try: job_id, collection, job_name = image.split(',') except: # Retry and add job_name if retry: return self._parse_image_name("%s,%s" %(image, self.job), retry=False) # Or fail bot.exit('''Malformed image string! Please provide: job_id,collection (or) job_id,collection,job_name''') return job_id, collection, job_name
starting with an image string in either of the following formats: job_id|collection job_id|collection|job_name Parse the job_name, job_id, and collection uri from it. If the user provides the first option, we use the job_name set by the client (default is build). Parameters ========== image: the string to parse, with values separated by | retry: the client can call itself recursively once, providing the default job_name if the user doesn't.
Below is the the instruction that describes the task: ### Input: starting with an image string in either of the following formats: job_id|collection job_id|collection|job_name Parse the job_name, job_id, and collection uri from it. If the user provides the first option, we use the job_name set by the client (default is build). Parameters ========== image: the string to parse, with values separated by | retry: the client can call itself recursively once, providing the default job_name if the user doesn't. ### Response: def _parse_image_name(self, image, retry=True): '''starting with an image string in either of the following formats: job_id|collection job_id|collection|job_name Parse the job_name, job_id, and collection uri from it. If the user provides the first option, we use the job_name set by the client (default is build). Parameters ========== image: the string to parse, with values separated by | retry: the client can call itself recursively once, providing the default job_name if the user doesn't. ''' try: job_id, collection, job_name = image.split(',') except: # Retry and add job_name if retry: return self._parse_image_name("%s,%s" %(image, self.job), retry=False) # Or fail bot.exit('''Malformed image string! Please provide: job_id,collection (or) job_id,collection,job_name''') return job_id, collection, job_name
def parse(self, content): """Parse raw response content for a list of remote artifact cache URLs. :API: public """ if self.format == 'json_map': try: return assert_list(json.loads(content.decode(self.encoding))[self.index]) except (KeyError, UnicodeDecodeError, ValueError) as e: raise self.ResponseParserError("Error while parsing response content: {0}".format(str(e))) # Should never get here. raise ValueError('Unknown content format: "{}"'.format(self.format))
Parse raw response content for a list of remote artifact cache URLs. :API: public
Below is the the instruction that describes the task: ### Input: Parse raw response content for a list of remote artifact cache URLs. :API: public ### Response: def parse(self, content): """Parse raw response content for a list of remote artifact cache URLs. :API: public """ if self.format == 'json_map': try: return assert_list(json.loads(content.decode(self.encoding))[self.index]) except (KeyError, UnicodeDecodeError, ValueError) as e: raise self.ResponseParserError("Error while parsing response content: {0}".format(str(e))) # Should never get here. raise ValueError('Unknown content format: "{}"'.format(self.format))
def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name='app'): """ Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign """ target = self.get_target() build_tool = android_helper.get_highest_build_tool(target.split('-')[1]) if keystore is None: (keystore, storepass, keypass, alias) = android_helper.get_default_keystore() dist = '%s/%s.apk' % ('/'.join(apk.split('/')[:-1]), name) android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path) android_helper.zipalign(apk, dist, build_tool=build_tool, path=self.path)
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign
Below is the the instruction that describes the task: ### Input: Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign ### Response: def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name='app'): """ Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign """ target = self.get_target() build_tool = android_helper.get_highest_build_tool(target.split('-')[1]) if keystore is None: (keystore, storepass, keypass, alias) = android_helper.get_default_keystore() dist = '%s/%s.apk' % ('/'.join(apk.split('/')[:-1]), name) android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path) android_helper.zipalign(apk, dist, build_tool=build_tool, path=self.path)
def get_torrent(self, torrent_id):
    """Download the ``.torrent`` payload for ``torrent_id``.

    :param torrent_id: the ID of the torrent to download
    :raises TorrentNotFoundError: if the torrent does not exist
    :returns: :class:`Torrent` wrapping the downloaded data
    """
    query = {'page': 'download', 'tid': torrent_id}
    response = requests.get(self.base_url, params=query)
    # A non-bittorrent content type signals the torrent does not exist.
    if response.headers.get('content-type') != 'application/x-bittorrent':
        raise TorrentNotFoundError(TORRENT_NOT_FOUND_TEXT)
    return Torrent(torrent_id, response.content)
Gets the `.torrent` data for the given `torrent_id`. :param torrent_id: the ID of the torrent to download :raises TorrentNotFoundError: if the torrent does not exist :returns: :class:`Torrent` of the associated torrent
Below is the the instruction that describes the task: ### Input: Gets the `.torrent` data for the given `torrent_id`. :param torrent_id: the ID of the torrent to download :raises TorrentNotFoundError: if the torrent does not exist :returns: :class:`Torrent` of the associated torrent ### Response: def get_torrent(self, torrent_id): """Gets the `.torrent` data for the given `torrent_id`. :param torrent_id: the ID of the torrent to download :raises TorrentNotFoundError: if the torrent does not exist :returns: :class:`Torrent` of the associated torrent """ params = { 'page': 'download', 'tid': torrent_id, } r = requests.get(self.base_url, params=params) if r.headers.get('content-type') != 'application/x-bittorrent': raise TorrentNotFoundError(TORRENT_NOT_FOUND_TEXT) torrent_data = r.content return Torrent(torrent_id, torrent_data)
def add_post(self, *args, **kwargs):
    """
    Register a POST route; shortcut for add_route with method POST.
    """
    method = hdrs.METH_POST
    return self.add_route(method, *args, **kwargs)
Shortcut for add_route with method POST
Below is the the instruction that describes the task: ### Input: Shortcut for add_route with method POST ### Response: def add_post(self, *args, **kwargs): """ Shortcut for add_route with method POST """ return self.add_route(hdrs.METH_POST, *args, **kwargs)
def get_environ(self, parent_environ=None):
    """Interpret this context and return the resulting environment dict.

    @param parent_environ Environment to interpret the context within,
        defaults to os.environ if None.
    @returns The environment dict generated by this context, when
        interpreted in a python rex interpreter.
    """
    # A passive Python interpreter collects the environment instead of
    # applying it to the current process.
    interpreter = Python(target_environ={}, passive=True)
    executor = self._create_executor(interpreter, parent_environ)
    self._execute(executor)
    return executor.get_output()
Get the environ dict resulting from interpreting this context. @param parent_environ Environment to interpret the context within, defaults to os.environ if None. @returns The environment dict generated by this context, when interpreted in a python rex interpreter.
Below is the the instruction that describes the task: ### Input: Get the environ dict resulting from interpreting this context. @param parent_environ Environment to interpret the context within, defaults to os.environ if None. @returns The environment dict generated by this context, when interpreted in a python rex interpreter. ### Response: def get_environ(self, parent_environ=None): """Get the environ dict resulting from interpreting this context. @param parent_environ Environment to interpret the context within, defaults to os.environ if None. @returns The environment dict generated by this context, when interpreted in a python rex interpreter. """ interp = Python(target_environ={}, passive=True) executor = self._create_executor(interp, parent_environ) self._execute(executor) return executor.get_output()
def get_outliers(campaign, pipeline='everest2', sigma=5):
    '''
    Computes the number of outliers for a given `campaign` and a given
    `pipeline`. Stores the results in a file under "/missions/k2/tables/".

    :param campaign: The K2 campaign number
    :param str pipeline: The de-trending pipeline name. Default `everest2`
    :param int sigma: The sigma level at which to clip outliers. Default 5

    '''
    # Imports
    from .utils import GetK2Campaign
    client = k2plr.API()

    # Check pipeline
    assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline

    # Create the output file if it doesn't exist
    file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                        'c%02d_%s.out' % (int(campaign), pipeline))
    if not os.path.exists(file):
        open(file, 'a').close()

    # Get all EPIC stars
    stars = GetK2Campaign(campaign, epics_only=True)
    nstars = len(stars)

    # Remove ones we've done
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        done = np.loadtxt(file, dtype=float)
    if len(done):
        done = [int(s) for s in done[:, 0]]
    stars = list(set(stars) - set(done))
    n = len(done) + 1

    # Open the output file (line-buffered, so results survive a crash)
    with open(file, 'a', 1) as outfile:

        # Loop over all to get the CDPP
        for EPIC in stars:

            # Progress
            # (repaired: this statement was garbled by a mis-encoded line
            # continuation in the original source)
            sys.stdout.write('\rRunning target %d/%d...' % (n, nstars))
            sys.stdout.flush()
            n += 1

            # Get the number of outliers
            try:
                time, flux = get(EPIC, pipeline=pipeline, campaign=campaign)

                # Get the raw K2 data
                tpf = os.path.join(KPLR_ROOT, "data", "k2",
                                   "target_pixel_files", "%09d" % EPIC,
                                   "ktwo%09d-c%02d_lpd-targ.fits.gz"
                                   % (EPIC, campaign))
                if not os.path.exists(tpf):
                    client.k2_star(EPIC).get_target_pixel_files(fetch=True)
                with pyfits.open(tpf) as f:
                    k2_qual = np.array(f[1].data.field('QUALITY'), dtype=int)
                    k2_time = np.array(
                        f[1].data.field('TIME'), dtype='float64')

                # Collect cadences flagged by the K2 quality bits
                mask = []
                for b in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17]:
                    mask += list(np.where(k2_qual & 2 ** (b - 1))[0])
                mask = np.array(sorted(list(set(mask))))

                # Fill in missing cadences, if any, by aligning the
                # pipeline flux onto the raw K2 time grid
                tol = 0.005
                if not ((len(time) == len(k2_time)) and
                        (np.abs(time[0] - k2_time[0]) < tol) and
                        (np.abs(time[-1] - k2_time[-1]) < tol)):
                    ftmp = np.zeros_like(k2_time) * np.nan
                    j = 0
                    for i, t in enumerate(k2_time):
                        if np.abs(time[j] - t) < tol:
                            ftmp[i] = flux[j]
                            j += 1
                            if j == len(time) - 1:
                                break
                    flux = ftmp

                # Remove flagged cadences
                flux = np.delete(flux, mask)

                # Remove nans
                nanmask = np.where(np.isnan(flux))[0]
                flux = np.delete(flux, nanmask)

                # Iterative sigma clipping
                # NOTE(review): with `m = 1` and the test `len(inds) < m`,
                # this loop body runs exactly once; if clipping until
                # convergence was intended, the condition should probably
                # be `len(inds) > m` -- confirm before changing, as it
                # alters the published counts.
                inds = np.array([], dtype=int)
                m = 1
                while len(inds) < m:
                    m = len(inds)
                    f = SavGol(np.delete(flux, inds))
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    inds = np.append(inds, np.where(
                        (f > med + sigma * MAD) |
                        (f < med - sigma * MAD))[0])
                nout = len(inds)
                ntot = len(flux)

            except (urllib.error.HTTPError, urllib.error.URLError,
                    TypeError, ValueError, IndexError):
                # Record a sentinel row so the target is not retried forever
                print("{:>09d} {:>5d} {:>5d}".format(
                    EPIC, -1, -1), file=outfile)
                continue

            # Log to file
            print("{:>09d} {:>5d} {:>5d}".format(
                EPIC, nout, ntot), file=outfile)
Computes the number of outliers for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". :param int sigma: The sigma level at which to clip outliers. Default 5
Below is the the instruction that describes the task: ### Input: Computes the number of outliers for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". :param int sigma: The sigma level at which to clip outliers. Default 5 ### Response: def get_outliers(campaign, pipeline='everest2', sigma=5): ''' Computes the number of outliers for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". :param int sigma: The sigma level at which to clip outliers. Default 5 ''' # Imports from .utils import GetK2Campaign client = k2plr.API() # Check pipeline assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline # Create file if it doesn't exist file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.out' % (int(campaign), pipeline)) if not os.path.exists(file): open(file, 'a').close() # Get all EPIC stars stars = GetK2Campaign(campaign, epics_only=True) nstars = len(stars) # Remove ones we've done with warnings.catch_warnings(): warnings.simplefilter("ignore") done = np.loadtxt(file, dtype=float) if len(done): done = [int(s) for s in done[:, 0]] stars = list(set(stars) - set(done)) n = len(done) + 1 # Open the output file with open(file, 'a', 1) as outfile: # Loop over all to get the CDPP for EPIC in stars: # Progress sys.stdout.write('\rRunning target %d/%d...' 
% (n, nstars)) sys.stdout.flush() n += 1 # Get the number of outliers try: time, flux = get(EPIC, pipeline=pipeline, campaign=campaign) # Get the raw K2 data tpf = os.path.join(KPLR_ROOT, "data", "k2", "target_pixel_files", "%09d" % EPIC, "ktwo%09d-c%02d_lpd-targ.fits.gz" % (EPIC, campaign)) if not os.path.exists(tpf): client.k2_star(EPIC).get_target_pixel_files(fetch=True) with pyfits.open(tpf) as f: k2_qual = np.array(f[1].data.field('QUALITY'), dtype=int) k2_time = np.array( f[1].data.field('TIME'), dtype='float64') mask = [] for b in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17]: mask += list(np.where(k2_qual & 2 ** (b - 1))[0]) mask = np.array(sorted(list(set(mask)))) # Fill in missing cadences, if any tol = 0.005 if not ((len(time) == len(k2_time)) and (np.abs(time[0] - k2_time[0]) < tol) and (np.abs(time[-1] - k2_time[-1]) < tol)): ftmp = np.zeros_like(k2_time) * np.nan j = 0 for i, t in enumerate(k2_time): if np.abs(time[j] - t) < tol: ftmp[i] = flux[j] j += 1 if j == len(time) - 1: break flux = ftmp # Remove flagged cadences flux = np.delete(flux, mask) # Remove nans nanmask = np.where(np.isnan(flux))[0] flux = np.delete(flux, nanmask) # Iterative sigma clipping inds = np.array([], dtype=int) m = 1 while len(inds) < m: m = len(inds) f = SavGol(np.delete(flux, inds)) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) inds = np.append(inds, np.where( (f > med + sigma * MAD) | (f < med - sigma * MAD))[0]) nout = len(inds) ntot = len(flux) except (urllib.error.HTTPError, urllib.error.URLError, TypeError, ValueError, IndexError): print("{:>09d} {:>5d} {:>5d}".format( EPIC, -1, -1), file=outfile) continue # Log to file print("{:>09d} {:>5d} {:>5d}".format( EPIC, nout, ntot), file=outfile)
def set_user_agent(self, user_agent):
    """Replace the User-agent header of the requests session.

    When ``user_agent`` is None, a default combining the requests
    default UA with this package's name and version is used.
    """
    if user_agent is None:
        base_agent = requests.utils.default_user_agent()
        user_agent = '%s (%s/%s)' % (base_agent, __title__, __version__)
    # The requests session header dict is case-insensitive.
    self.session.headers['User-agent'] = user_agent
Replaces the current user agent in the requests session headers.
Below is the the instruction that describes the task: ### Input: Replaces the current user agent in the requests session headers. ### Response: def set_user_agent(self, user_agent): """Replaces the current user agent in the requests session headers.""" # set a default user_agent if not specified if user_agent is None: requests_ua = requests.utils.default_user_agent() user_agent = '%s (%s/%s)' % (requests_ua, __title__, __version__) # the requests module uses a case-insensitive dict for session headers self.session.headers['User-agent'] = user_agent
def _move_consonant(self, letters: list, positions: List[int]) -> List[str]:
    """
    Given a list of consonant positions, move the consonants according to
    certain consonant syllable behavioral rules for gathering and grouping.

    The first rule that matches wins and the method returns immediately,
    so the ordering of the checks below is significant.

    :param letters: letter groups of the word being syllabified
    :param positions: indices into ``letters`` marking consonants to move
    :return: the letter groups with (at most) one consonant regrouped
    """
    for pos in positions:
        previous_letter = letters[pos - 1]
        consonant = letters[pos]
        next_letter = letters[pos + 1]
        # Consonant before a vowel-initial group moves to start that group.
        if self._contains_vowels(next_letter) and self._starts_with_vowel(next_letter):
            return string_utils.move_consonant_right(letters, [pos])
        # A single vowel-final group before the consonant pulls it left.
        if self._contains_vowels(previous_letter) and self._ends_with_vowel(
                previous_letter) and len(previous_letter) == 1:
            return string_utils.move_consonant_left(letters, [pos])
        # Keep aspirate digraphs (per self.constants.ASPIRATES) together.
        if previous_letter + consonant in self.constants.ASPIRATES:
            return string_utils.move_consonant_left(letters, [pos])
        if consonant + next_letter in self.constants.ASPIRATES:
            return string_utils.move_consonant_right(letters, [pos])
        # Doubled consonant: the first of the pair closes the previous group.
        if next_letter[0] == consonant:
            return string_utils.move_consonant_left(letters, [pos])
        # Mute + liquid clusters stay with the following group.
        if consonant in self.constants.MUTES and next_letter[0] in self.constants.LIQUIDS:
            return string_utils.move_consonant_right(letters, [pos])
        # 'k'/'K' followed by 'w'/'W' is treated like a cluster as well.
        if consonant in ['k', 'K'] and next_letter[0] in ['w', 'W']:
            return string_utils.move_consonant_right(letters, [pos])
        if self._contains_consonants(next_letter[0]) and self._starts_with_vowel(
                previous_letter[-1]):
            return string_utils.move_consonant_left(letters, [pos])
        # fall through case
        if self._contains_consonants(next_letter[0]):
            return string_utils.move_consonant_right(letters, [pos])
    # No rule matched at any position: letters are returned unchanged.
    return letters
Given a list of consonant positions, move the consonants according to certain consonant syllable behavioral rules for gathering and grouping. :param letters: :param positions: :return:
Below is the the instruction that describes the task: ### Input: Given a list of consonant positions, move the consonants according to certain consonant syllable behavioral rules for gathering and grouping. :param letters: :param positions: :return: ### Response: def _move_consonant(self, letters: list, positions: List[int]) -> List[str]: """ Given a list of consonant positions, move the consonants according to certain consonant syllable behavioral rules for gathering and grouping. :param letters: :param positions: :return: """ for pos in positions: previous_letter = letters[pos - 1] consonant = letters[pos] next_letter = letters[pos + 1] if self._contains_vowels(next_letter) and self._starts_with_vowel(next_letter): return string_utils.move_consonant_right(letters, [pos]) if self._contains_vowels(previous_letter) and self._ends_with_vowel( previous_letter) and len(previous_letter) == 1: return string_utils.move_consonant_left(letters, [pos]) if previous_letter + consonant in self.constants.ASPIRATES: return string_utils.move_consonant_left(letters, [pos]) if consonant + next_letter in self.constants.ASPIRATES: return string_utils.move_consonant_right(letters, [pos]) if next_letter[0] == consonant: return string_utils.move_consonant_left(letters, [pos]) if consonant in self.constants.MUTES and next_letter[0] in self.constants.LIQUIDS: return string_utils.move_consonant_right(letters, [pos]) if consonant in ['k', 'K'] and next_letter[0] in ['w', 'W']: return string_utils.move_consonant_right(letters, [pos]) if self._contains_consonants(next_letter[0]) and self._starts_with_vowel( previous_letter[-1]): return string_utils.move_consonant_left(letters, [pos]) # fall through case if self._contains_consonants(next_letter[0]): return string_utils.move_consonant_right(letters, [pos]) return letters
def print_head(self, parent_plate_value, plate_values, interval, n=10, print_func=logging.info):
    """
    Print the first n values from the streams in the given time interval.
    The parent plate value is the value of the parent plate,
    and then the plate values are the values for the plate that are to
    be printed.
    e.g. print_head(None, ("house", "1"))

    :param parent_plate_value: The (fixed) parent plate value
    :param plate_values: The plate values over which to loop
    :param interval: The time interval
    :param n: The maximum number of elements to print
    :param print_func: The function used for printing
        (e.g. logging.info() or print())
    :return: None
    """
    # A Plate object was given: recurse on its concrete values.
    if isinstance(plate_values, Plate):
        self.print_head(parent_plate_value, plate_values.values, interval, n, print_func)
        return
    # A single (meta, value) pair was given: wrap it in a tuple so the
    # loop below sees a sequence of plate values.
    if len(plate_values) == 1 and len(plate_values[0]) == 2 and isinstance(plate_values[0][0], str):
        self.print_head(parent_plate_value, (plate_values,), interval, n, print_func)
        return
    found = False
    for plate_value in plate_values:
        combined_plate_value = Plate.combine_values(parent_plate_value, plate_value)
        if combined_plate_value not in self._streams:
            # This can happen if we have created a compound plate and only
            # certain plate values are valid
            continue
        found = True
        print_func("Plate value: {}".format(combined_plate_value))
        data = False
        # Print at most n (key, value) items from the stream's window.
        for k, v in self._streams[combined_plate_value].window(interval).head(n):
            data = True
            print_func("{}, {}".format(k, v))
        if not data:
            print_func("No data")
        print_func("")
    if not found:
        print_func("No streams found for the given plate values")
Print the first n values from the streams in the given time interval. The parent plate value is the value of the parent plate, and then the plate values are the values for the plate that are to be printed. e.g. print_head(None, ("house", "1")) :param parent_plate_value: The (fixed) parent plate value :param plate_values: The plate values over which to loop :param interval: The time interval :param n: The maximum number of elements to print :param print_func: The function used for printing (e.g. logging.info() or print()) :return: None
Below is the the instruction that describes the task: ### Input: Print the first n values from the streams in the given time interval. The parent plate value is the value of the parent plate, and then the plate values are the values for the plate that are to be printed. e.g. print_head(None, ("house", "1")) :param parent_plate_value: The (fixed) parent plate value :param plate_values: The plate values over which to loop :param interval: The time interval :param n: The maximum number of elements to print :param print_func: The function used for printing (e.g. logging.info() or print()) :return: None ### Response: def print_head(self, parent_plate_value, plate_values, interval, n=10, print_func=logging.info): """ Print the first n values from the streams in the given time interval. The parent plate value is the value of the parent plate, and then the plate values are the values for the plate that are to be printed. e.g. print_head(None, ("house", "1")) :param parent_plate_value: The (fixed) parent plate value :param plate_values: The plate values over which to loop :param interval: The time interval :param n: The maximum number of elements to print :param print_func: The function used for printing (e.g. 
logging.info() or print()) :return: None """ if isinstance(plate_values, Plate): self.print_head(parent_plate_value, plate_values.values, interval, n, print_func) return if len(plate_values) == 1 and len(plate_values[0]) == 2 and isinstance(plate_values[0][0], str): self.print_head(parent_plate_value, (plate_values,), interval, n, print_func) return found = False for plate_value in plate_values: combined_plate_value = Plate.combine_values(parent_plate_value, plate_value) if combined_plate_value not in self._streams: # This can happen if we have created a compound plate and only certain plate values are valid continue found = True print_func("Plate value: {}".format(combined_plate_value)) data = False for k, v in self._streams[combined_plate_value].window(interval).head(n): data = True print_func("{}, {}".format(k, v)) if not data: print_func("No data") print_func("") if not found: print_func("No streams found for the given plate values")
def run_preprocessor(self, files_to_download):
    """
    Run file_download_pre_processor for each file we are about to download.

    :param files_to_download: [ProjectFile]: files that will be downloaded
    """
    preprocessor = self.file_download_pre_processor
    data_service = self.remote_store.data_service
    for item in files_to_download:
        preprocessor.run(data_service, item)
Run file_download_pre_processor for each file we are about to download. :param files_to_download: [ProjectFile]: files that will be downloaded
Below is the the instruction that describes the task: ### Input: Run file_download_pre_processor for each file we are about to download. :param files_to_download: [ProjectFile]: files that will be downloaded ### Response: def run_preprocessor(self, files_to_download): """ Run file_download_pre_processor for each file we are about to download. :param files_to_download: [ProjectFile]: files that will be downloaded """ for project_file in files_to_download: self.file_download_pre_processor.run(self.remote_store.data_service, project_file)
def insert_header(self, hkey, value, index=None):
    """
    Insert or overwrite a header value, keeping ``self.hkeys`` in sync.

    Parameters
    ----------
    hkey
        Header key. If it is an integer, it is resolved to
        ``self.hkeys[hkey]``. Keys are stored as strings; new keys are
        appended to ``self.hkeys`` (or inserted at ``index`` when
        given), while existing keys keep their position.
    value
        Value of the header.
    index=None
        If specified (integer), a *new* hkey will be inserted at this
        location in ``self.hkeys``.

    Returns
    -------
    self, to allow call chaining.
    """
    # Integer keys are positional references into the existing key list.
    # (The original `type(hkey) in [int, int]` listed int twice -- a
    # py2->py3 conversion leftover; `type(hkey) is int` is equivalent.)
    if type(hkey) is int:
        hkey = self.hkeys[hkey]

    # Normalize to string *before* the membership test below, so reusing
    # a non-string key cannot create a duplicate entry in self.hkeys.
    hkey = str(hkey)
    self.headers[hkey] = value

    if hkey not in self.hkeys:
        if index is None:
            self.hkeys.append(hkey)
        else:
            self.hkeys.insert(index, hkey)

    return self
This will insert/overwrite a value to the header and hkeys. Parameters ---------- hkey Header key. Will be appended to self.hkeys if non existent, or inserted at the specified index. If hkey is an integer, uses self.hkeys[hkey]. value Value of the header. index=None If specified (integer), hkey will be inserted at this location in self.hkeys.
Below is the the instruction that describes the task: ### Input: This will insert/overwrite a value to the header and hkeys. Parameters ---------- hkey Header key. Will be appended to self.hkeys if non existent, or inserted at the specified index. If hkey is an integer, uses self.hkeys[hkey]. value Value of the header. index=None If specified (integer), hkey will be inserted at this location in self.hkeys. ### Response: def insert_header(self, hkey, value, index=None): """ This will insert/overwrite a value to the header and hkeys. Parameters ---------- hkey Header key. Will be appended to self.hkeys if non existent, or inserted at the specified index. If hkey is an integer, uses self.hkeys[hkey]. value Value of the header. index=None If specified (integer), hkey will be inserted at this location in self.hkeys. """ #if hkey is '': return # if it's an integer, use the hkey from the list if type(hkey) in [int, int]: hkey = self.hkeys[hkey] # set the data self.headers[str(hkey)] = value if not hkey in self.hkeys: if index is None: self.hkeys.append(str(hkey)) else: self.hkeys.insert(index, str(hkey)) return self
def create_user(self, email, first_name, last_name, password, role="user", metadata=None):
    """
    Create a new user

    :type email: str
    :param email: User's email
    :type first_name: str
    :param first_name: User's first name
    :type last_name: str
    :param last_name: User's last name
    :type password: str
    :param password: User's password
    :type role: str
    :param role: User's default role, one of "admin" or "user"
    :type metadata: dict
    :param metadata: User metadata (defaults to an empty dict)
    :rtype: dict
    :return: an empty dictionary
    """
    # Default to a fresh dict per call -- a mutable default argument
    # ({}) would be shared across all invocations.
    if metadata is None:
        metadata = {}
    data = {
        'firstName': first_name,
        'lastName': last_name,
        'email': email,
        'metadata': metadata,
        # The API expects the role upper-cased; falsy roles pass through.
        'role': role.upper() if role else role,
        'newPassword': password,
    }
    response = self.post('createUser', data)
    return self._handle_empty(email, response)
Create a new user :type email: str :param email: User's email :type first_name: str :param first_name: User's first name :type last_name: str :param last_name: User's last name :type password: str :param password: User's password :type role: str :param role: User's default role, one of "admin" or "user" :type metadata: dict :param metadata: User metadata :rtype: dict :return: an empty dictionary
Below is the the instruction that describes the task: ### Input: Create a new user :type email: str :param email: User's email :type first_name: str :param first_name: User's first name :type last_name: str :param last_name: User's last name :type password: str :param password: User's password :type role: str :param role: User's default role, one of "admin" or "user" :type metadata: dict :param metadata: User metadata :rtype: dict :return: an empty dictionary ### Response: def create_user(self, email, first_name, last_name, password, role="user", metadata={}): """ Create a new user :type email: str :param email: User's email :type first_name: str :param first_name: User's first name :type last_name: str :param last_name: User's last name :type password: str :param password: User's password :type role: str :param role: User's default role, one of "admin" or "user" :type metadata: dict :param metadata: User metadata :rtype: dict :return: an empty dictionary """ data = { 'firstName': first_name, 'lastName': last_name, 'email': email, 'metadata': metadata, 'role': role.upper() if role else role, 'newPassword': password, } response = self.post('createUser', data) return self._handle_empty(email, response)
def create_git_action_for_new_study(self, new_study_id=None):
    """Create a git action and register a study id for it.

    Checks out master branch as a side effect. When ``new_study_id`` is
    not supplied, a fresh id is minted.
    """
    git_action = self.create_git_action()
    study_id = new_study_id if new_study_id is not None else self._mint_new_study_id()
    self.register_doc_id(git_action, study_id)
    return git_action, study_id
Checks out master branch as a side effect
Below is the the instruction that describes the task: ### Input: Checks out master branch as a side effect ### Response: def create_git_action_for_new_study(self, new_study_id=None): """Checks out master branch as a side effect""" ga = self.create_git_action() if new_study_id is None: new_study_id = self._mint_new_study_id() self.register_doc_id(ga, new_study_id) return ga, new_study_id
def ReadSystemConfigurationArtifact(
    self, system_configuration, session_identifier=CURRENT_SESSION):
  """Copies settings from a system configuration artifact.

  Note that this overwrites existing values in the knowledge base.

  Args:
    system_configuration (SystemConfigurationArtifact): system configuration
        artifact.
    session_identifier (Optional[str])): session identifier, where
        CURRENT_SESSION represents the active session.
  """
  code_page = system_configuration.code_page
  if code_page:
    try:
      self.SetCodepage(code_page)
    except ValueError:
      # Keep the current codepage when the artifact's value is unsupported.
      logger.warning(
          'Unsupported codepage: {0:s}, defaulting to {1:s}'.format(
              code_page, self._codepage))

  self._hostnames[session_identifier] = system_configuration.hostname

  # These artifact attributes map one-to-one onto knowledge base values.
  for value_name in (
      'keyboard_layout', 'operating_system', 'operating_system_product',
      'operating_system_version'):
    self.SetValue(value_name, getattr(system_configuration, value_name))

  time_zone = system_configuration.time_zone
  if time_zone:
    try:
      self.SetTimeZone(time_zone)
    except ValueError:
      # Keep the current time zone when the artifact's value is unsupported.
      logger.warning(
          'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
              time_zone, self.timezone.zone))

  accounts = {}
  for user_account in system_configuration.user_accounts:
    accounts[user_account.username] = user_account
  self._user_accounts[session_identifier] = accounts
Reads the knowledge base values from a system configuration artifact. Note that this overwrites existing values in the knowledge base. Args: system_configuration (SystemConfigurationArtifact): system configuration artifact. session_identifier (Optional[str])): session identifier, where CURRENT_SESSION represents the active session.
Below is the the instruction that describes the task: ### Input: Reads the knowledge base values from a system configuration artifact. Note that this overwrites existing values in the knowledge base. Args: system_configuration (SystemConfigurationArtifact): system configuration artifact. session_identifier (Optional[str])): session identifier, where CURRENT_SESSION represents the active session. ### Response: def ReadSystemConfigurationArtifact( self, system_configuration, session_identifier=CURRENT_SESSION): """Reads the knowledge base values from a system configuration artifact. Note that this overwrites existing values in the knowledge base. Args: system_configuration (SystemConfigurationArtifact): system configuration artifact. session_identifier (Optional[str])): session identifier, where CURRENT_SESSION represents the active session. """ if system_configuration.code_page: try: self.SetCodepage(system_configuration.code_page) except ValueError: logger.warning( 'Unsupported codepage: {0:s}, defaulting to {1:s}'.format( system_configuration.code_page, self._codepage)) self._hostnames[session_identifier] = system_configuration.hostname self.SetValue('keyboard_layout', system_configuration.keyboard_layout) self.SetValue('operating_system', system_configuration.operating_system) self.SetValue( 'operating_system_product', system_configuration.operating_system_product) self.SetValue( 'operating_system_version', system_configuration.operating_system_version) if system_configuration.time_zone: try: self.SetTimeZone(system_configuration.time_zone) except ValueError: logger.warning( 'Unsupported time zone: {0:s}, defaulting to {1:s}'.format( system_configuration.time_zone, self.timezone.zone)) self._user_accounts[session_identifier] = { user_account.username: user_account for user_account in system_configuration.user_accounts}
def info_user(self,userid):
    '''Fetch and parse a player's public profile page by player id.

    :param userid: player id string used by the playerInfo.phtml endpoint
    :return: list of [page title, community name, profile field texts...,
        followed by one [number, name, team, price, points, position]
        list per roster row]
    '''
    # Mimic a regular browser request; Referer points at the standings
    # page of the configured domain.
    headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/standings.phtml',"User-Agent": user_agent}
    req = self.session.get('http://'+self.domain+'/playerInfo.phtml?pid='+userid,headers=headers).content
    # NOTE(review): BeautifulSoup is called without an explicit parser,
    # so the result depends on which parser is installed -- confirm.
    soup = BeautifulSoup(req)
    title = soup.title.string
    # Second borderless table holds the community link and profile fields.
    community = soup.find_all('table',border=0)[1].a.text
    info = []
    info.append(title)
    info.append(community)
    for i in soup.find_all('table',border=0)[1].find_all('td')[1:]:
        info.append(i.text)
    # Roster table: one row per player, skipping the header row.
    for i in soup.find('table',cellpadding=2).find_all('tr')[1:]:
        cad = i.find_all('td')
        numero=cad[0].text
        nombre=cad[2].text.strip()
        team=cad[3].find('img')['alt']
        # Strip thousands separators ('.') from the price.
        precio=cad[4].text.replace(".","")
        puntos=cad[5].text
        posicion=cad[6].text
        info.append([numero,nombre,team,precio,puntos,posicion])
    return info
Get user info using a ID
Below is the the instruction that describes the task: ### Input: Get user info using a ID ### Response: def info_user(self,userid): '''Get user info using a ID''' headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/standings.phtml',"User-Agent": user_agent} req = self.session.get('http://'+self.domain+'/playerInfo.phtml?pid='+userid,headers=headers).content soup = BeautifulSoup(req) title = soup.title.string community = soup.find_all('table',border=0)[1].a.text info = [] info.append(title) info.append(community) for i in soup.find_all('table',border=0)[1].find_all('td')[1:]: info.append(i.text) for i in soup.find('table',cellpadding=2).find_all('tr')[1:]: cad = i.find_all('td') numero=cad[0].text nombre=cad[2].text.strip() team=cad[3].find('img')['alt'] precio=cad[4].text.replace(".","") puntos=cad[5].text posicion=cad[6].text info.append([numero,nombre,team,precio,puntos,posicion]) return info
def nacm_rule_list_rule_comment(self, **kwargs):
    """Build and submit a NACM rule-list/rule comment config element.

    Auto Generated Code (repaired). Expects keyword arguments:
    ``name`` (shared by the rule-list and rule name leaves),
    ``comment``, and optionally ``callback`` (defaults to
    ``self._callback``). Returns the callback's result.
    """
    config = ET.Element("config")
    nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
    rule_list = ET.SubElement(nacm, "rule-list")
    # Pop the shared 'name' argument once and reuse it: the generated
    # code popped 'name' twice, so the second pop always raised KeyError.
    name = kwargs.pop('name')
    rule_list_name = ET.SubElement(rule_list, "name")
    rule_list_name.text = name
    rule = ET.SubElement(rule_list, "rule")
    rule_name = ET.SubElement(rule, "name")
    rule_name.text = name
    comment = ET.SubElement(rule, "comment")
    comment.text = kwargs.pop('comment')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def nacm_rule_list_rule_comment(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm") rule_list = ET.SubElement(nacm, "rule-list") name_key = ET.SubElement(rule_list, "name") name_key.text = kwargs.pop('name') rule = ET.SubElement(rule_list, "rule") name_key = ET.SubElement(rule, "name") name_key.text = kwargs.pop('name') comment = ET.SubElement(rule, "comment") comment.text = kwargs.pop('comment') callback = kwargs.pop('callback', self._callback) return callback(config)
def terminate(self):
    """Override of PantsService.terminate() that also closes the Pailgun
    TCPServer before delegating to the base implementation."""
    server = self.pailgun
    if server:
        server.server_close()
    super(PailgunService, self).terminate()
Override of PantsService.terminate() that cleans up when the Pailgun server is terminated.
Below is the the instruction that describes the task: ### Input: Override of PantsService.terminate() that cleans up when the Pailgun server is terminated. ### Response: def terminate(self): """Override of PantsService.terminate() that cleans up when the Pailgun server is terminated.""" # Tear down the Pailgun TCPServer. if self.pailgun: self.pailgun.server_close() super(PailgunService, self).terminate()
def _iter_code(code): """Yield '(op,arg)' pair for each operation in code object 'code'""" from array import array from dis import HAVE_ARGUMENT, EXTENDED_ARG bytes = array('b',code.co_code) eof = len(code.co_code) ptr = 0 extended_arg = 0 while ptr<eof: op = bytes[ptr] if op>=HAVE_ARGUMENT: arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg ptr += 3 if op==EXTENDED_ARG: extended_arg = arg * compat.long_type(65536) continue else: arg = None ptr += 1 yield op,arg
Yield '(op,arg)' pair for each operation in code object 'code'
Below is the the instruction that describes the task: ### Input: Yield '(op,arg)' pair for each operation in code object 'code ### Response: def _iter_code(code): """Yield '(op,arg)' pair for each operation in code object 'code'""" from array import array from dis import HAVE_ARGUMENT, EXTENDED_ARG bytes = array('b',code.co_code) eof = len(code.co_code) ptr = 0 extended_arg = 0 while ptr<eof: op = bytes[ptr] if op>=HAVE_ARGUMENT: arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg ptr += 3 if op==EXTENDED_ARG: extended_arg = arg * compat.long_type(65536) continue else: arg = None ptr += 1 yield op,arg
def _login_azure_app_token(client_id=None, client_secret=None, tenant_id=None):
    """Authenticate an app via service-principal token credentials.

    See https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python

    Each argument left unset (or empty) falls back to the matching
    AZURE_* environment variable.

    :return: ~ServicePrincipalCredentials credentials
    """
    # An explicit (truthy) argument wins; otherwise read the environment.
    client_id = client_id or os.getenv('AZURE_CLIENT_ID')
    client_secret = client_secret or os.getenv('AZURE_CLIENT_SECRET')
    tenant_id = tenant_id or os.getenv('AZURE_TENANT_ID')

    return ServicePrincipalCredentials(
        client_id=client_id,
        secret=client_secret,
        tenant=tenant_id,
    )
Authenticate APP using token credentials: https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python :return: ~ServicePrincipalCredentials credentials
Below is the the instruction that describes the task: ### Input: Authenticate APP using token credentials: https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python :return: ~ServicePrincipalCredentials credentials ### Response: def _login_azure_app_token(client_id=None, client_secret=None, tenant_id=None): """ Authenticate APP using token credentials: https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python :return: ~ServicePrincipalCredentials credentials """ client_id = os.getenv('AZURE_CLIENT_ID') if not client_id else client_id client_secret = os.getenv('AZURE_CLIENT_SECRET') if not client_secret else client_secret tenant_id = os.getenv('AZURE_TENANT_ID') if not tenant_id else tenant_id credentials = ServicePrincipalCredentials( client_id=client_id, secret=client_secret, tenant=tenant_id, ) return credentials
def get_cases(variant_source, case_lines=None, case_type='ped',
              variant_type='snv', variant_mode='vcf'):
    """Build Case objects for a variant source and attach their individuals.

    Args:
        variant_source (str): Path to vcf files
        case_lines (Iterable): Ped like lines
        case_type (str): Format of case lines

    Returns:
        case_objs (list(puzzle.models.Case))
    """
    individuals = get_individuals(
        variant_source=variant_source,
        case_lines=case_lines,
        case_type=case_type,
        variant_mode=variant_mode
    )

    # Detect gzip compression and a matching tabix index next to the source.
    compressed = variant_source.endswith('.gz')
    tabix_index = False
    if compressed:
        logger.debug("Found compressed variant source")
        if os.path.exists('.'.join([variant_source, 'tbi'])):
            logger.debug("Found index file")
            tabix_index = True

    # One Case per distinct case id; with no individuals the file name
    # itself serves as the (only) case id.
    if individuals:
        case_ids = {individual.case_id for individual in individuals}
    else:
        case_ids = [os.path.basename(variant_source)]

    case_objs = []
    for case_id in case_ids:
        logger.info("Found case {0}".format(case_id))
        case = Case(
            case_id=case_id,
            name=case_id,
            variant_source=variant_source,
            variant_type=variant_type,
            variant_mode=variant_mode,
            compressed=compressed,
            tabix_index=tabix_index
        )
        # Attach every individual that belongs to this case.
        for individual in individuals:
            if individual.case_id == case_id:
                logger.info("Adding ind {0} to case {1}".format(
                    individual.name, individual.case_id
                ))
                case.add_individual(individual)
        case_objs.append(case)

    return case_objs
Create a cases and populate it with individuals Args: variant_source (str): Path to vcf files case_lines (Iterable): Ped like lines case_type (str): Format of case lines Returns: case_objs (list(puzzle.models.Case))
Below is the the instruction that describes the task: ### Input: Create a cases and populate it with individuals Args: variant_source (str): Path to vcf files case_lines (Iterable): Ped like lines case_type (str): Format of case lines Returns: case_objs (list(puzzle.models.Case)) ### Response: def get_cases(variant_source, case_lines=None, case_type='ped', variant_type='snv', variant_mode='vcf'): """Create a cases and populate it with individuals Args: variant_source (str): Path to vcf files case_lines (Iterable): Ped like lines case_type (str): Format of case lines Returns: case_objs (list(puzzle.models.Case)) """ individuals = get_individuals( variant_source=variant_source, case_lines=case_lines, case_type=case_type, variant_mode=variant_mode ) case_objs = [] case_ids = set() compressed = False tabix_index = False #If no individuals we still need to have a case id if variant_source.endswith('.gz'): logger.debug("Found compressed variant source") compressed = True tabix_file = '.'.join([variant_source, 'tbi']) if os.path.exists(tabix_file): logger.debug("Found index file") tabix_index = True if len(individuals) > 0: for individual in individuals: case_ids.add(individual.case_id) else: case_ids = [os.path.basename(variant_source)] for case_id in case_ids: logger.info("Found case {0}".format(case_id)) case = Case( case_id=case_id, name=case_id, variant_source=variant_source, variant_type=variant_type, variant_mode=variant_mode, compressed=compressed, tabix_index=tabix_index ) # Add the individuals to the correct case for individual in individuals: if individual.case_id == case_id: logger.info("Adding ind {0} to case {1}".format( individual.name, individual.case_id )) case.add_individual(individual) case_objs.append(case) return case_objs
def _get_image(self):
    """Lazily load, cache and return the watermark image.

    :return: Image.Image
    """
    if self.image is not None:
        return self.image
    # Resolve the watermark path relative to the app's static folder.
    path = '%s/%s' % (current_app.static_folder,
                      os.path.normpath(self.image_path))
    try:
        self.image = Image.open(path)
        self._reduce_opacity()
    except Exception as err:
        raise ValueError('Unsupported watermark format: %s' % str(err))
    return self.image
Prepare watermark image :return: Image.Image
Below is the the instruction that describes the task: ### Input: Prepare watermark image :return: Image.Image ### Response: def _get_image(self): """ Prepare watermark image :return: Image.Image """ if self.image is None: image_path = '%s/%s' % (current_app.static_folder, os.path.normpath(self.image_path)) try: self.image = Image.open(image_path) self._reduce_opacity() except Exception as err: raise ValueError('Unsupported watermark format: %s' % str(err)) return self.image
def option_list_all(self):
    """Return every option, including those nested inside option groups."""
    options = list(self.option_list)
    for group in self.option_groups:
        options += group.option_list
    return options
Get a list of all options, including those in option groups.
Below is the the instruction that describes the task: ### Input: Get a list of all options, including those in option groups. ### Response: def option_list_all(self): """Get a list of all options, including those in option groups.""" res = self.option_list[:] for i in self.option_groups: res.extend(i.option_list) return res
def set_config_value(name, value):
    """Persist a named setting in the 'servicefabric' CLI config section."""
    CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX).set_value(
        'servicefabric', name, value)
Set a config by name to a value.
Below is the the instruction that describes the task: ### Input: Set a config by name to a value. ### Response: def set_config_value(name, value): """Set a config by name to a value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) cli_config.set_value('servicefabric', name, value)
def chain_split(*splits: Iterable[Callable[..., Any]]) -> Callable[[BaseChain], Iterable[BaseChain]]:  # noqa: E501
    """
    Construct and execute multiple concurrent forks of the chain.

    Any number of forks may be executed. For each fork, provide an iterable of
    commands.

    Returns the resulting chain objects for each fork.

    .. code-block:: python

        chain_a, chain_b = build(
            mining_chain,
            chain_split(
                (mine_block(extra_data=b'chain-a'), mine_block()),
                (mine_block(extra_data=b'chain-b'), mine_block(), mine_block()),
            ),
        )
    """
    if not splits:
        raise ValidationError("Cannot use `chain_split` without providing at least one split")

    @functools.wraps(chain_split)
    @to_tuple  # collect the yielded chains eagerly into a tuple
    def _chain_split(chain: BaseChain) -> Iterable[BaseChain]:
        # Each fork is built independently, starting from the same chain.
        for split_fns in splits:
            result = build(
                chain,
                *split_fns,
            )
            yield result

    return _chain_split
Construct and execute multiple concurrent forks of the chain. Any number of forks may be executed. For each fork, provide an iterable of commands. Returns the resulting chain objects for each fork. .. code-block:: python chain_a, chain_b = build( mining_chain, chain_split( (mine_block(extra_data=b'chain-a'), mine_block()), (mine_block(extra_data=b'chain-b'), mine_block(), mine_block()), ), )
Below is the the instruction that describes the task: ### Input: Construct and execute multiple concurrent forks of the chain. Any number of forks may be executed. For each fork, provide an iterable of commands. Returns the resulting chain objects for each fork. .. code-block:: python chain_a, chain_b = build( mining_chain, chain_split( (mine_block(extra_data=b'chain-a'), mine_block()), (mine_block(extra_data=b'chain-b'), mine_block(), mine_block()), ), ) ### Response: def chain_split(*splits: Iterable[Callable[..., Any]]) -> Callable[[BaseChain], Iterable[BaseChain]]: # noqa: E501 """ Construct and execute multiple concurrent forks of the chain. Any number of forks may be executed. For each fork, provide an iterable of commands. Returns the resulting chain objects for each fork. .. code-block:: python chain_a, chain_b = build( mining_chain, chain_split( (mine_block(extra_data=b'chain-a'), mine_block()), (mine_block(extra_data=b'chain-b'), mine_block(), mine_block()), ), ) """ if not splits: raise ValidationError("Cannot use `chain_split` without providing at least one split") @functools.wraps(chain_split) @to_tuple def _chain_split(chain: BaseChain) -> Iterable[BaseChain]: for split_fns in splits: result = build( chain, *split_fns, ) yield result return _chain_split
def process_filter_directive(filter_operation_info, location, context):
    """Return a Filter basic block that corresponds to the filter operation in the directive.

    Args:
        filter_operation_info: FilterOperationInfo object, containing the directive and field info
                               of the field where the filter is to be applied.
        location: Location where this filter is used.
        context: dict, various per-compilation data (e.g. declared tags, whether the current block
                 is optional, etc.). May be mutated in-place in this function!

    Returns:
        a Filter basic block that performs the requested filtering operation
    """
    op_name, operator_params = _get_filter_op_name_and_values(filter_operation_info.directive)

    # Dispatch table for operators that are not plain binary comparisons.
    non_comparison_filters = {
        u'name_or_alias': _process_name_or_alias_filter_directive,
        u'between': _process_between_filter_directive,
        u'in_collection': _process_in_collection_filter_directive,
        u'has_substring': _process_has_substring_filter_directive,
        u'contains': _process_contains_filter_directive,
        u'intersects': _process_intersects_filter_directive,
        u'has_edge_degree': _process_has_edge_degree_filter_directive,
    }
    # Sanity check: every declared operator must have a processing function.
    all_recognized_filters = frozenset(non_comparison_filters.keys()) | COMPARISON_OPERATORS
    if all_recognized_filters != ALL_OPERATORS:
        unrecognized_filters = ALL_OPERATORS - all_recognized_filters
        raise AssertionError(u'Some filtering operators are defined but do not have an associated '
                             u'processing function. This is a bug: {}'.format(unrecognized_filters))

    # All comparison operators share one handler, parameterized by the operator.
    if op_name in COMPARISON_OPERATORS:
        process_func = partial(_process_comparison_filter_directive, operator=op_name)
    else:
        process_func = non_comparison_filters.get(op_name, None)

    if process_func is None:
        raise GraphQLCompilationError(u'Unknown op_name for filter directive: {}'.format(op_name))

    # Operators that do not affect the inner scope require a field name to which they apply.
    # There is no field name on InlineFragment ASTs, which is why only operators that affect
    # the inner scope make semantic sense when applied to InlineFragments.
    # Here, we ensure that we either have a field name to which the filter applies,
    # or that the operator affects the inner scope.
    if (filter_operation_info.field_name is None and
            op_name not in INNER_SCOPE_VERTEX_FIELD_OPERATORS):
        raise GraphQLCompilationError(u'The filter with op_name "{}" must be applied on a field. '
                                      u'It may not be applied on a type coercion.'.format(op_name))

    # 'name_or_alias' inspects two fields at once; every other operator targets one field.
    fields = ((filter_operation_info.field_name,)
              if op_name != 'name_or_alias' else ('name', 'alias'))
    # Record the filter for downstream metadata consumers before delegating.
    context['metadata'].record_filter_info(
        location,
        FilterInfo(fields=fields, op_name=op_name, args=tuple(operator_params))
    )

    return process_func(filter_operation_info, location, context, operator_params)
Return a Filter basic block that corresponds to the filter operation in the directive. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: a Filter basic block that performs the requested filtering operation
Below is the the instruction that describes the task: ### Input: Return a Filter basic block that corresponds to the filter operation in the directive. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: a Filter basic block that performs the requested filtering operation ### Response: def process_filter_directive(filter_operation_info, location, context): """Return a Filter basic block that corresponds to the filter operation in the directive. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: a Filter basic block that performs the requested filtering operation """ op_name, operator_params = _get_filter_op_name_and_values(filter_operation_info.directive) non_comparison_filters = { u'name_or_alias': _process_name_or_alias_filter_directive, u'between': _process_between_filter_directive, u'in_collection': _process_in_collection_filter_directive, u'has_substring': _process_has_substring_filter_directive, u'contains': _process_contains_filter_directive, u'intersects': _process_intersects_filter_directive, u'has_edge_degree': _process_has_edge_degree_filter_directive, } all_recognized_filters = frozenset(non_comparison_filters.keys()) | COMPARISON_OPERATORS if all_recognized_filters != ALL_OPERATORS: unrecognized_filters = ALL_OPERATORS - all_recognized_filters raise AssertionError(u'Some filtering operators are defined but do not have an associated ' u'processing function. 
This is a bug: {}'.format(unrecognized_filters)) if op_name in COMPARISON_OPERATORS: process_func = partial(_process_comparison_filter_directive, operator=op_name) else: process_func = non_comparison_filters.get(op_name, None) if process_func is None: raise GraphQLCompilationError(u'Unknown op_name for filter directive: {}'.format(op_name)) # Operators that do not affect the inner scope require a field name to which they apply. # There is no field name on InlineFragment ASTs, which is why only operators that affect # the inner scope make semantic sense when applied to InlineFragments. # Here, we ensure that we either have a field name to which the filter applies, # or that the operator affects the inner scope. if (filter_operation_info.field_name is None and op_name not in INNER_SCOPE_VERTEX_FIELD_OPERATORS): raise GraphQLCompilationError(u'The filter with op_name "{}" must be applied on a field. ' u'It may not be applied on a type coercion.'.format(op_name)) fields = ((filter_operation_info.field_name,) if op_name != 'name_or_alias' else ('name', 'alias')) context['metadata'].record_filter_info( location, FilterInfo(fields=fields, op_name=op_name, args=tuple(operator_params)) ) return process_func(filter_operation_info, location, context, operator_params)
def controller(self):
    """Return the single controller linked to this account.

    Returns:
        The first (and only) entry of ``self.controllers``.

    Raises:
        TypeError: if more than one controller is linked.
        AttributeError: if no controller is assigned (attribute missing
            or list empty; previously an empty list raised IndexError).
    """
    controllers = getattr(self, 'controllers', None)
    if controllers:
        if len(controllers) > 1:
            # in the future, we should support more controllers
            raise TypeError("Only one controller per account.")
        return controllers[0]
    raise AttributeError("There is no controller assigned.")
Show current linked controllers.
Below is the the instruction that describes the task: ### Input: Show current linked controllers. ### Response: def controller(self): """Show current linked controllers.""" if hasattr(self, 'controllers'): if len(self.controllers) > 1: # in the future, we should support more controllers raise TypeError("Only one controller per account.") return self.controllers[0] raise AttributeError("There is no controller assigned.")
def _scoop_single_run(kwargs):
    """Run a single job under scoop, configuring logging only in worker processes."""
    try:
        # When scoop was not launched via `python -m scoop ...` the
        # IS_ORIGIN flag is missing and scoop falls back to the plain
        # `map`, i.e. everything runs in the main process - so treat a
        # missing flag as being the origin.
        is_origin = getattr(scoop, 'IS_ORIGIN', True)
        if not is_origin:
            # Worker process: set niceness and logging before the run.
            _configure_niceness(kwargs)
            _configure_logging(kwargs)
        return _single_run(kwargs)
    except Exception:
        scoop.logger.exception('ERROR occurred during a single run!')
        raise
Wrapper function for scoop, that does not configure logging
Below is the the instruction that describes the task: ### Input: Wrapper function for scoop, that does not configure logging ### Response: def _scoop_single_run(kwargs): """Wrapper function for scoop, that does not configure logging""" try: try: is_origin = scoop.IS_ORIGIN except AttributeError: # scoop is not properly started, i.e. with `python -m scoop...` # in this case scoop uses default `map` function, i.e. # the main process is_origin = True if not is_origin: # configure logging and niceness if not the main process: _configure_niceness(kwargs) _configure_logging(kwargs) return _single_run(kwargs) except Exception: scoop.logger.exception('ERROR occurred during a single run!') raise
def _set_get_contained_in_ID(self, v, load=False):
    """
    Setter method for get_contained_in_ID, mapped from YANG variable
    /brocade_entity_rpc/get_contained_in_ID (rpc)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_get_contained_in_ID is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_get_contained_in_ID() directly.

    YANG Description: This is a function that returns the slot/container
    name/ID, where this managed device is 'contained in'. The managed device
    here, is typically 1 Rack Unit (RU) device.
    """
    # Unwrap pyangbind wrapper values back to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG class for this RPC node.
        t = YANGDynClass(v,base=get_contained_in_ID.get_contained_in_ID, is_leaf=True, yang_name="get-contained-in-ID", rest_name="get-contained-in-ID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'get_container_id_action_point'}}, namespace='urn:brocade.com:mgmt:brocade-entity', defining_module='brocade-entity', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the standard pyangbind error payload describing the
        # expected generated type.
        raise ValueError({
            'error-string': """get_contained_in_ID must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=get_contained_in_ID.get_contained_in_ID, is_leaf=True, yang_name="get-contained-in-ID", rest_name="get-contained-in-ID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'get_container_id_action_point'}}, namespace='urn:brocade.com:mgmt:brocade-entity', defining_module='brocade-entity', yang_type='rpc', is_config=True)""",
        })
    self.__get_contained_in_ID = t
    # Notify the parent container (if any) that a child value changed.
    if hasattr(self, '_set'):
        self._set()
Setter method for get_contained_in_ID, mapped from YANG variable /brocade_entity_rpc/get_contained_in_ID (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_contained_in_ID is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_contained_in_ID() directly. YANG Description: This is a function that returns the slot/container name/ID, where this managed device is 'contained in'. The managed device here, is typically 1 Rack Unit (RU) device.
Below is the the instruction that describes the task: ### Input: Setter method for get_contained_in_ID, mapped from YANG variable /brocade_entity_rpc/get_contained_in_ID (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_contained_in_ID is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_contained_in_ID() directly. YANG Description: This is a function that returns the slot/container name/ID, where this managed device is 'contained in'. The managed device here, is typically 1 Rack Unit (RU) device. ### Response: def _set_get_contained_in_ID(self, v, load=False): """ Setter method for get_contained_in_ID, mapped from YANG variable /brocade_entity_rpc/get_contained_in_ID (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_contained_in_ID is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_contained_in_ID() directly. YANG Description: This is a function that returns the slot/container name/ID, where this managed device is 'contained in'. The managed device here, is typically 1 Rack Unit (RU) device. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_contained_in_ID.get_contained_in_ID, is_leaf=True, yang_name="get-contained-in-ID", rest_name="get-contained-in-ID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'get_container_id_action_point'}}, namespace='urn:brocade.com:mgmt:brocade-entity', defining_module='brocade-entity', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """get_contained_in_ID must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_contained_in_ID.get_contained_in_ID, is_leaf=True, yang_name="get-contained-in-ID", rest_name="get-contained-in-ID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'get_container_id_action_point'}}, namespace='urn:brocade.com:mgmt:brocade-entity', defining_module='brocade-entity', yang_type='rpc', is_config=True)""", }) self.__get_contained_in_ID = t if hasattr(self, '_set'): self._set()
def betas_for_cov(self, covariate='0'):
    """Return the beta values (i.e. the IRF) for a single covariate.

    :param covariate: name of covariate.
    :type covariate: string
    """
    # Position of this covariate in the design matrix column order.
    cov_index = list(self.covariates.keys()).index(covariate)
    # Each covariate owns one contiguous slice of the beta vector.
    start = int(cov_index * self.deconvolution_interval_size)
    stop = int((cov_index + 1) * self.deconvolution_interval_size)
    return self.betas[start:stop]
betas_for_cov returns the beta values (i.e. IRF) associated with a specific covariate. :param covariate: name of covariate. :type covariate: string
Below is the the instruction that describes the task: ### Input: betas_for_cov returns the beta values (i.e. IRF) associated with a specific covariate. :param covariate: name of covariate. :type covariate: string ### Response: def betas_for_cov(self, covariate = '0'): """betas_for_cov returns the beta values (i.e. IRF) associated with a specific covariate. :param covariate: name of covariate. :type covariate: string """ # find the index in the designmatrix of the current covariate this_covariate_index = list(self.covariates.keys()).index(covariate) return self.betas[int(this_covariate_index*self.deconvolution_interval_size):int((this_covariate_index+1)*self.deconvolution_interval_size)]
def fix_display(self):
    """Switch matplotlib to the Agg backend when no display is available.

    Probing tkinter reveals whether a GUI display can be opened; on a
    headless system we fall back to a backend that needs no display.
    """
    try:
        tkinter.Tk()
        return
    except (tkinter.TclError, NameError):
        # No display (or tkinter missing from the namespace).
        pass
    # Use the non-interactive backend if matplotlib is importable at all.
    try:
        import matplotlib as mpl
    except ImportError:
        return
    print("Setting matplotlib backend to Agg")
    mpl.use('Agg')
If this is being run on a headless system the Matplotlib backend must be changed to one that doesn't need a display.
Below is the the instruction that describes the task: ### Input: If this is being run on a headless system the Matplotlib backend must be changed to one that doesn't need a display. ### Response: def fix_display(self): """If this is being run on a headless system the Matplotlib backend must be changed to one that doesn't need a display. """ try: tkinter.Tk() except (tkinter.TclError, NameError): # If there is no display. try: import matplotlib as mpl except ImportError: pass else: print("Setting matplotlib backend to Agg") mpl.use('Agg')
def _set_modId(self, v, load=False):
    """
    Setter method for modId, mapped from YANG variable
    /logging/raslog/module/modId (list)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_modId is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_modId()
    directly.
    """
    # Unwrap pyangbind wrapper values back to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG list class for this node.
        t = YANGDynClass(v,base=YANGListType("modId",modId.modId, yang_name="modId", rest_name="modId", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='modId', extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}), is_container='list', yang_name="modId", rest_name="modId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the standard pyangbind error payload describing the
        # expected generated type.
        raise ValueError({
            'error-string': """modId must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("modId",modId.modId, yang_name="modId", rest_name="modId", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='modId', extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}), is_container='list', yang_name="modId", rest_name="modId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='list', is_config=True)""",
        })
    self.__modId = t
    # Notify the parent container (if any) that a child value changed.
    if hasattr(self, '_set'):
        self._set()
Setter method for modId, mapped from YANG variable /logging/raslog/module/modId (list) If this variable is read-only (config: false) in the source YANG file, then _set_modId is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_modId() directly.
Below is the the instruction that describes the task: ### Input: Setter method for modId, mapped from YANG variable /logging/raslog/module/modId (list) If this variable is read-only (config: false) in the source YANG file, then _set_modId is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_modId() directly. ### Response: def _set_modId(self, v, load=False): """ Setter method for modId, mapped from YANG variable /logging/raslog/module/modId (list) If this variable is read-only (config: false) in the source YANG file, then _set_modId is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_modId() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("modId",modId.modId, yang_name="modId", rest_name="modId", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='modId', extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}), is_container='list', yang_name="modId", rest_name="modId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """modId must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("modId",modId.modId, yang_name="modId", rest_name="modId", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='modId', extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', 
u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}), is_container='list', yang_name="modId", rest_name="modId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RAS module configuration', u'cli-drop-node-name': None, u'callpoint': u'RASMODConfigureCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='list', is_config=True)""", }) self.__modId = t if hasattr(self, '_set'): self._set()
def fix_lamdaline(source): """Remove the last redundant token from lambda expression lambda x: return x) ^ Return string without irrelevant tokens returned from inspect.getsource on lamda expr returns """ # Using undocumented generate_tokens due to a tokenize.tokenize bug # See https://bugs.python.org/issue23297 strio = io.StringIO(source) gen = tokenize.generate_tokens(strio.readline) tkns = [] try: for t in gen: tkns.append(t) except tokenize.TokenError: pass # Find the position of 'lambda' lambda_pos = [(t.type, t.string) for t in tkns].index( (tokenize.NAME, "lambda") ) # Ignore tokes before 'lambda' tkns = tkns[lambda_pos:] # Find the position of th las OP lastop_pos = ( len(tkns) - 1 - [t.type for t in tkns[::-1]].index(tokenize.OP) ) lastop = tkns[lastop_pos] # Remove OP from the line fiedlineno = lastop.start[0] fixedline = lastop.line[: lastop.start[1]] + lastop.line[lastop.end[1] :] tkns = tkns[:lastop_pos] fixedlines = "" last_lineno = 0 for t in tkns: if last_lineno == t.start[0]: continue elif t.start[0] == fiedlineno: fixedlines += fixedline last_lineno = t.start[0] else: fixedlines += t.line last_lineno = t.start[0] return fixedlines
Remove the last redundant token from lambda expression lambda x: return x) ^ Return string without irrelevant tokens returned from inspect.getsource on lamda expr returns
Below is the the instruction that describes the task: ### Input: Remove the last redundant token from lambda expression lambda x: return x) ^ Return string without irrelevant tokens returned from inspect.getsource on lamda expr returns ### Response: def fix_lamdaline(source): """Remove the last redundant token from lambda expression lambda x: return x) ^ Return string without irrelevant tokens returned from inspect.getsource on lamda expr returns """ # Using undocumented generate_tokens due to a tokenize.tokenize bug # See https://bugs.python.org/issue23297 strio = io.StringIO(source) gen = tokenize.generate_tokens(strio.readline) tkns = [] try: for t in gen: tkns.append(t) except tokenize.TokenError: pass # Find the position of 'lambda' lambda_pos = [(t.type, t.string) for t in tkns].index( (tokenize.NAME, "lambda") ) # Ignore tokes before 'lambda' tkns = tkns[lambda_pos:] # Find the position of th las OP lastop_pos = ( len(tkns) - 1 - [t.type for t in tkns[::-1]].index(tokenize.OP) ) lastop = tkns[lastop_pos] # Remove OP from the line fiedlineno = lastop.start[0] fixedline = lastop.line[: lastop.start[1]] + lastop.line[lastop.end[1] :] tkns = tkns[:lastop_pos] fixedlines = "" last_lineno = 0 for t in tkns: if last_lineno == t.start[0]: continue elif t.start[0] == fiedlineno: fixedlines += fixedline last_lineno = t.start[0] else: fixedlines += t.line last_lineno = t.start[0] return fixedlines
def set_rotation(self, r=0, redraw=True): """ Sets the LED matrix rotation for viewing, adjust if the Pi is upside down or sideways. 0 is with the Pi HDMI port facing downwards """ if r in self._pix_map.keys(): if redraw: pixel_list = self.get_pixels() self._rotation = r if redraw: self.set_pixels(pixel_list) else: raise ValueError('Rotation must be 0, 90, 180 or 270 degrees')
Sets the LED matrix rotation for viewing, adjust if the Pi is upside down or sideways. 0 is with the Pi HDMI port facing downwards
Below is the the instruction that describes the task: ### Input: Sets the LED matrix rotation for viewing, adjust if the Pi is upside down or sideways. 0 is with the Pi HDMI port facing downwards ### Response: def set_rotation(self, r=0, redraw=True): """ Sets the LED matrix rotation for viewing, adjust if the Pi is upside down or sideways. 0 is with the Pi HDMI port facing downwards """ if r in self._pix_map.keys(): if redraw: pixel_list = self.get_pixels() self._rotation = r if redraw: self.set_pixels(pixel_list) else: raise ValueError('Rotation must be 0, 90, 180 or 270 degrees')
def graph_from_edges(edges: Iterable[Edge], **kwargs) -> BELGraph: """Build a BEL graph from edges.""" graph = BELGraph(**kwargs) for edge in edges: edge.insert_into_graph(graph) return graph
Build a BEL graph from edges.
Below is the the instruction that describes the task: ### Input: Build a BEL graph from edges. ### Response: def graph_from_edges(edges: Iterable[Edge], **kwargs) -> BELGraph: """Build a BEL graph from edges.""" graph = BELGraph(**kwargs) for edge in edges: edge.insert_into_graph(graph) return graph
def jr6_jr6(mag_file, dir_path=".", input_dir_path="", meas_file="measurements.txt", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", specnum=1, samp_con='1', location='unknown', lat='', lon='', noave=False, meth_code="LP-NO", volume=12, JR=False, user=""): """ Convert JR6 .jr6 files to MagIC file(s) Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '1', see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) meth_code : str colon-delimited method codes, default "LP-NO" volume : float volume in ccs, default 12 JR : bool IODP samples were measured on the JOIDES RESOLUTION, default False user : str user name, default "" Returns --------- Tuple : (True or False indicating if conversion was sucessful, meas_file name written) Info -------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. 
[default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY """ version_num = pmag.get_version() input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) specnum = - int(specnum) samp_con = str(samp_con) volume = float(volume) * 1e-6 # need to add these meas_file = pmag.resolve_file_name(meas_file, output_dir_path) spec_file = pmag.resolve_file_name(spec_file, output_dir_path) samp_file = pmag.resolve_file_name(samp_file, output_dir_path) site_file = pmag.resolve_file_name(site_file, output_dir_path) loc_file = pmag.resolve_file_name(loc_file, output_dir_path) mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if JR: if meth_code == "LP-NO": meth_code = "" meth_code = meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V" meth_code = meth_code.strip(":") samp_con = '5' # format variables tmp_file = mag_file.split(os.extsep)[0]+os.extsep+'tmp' mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if samp_con.startswith("4"): if "-" not in samp_con: print("option [4] must be in form 4-Z where Z is an integer") return False, "naming convention option [4] must be in form 4-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "4" elif samp_con.startswith("7"): if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "naming convention option [7] must be in form 7-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "7" else: Z = 1 # parse data # fix .jr6 file so that there are spaces between all the columns. 
pre_data = open(mag_file, 'r') tmp_data = open(tmp_file, 'w') if samp_con != '2': fixed_data = pre_data.read().replace('-', ' -') else: fixed_data = "" for line in pre_data.readlines(): entries = line.split() if len(entries) < 2: continue fixed_line = entries[0] + ' ' + reduce( lambda x, y: x+' '+y, [x.replace('-', ' -') for x in entries[1:]]) fixed_data += fixed_line+os.linesep tmp_data.write(fixed_data) tmp_data.close() pre_data.close() if not JR: column_names = ['specimen', 'step', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] else: # measured on the Joides Resolution JR6 column_names = ['specimen', 'step', 'negz', 'y', 'x', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if isinstance(data['x'][0], str): column_names = ['specimen', 'step', 'step_unit', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if JR: data['z'] = -data['negz'] cart = np.array([data['x'], data['y'], data['z']]).transpose() dir_dat = pmag.cart2dir(cart).transpose() data['dir_dec'] = dir_dat[0] data['dir_inc'] = dir_dat[1] # the data are in A/m - this converts to Am^2 data['magn_moment'] = dir_dat[2]*(10.0**data['expon'])*volume data['magn_volume'] = dir_dat[2] * \ (10.0**data['expon']) # A/m - data in A/m data['dip'] = -data['dip'] data['specimen'] # put data into magic tables MagRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], [] for rowNum, row in data.iterrows(): MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {} specimen = row['specimen'] if specnum != 0: sample = specimen[:specnum] else: 
sample = specimen site = pmag.parse_site(sample, samp_con, Z) if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]: SpecRec['specimen'] = specimen SpecRec['sample'] = sample SpecRec["citations"] = "This study" SpecRec["analysts"] = user SpecRec['volume'] = volume SpecRecs.append(SpecRec) if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]: SampRec['sample'] = sample SampRec['site'] = site SampRec["citations"] = "This study" SampRec["analysts"] = user SampRec['azimuth'] = row['azimuth'] SampRec['dip'] = row['dip'] SampRec['bed_dip_direction'] = row['bed_dip_direction'] SampRec['bed_dip'] = row['bed_dip'] SampRec['method_codes'] = meth_code SampRecs.append(SampRec) if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]: SiteRec['site'] = site SiteRec['location'] = location SiteRec["citations"] = "This study" SiteRec["analysts"] = user SiteRec['lat'] = lat SiteRec['lon'] = lon SiteRecs.append(SiteRec) if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]: LocRec['location'] = location LocRec["citations"] = "This study" LocRec["analysts"] = user LocRec['lat_n'] = lat LocRec['lon_e'] = lon LocRec['lat_s'] = lat LocRec['lon_w'] = lon LocRecs.append(LocRec) MeasRec["citations"] = "This study" MeasRec["analysts"] = user MeasRec["specimen"] = specimen MeasRec['software_packages'] = version_num MeasRec["treat_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["quality"] = 'g' MeasRec["standard"] = 'u' MeasRec["treat_step_num"] = 0 MeasRec["treat_ac_field"] = '0' if row['step'] == 'NRM': meas_type = "LT-NO" elif 'step_unit' in row and row['step_unit'] == 'C': meas_type = "LT-T-Z" treat = float(row['step']) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) 
# temp in kelvin elif row['step'][0:2] == 'AD': meas_type = "LT-AF-Z" treat = float(row['step'][2:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'A': meas_type = "LT-AF-Z" treat = float(row['step'][1:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'TD': meas_type = "LT-T-Z" treat = float(row['step'][2:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin elif row['step'][0] == 'T': meas_type = "LT-T-Z" treat = float(row['step'][1:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin else: # need to add IRM, and ARM options print("measurement type unknown", row['step']) return False, "measurement type unknown" MeasRec["magn_moment"] = str(row['magn_moment']) MeasRec["magn_volume"] = str(row['magn_volume']) MeasRec["dir_dec"] = str(row['dir_dec']) MeasRec["dir_inc"] = str(row['dir_inc']) MeasRec['method_codes'] = meas_type MagRecs.append(MeasRec) con = cb.Contribution(output_dir_path, read_tables=[]) con.add_magic_table_from_data(dtype='specimens', data=SpecRecs) con.add_magic_table_from_data(dtype='samples', data=SampRecs) con.add_magic_table_from_data(dtype='sites', data=SiteRecs) con.add_magic_table_from_data(dtype='locations', data=LocRecs) MeasOuts = pmag.measurements_methods3(MagRecs, noave) con.add_magic_table_from_data(dtype='measurements', data=MeasOuts) con.tables['specimens'].write_magic_file(custom_name=spec_file) con.tables['samples'].write_magic_file(custom_name=samp_file) con.tables['sites'].write_magic_file(custom_name=site_file) con.tables['locations'].write_magic_file(custom_name=loc_file) con.tables['measurements'].write_magic_file(custom_name=meas_file) try: os.remove(tmp_file) except (OSError, IOError) as e: print("couldn't remove temperary fixed JR6 file %s" % tmp_file) return True, meas_file
Convert JR6 .jr6 files to MagIC file(s) Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '1', see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) meth_code : str colon-delimited method codes, default "LP-NO" volume : float volume in ccs, default 12 JR : bool IODP samples were measured on the JOIDES RESOLUTION, default False user : str user name, default "" Returns --------- Tuple : (True or False indicating if conversion was sucessful, meas_file name written) Info -------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
Below is the the instruction that describes the task: ### Input: Convert JR6 .jr6 files to MagIC file(s) Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '1', see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) meth_code : str colon-delimited method codes, default "LP-NO" volume : float volume in ccs, default 12 JR : bool IODP samples were measured on the JOIDES RESOLUTION, default False user : str user name, default "" Returns --------- Tuple : (True or False indicating if conversion was sucessful, meas_file name written) Info -------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. 
[default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY ### Response: def jr6_jr6(mag_file, dir_path=".", input_dir_path="", meas_file="measurements.txt", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", specnum=1, samp_con='1', location='unknown', lat='', lon='', noave=False, meth_code="LP-NO", volume=12, JR=False, user=""): """ Convert JR6 .jr6 files to MagIC file(s) Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '1', see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) meth_code : str colon-delimited method codes, default "LP-NO" volume : float volume in ccs, default 12 JR : bool IODP samples were measured on the JOIDES RESOLUTION, default False user : str user name, default "" Returns --------- Tuple : (True or False indicating if conversion was sucessful, meas_file name written) Info -------- Sample naming convention: [1] 
XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY """ version_num = pmag.get_version() input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) specnum = - int(specnum) samp_con = str(samp_con) volume = float(volume) * 1e-6 # need to add these meas_file = pmag.resolve_file_name(meas_file, output_dir_path) spec_file = pmag.resolve_file_name(spec_file, output_dir_path) samp_file = pmag.resolve_file_name(samp_file, output_dir_path) site_file = pmag.resolve_file_name(site_file, output_dir_path) loc_file = pmag.resolve_file_name(loc_file, output_dir_path) mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if JR: if meth_code == "LP-NO": meth_code = "" meth_code = meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V" meth_code = meth_code.strip(":") samp_con = '5' # format variables tmp_file = mag_file.split(os.extsep)[0]+os.extsep+'tmp' mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if samp_con.startswith("4"): if "-" not in samp_con: print("option [4] must be in form 4-Z where Z is an integer") return False, "naming convention option [4] must be in form 4-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "4" elif samp_con.startswith("7"): if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "naming convention option [7] must be in form 7-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "7" else: Z = 1 # parse data # fix .jr6 file so that there are 
spaces between all the columns. pre_data = open(mag_file, 'r') tmp_data = open(tmp_file, 'w') if samp_con != '2': fixed_data = pre_data.read().replace('-', ' -') else: fixed_data = "" for line in pre_data.readlines(): entries = line.split() if len(entries) < 2: continue fixed_line = entries[0] + ' ' + reduce( lambda x, y: x+' '+y, [x.replace('-', ' -') for x in entries[1:]]) fixed_data += fixed_line+os.linesep tmp_data.write(fixed_data) tmp_data.close() pre_data.close() if not JR: column_names = ['specimen', 'step', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] else: # measured on the Joides Resolution JR6 column_names = ['specimen', 'step', 'negz', 'y', 'x', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if isinstance(data['x'][0], str): column_names = ['specimen', 'step', 'step_unit', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if JR: data['z'] = -data['negz'] cart = np.array([data['x'], data['y'], data['z']]).transpose() dir_dat = pmag.cart2dir(cart).transpose() data['dir_dec'] = dir_dat[0] data['dir_inc'] = dir_dat[1] # the data are in A/m - this converts to Am^2 data['magn_moment'] = dir_dat[2]*(10.0**data['expon'])*volume data['magn_volume'] = dir_dat[2] * \ (10.0**data['expon']) # A/m - data in A/m data['dip'] = -data['dip'] data['specimen'] # put data into magic tables MagRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], [] for rowNum, row in data.iterrows(): MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {} specimen = row['specimen'] if specnum != 0: 
sample = specimen[:specnum] else: sample = specimen site = pmag.parse_site(sample, samp_con, Z) if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]: SpecRec['specimen'] = specimen SpecRec['sample'] = sample SpecRec["citations"] = "This study" SpecRec["analysts"] = user SpecRec['volume'] = volume SpecRecs.append(SpecRec) if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]: SampRec['sample'] = sample SampRec['site'] = site SampRec["citations"] = "This study" SampRec["analysts"] = user SampRec['azimuth'] = row['azimuth'] SampRec['dip'] = row['dip'] SampRec['bed_dip_direction'] = row['bed_dip_direction'] SampRec['bed_dip'] = row['bed_dip'] SampRec['method_codes'] = meth_code SampRecs.append(SampRec) if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]: SiteRec['site'] = site SiteRec['location'] = location SiteRec["citations"] = "This study" SiteRec["analysts"] = user SiteRec['lat'] = lat SiteRec['lon'] = lon SiteRecs.append(SiteRec) if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]: LocRec['location'] = location LocRec["citations"] = "This study" LocRec["analysts"] = user LocRec['lat_n'] = lat LocRec['lon_e'] = lon LocRec['lat_s'] = lat LocRec['lon_w'] = lon LocRecs.append(LocRec) MeasRec["citations"] = "This study" MeasRec["analysts"] = user MeasRec["specimen"] = specimen MeasRec['software_packages'] = version_num MeasRec["treat_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["quality"] = 'g' MeasRec["standard"] = 'u' MeasRec["treat_step_num"] = 0 MeasRec["treat_ac_field"] = '0' if row['step'] == 'NRM': meas_type = "LT-NO" elif 'step_unit' in row and row['step_unit'] == 'C': meas_type = "LT-T-Z" treat = float(row['step']) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) 
# temp in kelvin elif row['step'][0:2] == 'AD': meas_type = "LT-AF-Z" treat = float(row['step'][2:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'A': meas_type = "LT-AF-Z" treat = float(row['step'][1:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'TD': meas_type = "LT-T-Z" treat = float(row['step'][2:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin elif row['step'][0] == 'T': meas_type = "LT-T-Z" treat = float(row['step'][1:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin else: # need to add IRM, and ARM options print("measurement type unknown", row['step']) return False, "measurement type unknown" MeasRec["magn_moment"] = str(row['magn_moment']) MeasRec["magn_volume"] = str(row['magn_volume']) MeasRec["dir_dec"] = str(row['dir_dec']) MeasRec["dir_inc"] = str(row['dir_inc']) MeasRec['method_codes'] = meas_type MagRecs.append(MeasRec) con = cb.Contribution(output_dir_path, read_tables=[]) con.add_magic_table_from_data(dtype='specimens', data=SpecRecs) con.add_magic_table_from_data(dtype='samples', data=SampRecs) con.add_magic_table_from_data(dtype='sites', data=SiteRecs) con.add_magic_table_from_data(dtype='locations', data=LocRecs) MeasOuts = pmag.measurements_methods3(MagRecs, noave) con.add_magic_table_from_data(dtype='measurements', data=MeasOuts) con.tables['specimens'].write_magic_file(custom_name=spec_file) con.tables['samples'].write_magic_file(custom_name=samp_file) con.tables['sites'].write_magic_file(custom_name=site_file) con.tables['locations'].write_magic_file(custom_name=loc_file) con.tables['measurements'].write_magic_file(custom_name=meas_file) try: os.remove(tmp_file) except (OSError, IOError) as e: print("couldn't remove temperary fixed JR6 file %s" % tmp_file) return True, meas_file
def validate(self): """Validate workflow object. This method currently validates the workflow object with the use of cwltool. It writes the workflow to a tmp CWL file, reads it, validates it and removes the tmp file again. By default, the workflow is written to file using absolute paths to the steps. """ # define tmpfile (fd, tmpfile) = tempfile.mkstemp() os.close(fd) try: # save workflow object to tmpfile, # do not recursively call validate function self.save(tmpfile, mode='abs', validate=False) # load workflow from tmpfile document_loader, processobj, metadata, uri = load_cwl(tmpfile) finally: # cleanup tmpfile os.remove(tmpfile)
Validate workflow object. This method currently validates the workflow object with the use of cwltool. It writes the workflow to a tmp CWL file, reads it, validates it and removes the tmp file again. By default, the workflow is written to file using absolute paths to the steps.
Below is the the instruction that describes the task: ### Input: Validate workflow object. This method currently validates the workflow object with the use of cwltool. It writes the workflow to a tmp CWL file, reads it, validates it and removes the tmp file again. By default, the workflow is written to file using absolute paths to the steps. ### Response: def validate(self): """Validate workflow object. This method currently validates the workflow object with the use of cwltool. It writes the workflow to a tmp CWL file, reads it, validates it and removes the tmp file again. By default, the workflow is written to file using absolute paths to the steps. """ # define tmpfile (fd, tmpfile) = tempfile.mkstemp() os.close(fd) try: # save workflow object to tmpfile, # do not recursively call validate function self.save(tmpfile, mode='abs', validate=False) # load workflow from tmpfile document_loader, processobj, metadata, uri = load_cwl(tmpfile) finally: # cleanup tmpfile os.remove(tmpfile)
def process_file(self, file_path, zombies, pickle_dags=False, session=None): """ Process a Python file containing Airflow DAGs. This includes: 1. Execute the file and look for DAG objects in the namespace. 2. Pickle the DAG and save it to the DB (if necessary). 3. For each DAG, see what tasks should run and create appropriate task instances in the DB. 4. Record any errors importing the file into ORM 5. Kill (in ORM) any task instances belonging to the DAGs that haven't issued a heartbeat in a while. Returns a list of SimpleDag objects that represent the DAGs found in the file :param file_path: the path to the Python file that should be executed :type file_path: unicode :param zombies: zombie task instances to kill. :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance] :param pickle_dags: whether serialize the DAGs found in the file and save them to the db :type pickle_dags: bool :return: a list of SimpleDags made from the Dags found in the file :rtype: list[airflow.utils.dag_processing.SimpleDagBag] """ self.log.info("Processing file %s for tasks to queue", file_path) # As DAGs are parsed from this file, they will be converted into SimpleDags simple_dags = [] try: dagbag = models.DagBag(file_path, include_examples=False) except Exception: self.log.exception("Failed at reloading the DAG file %s", file_path) Stats.incr('dag_file_refresh_error', 1, 1) return [] if len(dagbag.dags) > 0: self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path) else: self.log.warning("No viable dags retrieved from %s", file_path) self.update_import_errors(session, dagbag) return [] # Save individual DAGs in the ORM and update DagModel.last_scheduled_time for dag in dagbag.dags.values(): dag.sync_to_db() paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values() if dag.is_paused] # Pickle the DAGs (if necessary) and put them into a SimpleDag for dag_id in dagbag.dags: # Only return DAGs that are not paused if dag_id not in paused_dag_ids: dag = 
dagbag.get_dag(dag_id) pickle_id = None if pickle_dags: pickle_id = dag.pickle(session).id simple_dags.append(SimpleDag(dag, pickle_id=pickle_id)) if len(self.dag_ids) > 0: dags = [dag for dag in dagbag.dags.values() if dag.dag_id in self.dag_ids and dag.dag_id not in paused_dag_ids] else: dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag and dag.dag_id not in paused_dag_ids] # Not using multiprocessing.Queue() since it's no longer a separate # process and due to some unusual behavior. (empty() incorrectly # returns true?) ti_keys_to_schedule = [] self._process_dags(dagbag, dags, ti_keys_to_schedule) for ti_key in ti_keys_to_schedule: dag = dagbag.dags[ti_key[0]] task = dag.get_task(ti_key[1]) ti = models.TaskInstance(task, ti_key[2]) ti.refresh_from_db(session=session, lock_for_update=True) # We can defer checking the task dependency checks to the worker themselves # since they can be expensive to run in the scheduler. dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True) # Only schedule tasks that have their dependencies met, e.g. to avoid # a task that recently got its state changed to RUNNING from somewhere # other than the scheduler from getting its state overwritten. # TODO(aoen): It's not great that we have to check all the task instance # dependencies twice; once to get the task scheduled, and again to actually # run the task. We should try to come up with a way to only check them once. if ti.are_dependencies_met( dep_context=dep_context, session=session, verbose=True): # Task starts out in the scheduled state. All tasks in the # scheduled state will be sent to the executor ti.state = State.SCHEDULED # Also save this task instance to the DB. 
self.log.info("Creating / updating %s in ORM", ti) session.merge(ti) # commit batch session.commit() # Record import errors into the ORM try: self.update_import_errors(session, dagbag) except Exception: self.log.exception("Error logging import errors!") try: dagbag.kill_zombies(zombies) except Exception: self.log.exception("Error killing zombies!") return simple_dags
Process a Python file containing Airflow DAGs. This includes: 1. Execute the file and look for DAG objects in the namespace. 2. Pickle the DAG and save it to the DB (if necessary). 3. For each DAG, see what tasks should run and create appropriate task instances in the DB. 4. Record any errors importing the file into ORM 5. Kill (in ORM) any task instances belonging to the DAGs that haven't issued a heartbeat in a while. Returns a list of SimpleDag objects that represent the DAGs found in the file :param file_path: the path to the Python file that should be executed :type file_path: unicode :param zombies: zombie task instances to kill. :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance] :param pickle_dags: whether serialize the DAGs found in the file and save them to the db :type pickle_dags: bool :return: a list of SimpleDags made from the Dags found in the file :rtype: list[airflow.utils.dag_processing.SimpleDagBag]
Below is the the instruction that describes the task: ### Input: Process a Python file containing Airflow DAGs. This includes: 1. Execute the file and look for DAG objects in the namespace. 2. Pickle the DAG and save it to the DB (if necessary). 3. For each DAG, see what tasks should run and create appropriate task instances in the DB. 4. Record any errors importing the file into ORM 5. Kill (in ORM) any task instances belonging to the DAGs that haven't issued a heartbeat in a while. Returns a list of SimpleDag objects that represent the DAGs found in the file :param file_path: the path to the Python file that should be executed :type file_path: unicode :param zombies: zombie task instances to kill. :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance] :param pickle_dags: whether serialize the DAGs found in the file and save them to the db :type pickle_dags: bool :return: a list of SimpleDags made from the Dags found in the file :rtype: list[airflow.utils.dag_processing.SimpleDagBag] ### Response: def process_file(self, file_path, zombies, pickle_dags=False, session=None): """ Process a Python file containing Airflow DAGs. This includes: 1. Execute the file and look for DAG objects in the namespace. 2. Pickle the DAG and save it to the DB (if necessary). 3. For each DAG, see what tasks should run and create appropriate task instances in the DB. 4. Record any errors importing the file into ORM 5. Kill (in ORM) any task instances belonging to the DAGs that haven't issued a heartbeat in a while. Returns a list of SimpleDag objects that represent the DAGs found in the file :param file_path: the path to the Python file that should be executed :type file_path: unicode :param zombies: zombie task instances to kill. 
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance] :param pickle_dags: whether serialize the DAGs found in the file and save them to the db :type pickle_dags: bool :return: a list of SimpleDags made from the Dags found in the file :rtype: list[airflow.utils.dag_processing.SimpleDagBag] """ self.log.info("Processing file %s for tasks to queue", file_path) # As DAGs are parsed from this file, they will be converted into SimpleDags simple_dags = [] try: dagbag = models.DagBag(file_path, include_examples=False) except Exception: self.log.exception("Failed at reloading the DAG file %s", file_path) Stats.incr('dag_file_refresh_error', 1, 1) return [] if len(dagbag.dags) > 0: self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path) else: self.log.warning("No viable dags retrieved from %s", file_path) self.update_import_errors(session, dagbag) return [] # Save individual DAGs in the ORM and update DagModel.last_scheduled_time for dag in dagbag.dags.values(): dag.sync_to_db() paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values() if dag.is_paused] # Pickle the DAGs (if necessary) and put them into a SimpleDag for dag_id in dagbag.dags: # Only return DAGs that are not paused if dag_id not in paused_dag_ids: dag = dagbag.get_dag(dag_id) pickle_id = None if pickle_dags: pickle_id = dag.pickle(session).id simple_dags.append(SimpleDag(dag, pickle_id=pickle_id)) if len(self.dag_ids) > 0: dags = [dag for dag in dagbag.dags.values() if dag.dag_id in self.dag_ids and dag.dag_id not in paused_dag_ids] else: dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag and dag.dag_id not in paused_dag_ids] # Not using multiprocessing.Queue() since it's no longer a separate # process and due to some unusual behavior. (empty() incorrectly # returns true?) 
ti_keys_to_schedule = [] self._process_dags(dagbag, dags, ti_keys_to_schedule) for ti_key in ti_keys_to_schedule: dag = dagbag.dags[ti_key[0]] task = dag.get_task(ti_key[1]) ti = models.TaskInstance(task, ti_key[2]) ti.refresh_from_db(session=session, lock_for_update=True) # We can defer checking the task dependency checks to the worker themselves # since they can be expensive to run in the scheduler. dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True) # Only schedule tasks that have their dependencies met, e.g. to avoid # a task that recently got its state changed to RUNNING from somewhere # other than the scheduler from getting its state overwritten. # TODO(aoen): It's not great that we have to check all the task instance # dependencies twice; once to get the task scheduled, and again to actually # run the task. We should try to come up with a way to only check them once. if ti.are_dependencies_met( dep_context=dep_context, session=session, verbose=True): # Task starts out in the scheduled state. All tasks in the # scheduled state will be sent to the executor ti.state = State.SCHEDULED # Also save this task instance to the DB. self.log.info("Creating / updating %s in ORM", ti) session.merge(ti) # commit batch session.commit() # Record import errors into the ORM try: self.update_import_errors(session, dagbag) except Exception: self.log.exception("Error logging import errors!") try: dagbag.kill_zombies(zombies) except Exception: self.log.exception("Error killing zombies!") return simple_dags
def slug_with_level(self, language=None): """Display the slug of the page prepended with insecable spaces to simluate the level of page in the hierarchy.""" level = '' if self.level: for n in range(0, self.level): level += '&nbsp;&nbsp;&nbsp;' return mark_safe(level + self.slug(language))
Display the slug of the page prepended with insecable spaces to simluate the level of page in the hierarchy.
Below is the the instruction that describes the task: ### Input: Display the slug of the page prepended with insecable spaces to simluate the level of page in the hierarchy. ### Response: def slug_with_level(self, language=None): """Display the slug of the page prepended with insecable spaces to simluate the level of page in the hierarchy.""" level = '' if self.level: for n in range(0, self.level): level += '&nbsp;&nbsp;&nbsp;' return mark_safe(level + self.slug(language))
def main(ctx, root_project_dir, verbose): """stack-docs is a CLI for building LSST Stack documentation, such as pipelines.lsst.io. This command should be run on the "main" documentation repository, namely https://github.com/lsst/pipelines_lsst_io. The stack-docs command replaces the usual Makefile and sphinx-build system for Sphinx projects. This dedicated tool provide subcommands that are engineered specifically for building the ``pipelines_lsst_io`` project. The key commands provided by stack-docs are: - ``stack-docs build``: compile the pipelines.lsst.io site from the ``pipelines_lsst_io`` repository and linked packages. - ``stack-docs clean``: removes build products. Use this command to clear the build cache. See also: package-docs, a tool for building previews of package documentation. For more information about stack-docs, see https://documenteer.lsst.io. """ root_project_dir = discover_conf_py_directory(root_project_dir) # Subcommands should use the click.pass_obj decorator to get this # ctx.obj object as the first argument. ctx.obj = {'root_project_dir': root_project_dir, 'verbose': verbose} # Set up application logging. This ensures that only documenteer's # logger is activated. If necessary, we can add other app's loggers too. if verbose: log_level = logging.DEBUG else: log_level = logging.INFO logger = logging.getLogger('documenteer') logger.addHandler(logging.StreamHandler()) logger.setLevel(log_level)
stack-docs is a CLI for building LSST Stack documentation, such as pipelines.lsst.io. This command should be run on the "main" documentation repository, namely https://github.com/lsst/pipelines_lsst_io. The stack-docs command replaces the usual Makefile and sphinx-build system for Sphinx projects. This dedicated tool provide subcommands that are engineered specifically for building the ``pipelines_lsst_io`` project. The key commands provided by stack-docs are: - ``stack-docs build``: compile the pipelines.lsst.io site from the ``pipelines_lsst_io`` repository and linked packages. - ``stack-docs clean``: removes build products. Use this command to clear the build cache. See also: package-docs, a tool for building previews of package documentation. For more information about stack-docs, see https://documenteer.lsst.io.
Below is the the instruction that describes the task: ### Input: stack-docs is a CLI for building LSST Stack documentation, such as pipelines.lsst.io. This command should be run on the "main" documentation repository, namely https://github.com/lsst/pipelines_lsst_io. The stack-docs command replaces the usual Makefile and sphinx-build system for Sphinx projects. This dedicated tool provide subcommands that are engineered specifically for building the ``pipelines_lsst_io`` project. The key commands provided by stack-docs are: - ``stack-docs build``: compile the pipelines.lsst.io site from the ``pipelines_lsst_io`` repository and linked packages. - ``stack-docs clean``: removes build products. Use this command to clear the build cache. See also: package-docs, a tool for building previews of package documentation. For more information about stack-docs, see https://documenteer.lsst.io. ### Response: def main(ctx, root_project_dir, verbose): """stack-docs is a CLI for building LSST Stack documentation, such as pipelines.lsst.io. This command should be run on the "main" documentation repository, namely https://github.com/lsst/pipelines_lsst_io. The stack-docs command replaces the usual Makefile and sphinx-build system for Sphinx projects. This dedicated tool provide subcommands that are engineered specifically for building the ``pipelines_lsst_io`` project. The key commands provided by stack-docs are: - ``stack-docs build``: compile the pipelines.lsst.io site from the ``pipelines_lsst_io`` repository and linked packages. - ``stack-docs clean``: removes build products. Use this command to clear the build cache. See also: package-docs, a tool for building previews of package documentation. For more information about stack-docs, see https://documenteer.lsst.io. """ root_project_dir = discover_conf_py_directory(root_project_dir) # Subcommands should use the click.pass_obj decorator to get this # ctx.obj object as the first argument. 
ctx.obj = {'root_project_dir': root_project_dir, 'verbose': verbose} # Set up application logging. This ensures that only documenteer's # logger is activated. If necessary, we can add other app's loggers too. if verbose: log_level = logging.DEBUG else: log_level = logging.INFO logger = logging.getLogger('documenteer') logger.addHandler(logging.StreamHandler()) logger.setLevel(log_level)
def _release_and_issues(changelog, branch, release_type): """ Return most recent branch-appropriate release, if any, and its contents. :param dict changelog: Changelog contents, as returned by ``releases.util.parse_changelog``. :param str branch: Branch name. :param release_type: Member of `Release`, e.g. `Release.FEATURE`. :returns: Two-tuple of release (``str``) and issues (``list`` of issue numbers.) If there is no latest release for the given branch (e.g. if it's a feature or master branch), it will be ``None``. """ # Bugfix lines just use the branch to find issues bucket = branch # Features need a bit more logic if release_type is Release.FEATURE: bucket = _latest_feature_bucket(changelog) # Issues is simply what's in the bucket issues = changelog[bucket] # Latest release is undefined for feature lines release = None # And requires scanning changelog, for bugfix lines if release_type is Release.BUGFIX: versions = [text_type(x) for x in _versions_from_changelog(changelog)] release = [x for x in versions if x.startswith(bucket)][-1] return release, issues
Return most recent branch-appropriate release, if any, and its contents. :param dict changelog: Changelog contents, as returned by ``releases.util.parse_changelog``. :param str branch: Branch name. :param release_type: Member of `Release`, e.g. `Release.FEATURE`. :returns: Two-tuple of release (``str``) and issues (``list`` of issue numbers.) If there is no latest release for the given branch (e.g. if it's a feature or master branch), it will be ``None``.
Below is the the instruction that describes the task: ### Input: Return most recent branch-appropriate release, if any, and its contents. :param dict changelog: Changelog contents, as returned by ``releases.util.parse_changelog``. :param str branch: Branch name. :param release_type: Member of `Release`, e.g. `Release.FEATURE`. :returns: Two-tuple of release (``str``) and issues (``list`` of issue numbers.) If there is no latest release for the given branch (e.g. if it's a feature or master branch), it will be ``None``. ### Response: def _release_and_issues(changelog, branch, release_type): """ Return most recent branch-appropriate release, if any, and its contents. :param dict changelog: Changelog contents, as returned by ``releases.util.parse_changelog``. :param str branch: Branch name. :param release_type: Member of `Release`, e.g. `Release.FEATURE`. :returns: Two-tuple of release (``str``) and issues (``list`` of issue numbers.) If there is no latest release for the given branch (e.g. if it's a feature or master branch), it will be ``None``. """ # Bugfix lines just use the branch to find issues bucket = branch # Features need a bit more logic if release_type is Release.FEATURE: bucket = _latest_feature_bucket(changelog) # Issues is simply what's in the bucket issues = changelog[bucket] # Latest release is undefined for feature lines release = None # And requires scanning changelog, for bugfix lines if release_type is Release.BUGFIX: versions = [text_type(x) for x in _versions_from_changelog(changelog)] release = [x for x in versions if x.startswith(bucket)][-1] return release, issues
def register(self): """Register the persistent identifier with the provider. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not already registered or is deleted or is a redirection to another PID. :returns: `True` if the PID is successfully register. """ if self.is_registered() or self.is_deleted() or self.is_redirected(): raise PIDInvalidAction( "Persistent identifier has already been registered" " or is deleted.") try: with db.session.begin_nested(): self.status = PIDStatus.REGISTERED db.session.add(self) except SQLAlchemyError: logger.exception("Failed to register PID.", extra=dict(pid=self)) raise logger.info("Registered PID.", extra=dict(pid=self)) return True
Register the persistent identifier with the provider. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not already registered or is deleted or is a redirection to another PID. :returns: `True` if the PID is successfully register.
Below is the the instruction that describes the task: ### Input: Register the persistent identifier with the provider. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not already registered or is deleted or is a redirection to another PID. :returns: `True` if the PID is successfully register. ### Response: def register(self): """Register the persistent identifier with the provider. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID is not already registered or is deleted or is a redirection to another PID. :returns: `True` if the PID is successfully register. """ if self.is_registered() or self.is_deleted() or self.is_redirected(): raise PIDInvalidAction( "Persistent identifier has already been registered" " or is deleted.") try: with db.session.begin_nested(): self.status = PIDStatus.REGISTERED db.session.add(self) except SQLAlchemyError: logger.exception("Failed to register PID.", extra=dict(pid=self)) raise logger.info("Registered PID.", extra=dict(pid=self)) return True
def reversible_deroot(self): """ Stores info required to restore rootedness to derooted Tree. Returns the edge that was originally rooted, the length of e1, and the length of e2. Dendropy Derooting Process: In a rooted tree the root node is bifurcating. Derooting makes it trifurcating. Call the two edges leading out of the root node e1 and e2. Derooting with Tree.deroot() deletes one of e1 and e2 (let's say e2), and stretches the other to the sum of their lengths. Call this e3. Rooted tree: Derooted tree: A A B |_ B \ / / | /e1 |e3 (length = e1+e2; e2 is deleted) Root--o ===> | \e2 Root--o _ C \ _ C | | D D Reverse this with Tree.reroot_at_edge(edge, length1, length2, ...) """ root_edge = self._tree.seed_node.edge lengths = dict([(edge, edge.length) for edge in self._tree.seed_node.incident_edges() if edge is not root_edge]) self._tree.deroot() reroot_edge = (set(self._tree.seed_node.incident_edges()) & set(lengths.keys())).pop() self._tree.encode_bipartitions() self._dirty = True return (reroot_edge, reroot_edge.length - lengths[reroot_edge], lengths[reroot_edge])
Stores info required to restore rootedness to derooted Tree. Returns the edge that was originally rooted, the length of e1, and the length of e2. Dendropy Derooting Process: In a rooted tree the root node is bifurcating. Derooting makes it trifurcating. Call the two edges leading out of the root node e1 and e2. Derooting with Tree.deroot() deletes one of e1 and e2 (let's say e2), and stretches the other to the sum of their lengths. Call this e3. Rooted tree: Derooted tree: A A B |_ B \ / / | /e1 |e3 (length = e1+e2; e2 is deleted) Root--o ===> | \e2 Root--o _ C \ _ C | | D D Reverse this with Tree.reroot_at_edge(edge, length1, length2, ...)
Below is the the instruction that describes the task: ### Input: Stores info required to restore rootedness to derooted Tree. Returns the edge that was originally rooted, the length of e1, and the length of e2. Dendropy Derooting Process: In a rooted tree the root node is bifurcating. Derooting makes it trifurcating. Call the two edges leading out of the root node e1 and e2. Derooting with Tree.deroot() deletes one of e1 and e2 (let's say e2), and stretches the other to the sum of their lengths. Call this e3. Rooted tree: Derooted tree: A A B |_ B \ / / | /e1 |e3 (length = e1+e2; e2 is deleted) Root--o ===> | \e2 Root--o _ C \ _ C | | D D Reverse this with Tree.reroot_at_edge(edge, length1, length2, ...) ### Response: def reversible_deroot(self): """ Stores info required to restore rootedness to derooted Tree. Returns the edge that was originally rooted, the length of e1, and the length of e2. Dendropy Derooting Process: In a rooted tree the root node is bifurcating. Derooting makes it trifurcating. Call the two edges leading out of the root node e1 and e2. Derooting with Tree.deroot() deletes one of e1 and e2 (let's say e2), and stretches the other to the sum of their lengths. Call this e3. Rooted tree: Derooted tree: A A B |_ B \ / / | /e1 |e3 (length = e1+e2; e2 is deleted) Root--o ===> | \e2 Root--o _ C \ _ C | | D D Reverse this with Tree.reroot_at_edge(edge, length1, length2, ...) """ root_edge = self._tree.seed_node.edge lengths = dict([(edge, edge.length) for edge in self._tree.seed_node.incident_edges() if edge is not root_edge]) self._tree.deroot() reroot_edge = (set(self._tree.seed_node.incident_edges()) & set(lengths.keys())).pop() self._tree.encode_bipartitions() self._dirty = True return (reroot_edge, reroot_edge.length - lengths[reroot_edge], lengths[reroot_edge])
def clean(self): """ Delete temp dir if not save_temp set at __init__ """ if not self._save_temp: if hasattr(self, '_written_files'): map(os.unlink, self._written_files) if getattr(self, '_remove_tempdir_on_clean', False): shutil.rmtree(self._tempdir)
Delete temp dir if not save_temp set at __init__
Below is the the instruction that describes the task: ### Input: Delete temp dir if not save_temp set at __init__ ### Response: def clean(self): """ Delete temp dir if not save_temp set at __init__ """ if not self._save_temp: if hasattr(self, '_written_files'): map(os.unlink, self._written_files) if getattr(self, '_remove_tempdir_on_clean', False): shutil.rmtree(self._tempdir)
def map(self, function, options=None): """ Add a map phase to the map/reduce operation. :param function: Either a named Javascript function (ie: 'Riak.mapValues'), or an anonymous javascript function (ie: 'function(...) ... ' or an array ['erlang_module', 'function']. :type function: string, list :param options: phase options, containing 'language', 'keep' flag, and/or 'arg'. :type options: dict :rtype: :class:`RiakMapReduce` """ if options is None: options = dict() if isinstance(function, list): language = 'erlang' else: language = 'javascript' mr = RiakMapReducePhase('map', function, options.get('language', language), options.get('keep', False), options.get('arg', None)) self._phases.append(mr) return self
Add a map phase to the map/reduce operation. :param function: Either a named Javascript function (ie: 'Riak.mapValues'), or an anonymous javascript function (ie: 'function(...) ... ' or an array ['erlang_module', 'function']. :type function: string, list :param options: phase options, containing 'language', 'keep' flag, and/or 'arg'. :type options: dict :rtype: :class:`RiakMapReduce`
Below is the the instruction that describes the task: ### Input: Add a map phase to the map/reduce operation. :param function: Either a named Javascript function (ie: 'Riak.mapValues'), or an anonymous javascript function (ie: 'function(...) ... ' or an array ['erlang_module', 'function']. :type function: string, list :param options: phase options, containing 'language', 'keep' flag, and/or 'arg'. :type options: dict :rtype: :class:`RiakMapReduce` ### Response: def map(self, function, options=None): """ Add a map phase to the map/reduce operation. :param function: Either a named Javascript function (ie: 'Riak.mapValues'), or an anonymous javascript function (ie: 'function(...) ... ' or an array ['erlang_module', 'function']. :type function: string, list :param options: phase options, containing 'language', 'keep' flag, and/or 'arg'. :type options: dict :rtype: :class:`RiakMapReduce` """ if options is None: options = dict() if isinstance(function, list): language = 'erlang' else: language = 'javascript' mr = RiakMapReducePhase('map', function, options.get('language', language), options.get('keep', False), options.get('arg', None)) self._phases.append(mr) return self
def get_value(self, context): """Run python eval on the input string.""" if self.value: return expressions.eval_string(self.value, context) else: # Empty input raises cryptic EOF syntax err, this more human # friendly raise ValueError('!py string expression is empty. It must be a ' 'valid python expression instead.')
Run python eval on the input string.
Below is the the instruction that describes the task: ### Input: Run python eval on the input string. ### Response: def get_value(self, context): """Run python eval on the input string.""" if self.value: return expressions.eval_string(self.value, context) else: # Empty input raises cryptic EOF syntax err, this more human # friendly raise ValueError('!py string expression is empty. It must be a ' 'valid python expression instead.')
def on_epoch_end(self, epoch, logs={}): ''' Run on end of each epoch ''' LOG.debug(logs) nni.report_intermediate_result(logs["val_acc"])
Run on end of each epoch
Below is the the instruction that describes the task: ### Input: Run on end of each epoch ### Response: def on_epoch_end(self, epoch, logs={}): ''' Run on end of each epoch ''' LOG.debug(logs) nni.report_intermediate_result(logs["val_acc"])
def select_actions(root, action_space, max_episode_steps): """ Select actions from the tree Normally we select the greedy action that has the highest reward associated with that subtree. We have a small chance to select a random action based on the exploration param and visit count of the current node at each step. We select actions for the longest possible episode, but normally these will not all be used. They will instead be truncated to the length of the actual episode and then used to update the tree. """ node = root acts = [] steps = 0 while steps < max_episode_steps: if node is None: # we've fallen off the explored area of the tree, just select random actions act = action_space.sample() else: epsilon = EXPLORATION_PARAM / np.log(node.visits + 2) if random.random() < epsilon: # random action act = action_space.sample() else: # greedy action act_value = {} for act in range(action_space.n): if node is not None and act in node.children: act_value[act] = node.children[act].value else: act_value[act] = -np.inf best_value = max(act_value.values()) best_acts = [ act for act, value in act_value.items() if value == best_value ] act = random.choice(best_acts) if act in node.children: node = node.children[act] else: node = None acts.append(act) steps += 1 return acts
Select actions from the tree Normally we select the greedy action that has the highest reward associated with that subtree. We have a small chance to select a random action based on the exploration param and visit count of the current node at each step. We select actions for the longest possible episode, but normally these will not all be used. They will instead be truncated to the length of the actual episode and then used to update the tree.
Below is the the instruction that describes the task: ### Input: Select actions from the tree Normally we select the greedy action that has the highest reward associated with that subtree. We have a small chance to select a random action based on the exploration param and visit count of the current node at each step. We select actions for the longest possible episode, but normally these will not all be used. They will instead be truncated to the length of the actual episode and then used to update the tree. ### Response: def select_actions(root, action_space, max_episode_steps): """ Select actions from the tree Normally we select the greedy action that has the highest reward associated with that subtree. We have a small chance to select a random action based on the exploration param and visit count of the current node at each step. We select actions for the longest possible episode, but normally these will not all be used. They will instead be truncated to the length of the actual episode and then used to update the tree. """ node = root acts = [] steps = 0 while steps < max_episode_steps: if node is None: # we've fallen off the explored area of the tree, just select random actions act = action_space.sample() else: epsilon = EXPLORATION_PARAM / np.log(node.visits + 2) if random.random() < epsilon: # random action act = action_space.sample() else: # greedy action act_value = {} for act in range(action_space.n): if node is not None and act in node.children: act_value[act] = node.children[act].value else: act_value[act] = -np.inf best_value = max(act_value.values()) best_acts = [ act for act, value in act_value.items() if value == best_value ] act = random.choice(best_acts) if act in node.children: node = node.children[act] else: node = None acts.append(act) steps += 1 return acts
def track_metric(self, name: str, value: float, type: TelemetryDataPointType =None, count: int =None, min: float=None, max: float=None, std_dev: float=None, properties: Dict[str, object]=None) -> NotImplemented: """ Send information about a single metric data point that was captured for the application. :param name: The name of the metric that was captured. :param value: The value of the metric that was captured. :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation`) :param count: the number of metrics that were aggregated into this data point. (defaults to: None) :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None) :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None) :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None) :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) """ self._client.track_metric(name, value, type, count, min, max, std_dev, properties)
Send information about a single metric data point that was captured for the application. :param name: The name of the metric that was captured. :param value: The value of the metric that was captured. :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation`) :param count: the number of metrics that were aggregated into this data point. (defaults to: None) :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None) :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None) :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None) :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
Below is the the instruction that describes the task: ### Input: Send information about a single metric data point that was captured for the application. :param name: The name of the metric that was captured. :param value: The value of the metric that was captured. :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation`) :param count: the number of metrics that were aggregated into this data point. (defaults to: None) :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None) :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None) :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None) :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None) ### Response: def track_metric(self, name: str, value: float, type: TelemetryDataPointType =None, count: int =None, min: float=None, max: float=None, std_dev: float=None, properties: Dict[str, object]=None) -> NotImplemented: """ Send information about a single metric data point that was captured for the application. :param name: The name of the metric that was captured. :param value: The value of the metric that was captured. :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation`) :param count: the number of metrics that were aggregated into this data point. (defaults to: None) :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None) :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None) :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None) :param properties: the set of custom properties the client wants attached to this data item. 
(defaults to: None) """ self._client.track_metric(name, value, type, count, min, max, std_dev, properties)
async def send_and_receive(self, message, generate_identifier=True, timeout=5): """Send a message and wait for a response.""" await self._connect_and_encrypt() # Some messages will respond with the same identifier as used in the # corresponding request. Others will not and one example is the crypto # message (for pairing). They will never include an identifer, but it # it is in turn only possible to have one of those message outstanding # at one time (i.e. it's not possible to mix up the responses). In # those cases, a "fake" identifier is used that includes the message # type instead. if generate_identifier: identifier = str(uuid.uuid4()) message.identifier = identifier else: identifier = 'type_' + str(message.type) self.connection.send(message) return await self._receive(identifier, timeout)
Send a message and wait for a response.
Below is the the instruction that describes the task: ### Input: Send a message and wait for a response. ### Response: async def send_and_receive(self, message, generate_identifier=True, timeout=5): """Send a message and wait for a response.""" await self._connect_and_encrypt() # Some messages will respond with the same identifier as used in the # corresponding request. Others will not and one example is the crypto # message (for pairing). They will never include an identifer, but it # it is in turn only possible to have one of those message outstanding # at one time (i.e. it's not possible to mix up the responses). In # those cases, a "fake" identifier is used that includes the message # type instead. if generate_identifier: identifier = str(uuid.uuid4()) message.identifier = identifier else: identifier = 'type_' + str(message.type) self.connection.send(message) return await self._receive(identifier, timeout)
def cc(self, cc_emails, global_substitutions=None, is_multiple=False, p=0): """Adds Cc objects to the Personalization object :param cc_emails: An Cc or list of Cc objects :type cc_emails: Cc, list(Cc), tuple :param global_substitutions: A dict of substitutions for all recipients :type global_substitutions: dict :param is_multiple: Create a new personilization for each recipient :type is_multiple: bool :param p: p is the Personalization object or Personalization object index :type p: Personalization, integer, optional """ if isinstance(cc_emails, list): for email in cc_emails: if isinstance(email, str): email = Cc(email, None) if isinstance(email, tuple): email = Cc(email[0], email[1]) self.add_cc(email, global_substitutions, is_multiple, p) else: if isinstance(cc_emails, str): cc_emails = Cc(cc_emails, None) if isinstance(cc_emails, tuple): cc_emails = To(cc_emails[0], cc_emails[1]) self.add_cc(cc_emails, global_substitutions, is_multiple, p)
Adds Cc objects to the Personalization object :param cc_emails: An Cc or list of Cc objects :type cc_emails: Cc, list(Cc), tuple :param global_substitutions: A dict of substitutions for all recipients :type global_substitutions: dict :param is_multiple: Create a new personilization for each recipient :type is_multiple: bool :param p: p is the Personalization object or Personalization object index :type p: Personalization, integer, optional
Below is the the instruction that describes the task: ### Input: Adds Cc objects to the Personalization object :param cc_emails: An Cc or list of Cc objects :type cc_emails: Cc, list(Cc), tuple :param global_substitutions: A dict of substitutions for all recipients :type global_substitutions: dict :param is_multiple: Create a new personilization for each recipient :type is_multiple: bool :param p: p is the Personalization object or Personalization object index :type p: Personalization, integer, optional ### Response: def cc(self, cc_emails, global_substitutions=None, is_multiple=False, p=0): """Adds Cc objects to the Personalization object :param cc_emails: An Cc or list of Cc objects :type cc_emails: Cc, list(Cc), tuple :param global_substitutions: A dict of substitutions for all recipients :type global_substitutions: dict :param is_multiple: Create a new personilization for each recipient :type is_multiple: bool :param p: p is the Personalization object or Personalization object index :type p: Personalization, integer, optional """ if isinstance(cc_emails, list): for email in cc_emails: if isinstance(email, str): email = Cc(email, None) if isinstance(email, tuple): email = Cc(email[0], email[1]) self.add_cc(email, global_substitutions, is_multiple, p) else: if isinstance(cc_emails, str): cc_emails = Cc(cc_emails, None) if isinstance(cc_emails, tuple): cc_emails = To(cc_emails[0], cc_emails[1]) self.add_cc(cc_emails, global_substitutions, is_multiple, p)
def bind_fields_to_model_cls(cls, model_fields): """Bind fields to model class.""" return dict( (field.name, field.bind_model_cls(cls)) for field in model_fields)
Bind fields to model class.
Below is the the instruction that describes the task: ### Input: Bind fields to model class. ### Response: def bind_fields_to_model_cls(cls, model_fields): """Bind fields to model class.""" return dict( (field.name, field.bind_model_cls(cls)) for field in model_fields)
def get_quoted_columns(self, platform): """ Returns the quoted representation of the column names the constraint is associated with. But only if they were defined with one or a column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned. :param platform: The platform to use for quotation. :type platform: Platform :rtype: list """ columns = [] for column in self._columns.values(): columns.append(column.get_quoted_name(platform)) return columns
Returns the quoted representation of the column names the constraint is associated with. But only if they were defined with one or a column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned. :param platform: The platform to use for quotation. :type platform: Platform :rtype: list
Below is the the instruction that describes the task: ### Input: Returns the quoted representation of the column names the constraint is associated with. But only if they were defined with one or a column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned. :param platform: The platform to use for quotation. :type platform: Platform :rtype: list ### Response: def get_quoted_columns(self, platform): """ Returns the quoted representation of the column names the constraint is associated with. But only if they were defined with one or a column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned. :param platform: The platform to use for quotation. :type platform: Platform :rtype: list """ columns = [] for column in self._columns.values(): columns.append(column.get_quoted_name(platform)) return columns
def get_plotable3d(self): """ :returns: matplotlib Poly3DCollection :rtype: list of mpl_toolkits.mplot3d """ polyhedra = sum([polyhedron.get_plotable3d() for polyhedron in self.polyhedra], []) return polyhedra + self.surface.get_plotable3d()
:returns: matplotlib Poly3DCollection :rtype: list of mpl_toolkits.mplot3d
Below is the the instruction that describes the task: ### Input: :returns: matplotlib Poly3DCollection :rtype: list of mpl_toolkits.mplot3d ### Response: def get_plotable3d(self): """ :returns: matplotlib Poly3DCollection :rtype: list of mpl_toolkits.mplot3d """ polyhedra = sum([polyhedron.get_plotable3d() for polyhedron in self.polyhedra], []) return polyhedra + self.surface.get_plotable3d()
def reject(self, func): """ Return all the elements for which a truth test fails. """ return self._wrap(list(filter(lambda val: not func(val), self.obj)))
Return all the elements for which a truth test fails.
Below is the the instruction that describes the task: ### Input: Return all the elements for which a truth test fails. ### Response: def reject(self, func): """ Return all the elements for which a truth test fails. """ return self._wrap(list(filter(lambda val: not func(val), self.obj)))
def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val == True: return True elif val == False: return False else: try: if not isinstance(val, string_types): # TODO: Why do we raise a KeyError here? raise KeyError() else: return self.main._bools[val.lower()] except KeyError: raise ValueError('Value "%s" is neither True nor False' % val)
Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0
Below is the the instruction that describes the task: ### Input: Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 ### Response: def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val == True: return True elif val == False: return False else: try: if not isinstance(val, string_types): # TODO: Why do we raise a KeyError here? raise KeyError() else: return self.main._bools[val.lower()] except KeyError: raise ValueError('Value "%s" is neither True nor False' % val)
def restart_on_change(restart_map, stopstart=False, restart_functions=None): """Restart services based on configuration files changing This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] '/etc/apache/sites-enabled/*': [ 'apache2' ] }) def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. The apache2 service would be restarted if any file matching the pattern got changed, created or removed. Standard wildcards are supported, see documentation for the 'glob' module for more information. @param restart_map: {path_file_name: [service_name, ...] @param stopstart: DEFAULT false; whether to stop, start OR restart @param restart_functions: nonstandard functions to use to restart services {svc: func, ...} @returns result from decorated function """ def wrap(f): @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( (lambda: f(*args, **kwargs)), restart_map, stopstart, restart_functions) return wrapped_f return wrap
Restart services based on configuration files changing This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] '/etc/apache/sites-enabled/*': [ 'apache2' ] }) def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. The apache2 service would be restarted if any file matching the pattern got changed, created or removed. Standard wildcards are supported, see documentation for the 'glob' module for more information. @param restart_map: {path_file_name: [service_name, ...] @param stopstart: DEFAULT false; whether to stop, start OR restart @param restart_functions: nonstandard functions to use to restart services {svc: func, ...} @returns result from decorated function
Below is the the instruction that describes the task: ### Input: Restart services based on configuration files changing This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] '/etc/apache/sites-enabled/*': [ 'apache2' ] }) def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. The apache2 service would be restarted if any file matching the pattern got changed, created or removed. Standard wildcards are supported, see documentation for the 'glob' module for more information. @param restart_map: {path_file_name: [service_name, ...] @param stopstart: DEFAULT false; whether to stop, start OR restart @param restart_functions: nonstandard functions to use to restart services {svc: func, ...} @returns result from decorated function ### Response: def restart_on_change(restart_map, stopstart=False, restart_functions=None): """Restart services based on configuration files changing This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] '/etc/apache/sites-enabled/*': [ 'apache2' ] }) def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the ceph_client_changed function. The apache2 service would be restarted if any file matching the pattern got changed, created or removed. Standard wildcards are supported, see documentation for the 'glob' module for more information. @param restart_map: {path_file_name: [service_name, ...] 
@param stopstart: DEFAULT false; whether to stop, start OR restart @param restart_functions: nonstandard functions to use to restart services {svc: func, ...} @returns result from decorated function """ def wrap(f): @functools.wraps(f) def wrapped_f(*args, **kwargs): return restart_on_change_helper( (lambda: f(*args, **kwargs)), restart_map, stopstart, restart_functions) return wrapped_f return wrap
def artifact_mime_type(instance): """Ensure the 'mime_type' property of artifact objects comes from the Template column in the IANA media type registry. """ for key, obj in instance['objects'].items(): if ('type' in obj and obj['type'] == 'artifact' and 'mime_type' in obj): if enums.media_types(): if obj['mime_type'] not in enums.media_types(): yield JSONError("The 'mime_type' property of object '%s' " "('%s') must be an IANA registered MIME " "Type of the form 'type/subtype'." % (key, obj['mime_type']), instance['id']) else: info("Can't reach IANA website; using regex for mime types.") mime_re = re.compile(r'^(application|audio|font|image|message|model' '|multipart|text|video)/[a-zA-Z0-9.+_-]+') if not mime_re.match(obj['mime_type']): yield JSONError("The 'mime_type' property of object '%s' " "('%s') should be an IANA MIME Type of the" " form 'type/subtype'." % (key, obj['mime_type']), instance['id'])
Ensure the 'mime_type' property of artifact objects comes from the Template column in the IANA media type registry.
Below is the the instruction that describes the task: ### Input: Ensure the 'mime_type' property of artifact objects comes from the Template column in the IANA media type registry. ### Response: def artifact_mime_type(instance): """Ensure the 'mime_type' property of artifact objects comes from the Template column in the IANA media type registry. """ for key, obj in instance['objects'].items(): if ('type' in obj and obj['type'] == 'artifact' and 'mime_type' in obj): if enums.media_types(): if obj['mime_type'] not in enums.media_types(): yield JSONError("The 'mime_type' property of object '%s' " "('%s') must be an IANA registered MIME " "Type of the form 'type/subtype'." % (key, obj['mime_type']), instance['id']) else: info("Can't reach IANA website; using regex for mime types.") mime_re = re.compile(r'^(application|audio|font|image|message|model' '|multipart|text|video)/[a-zA-Z0-9.+_-]+') if not mime_re.match(obj['mime_type']): yield JSONError("The 'mime_type' property of object '%s' " "('%s') should be an IANA MIME Type of the" " form 'type/subtype'." % (key, obj['mime_type']), instance['id'])
def dense_to_human_readable(dense_repr: Sequence[Sequence[int]], index_to_label: Dict[int, str]) -> List[List[str]]: """ Converts a dense representation of model decoded output into human readable, using a mapping from indices to labels. """ transcripts = [] for dense_r in dense_repr: non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0] transcript = [index_to_label[index] for index in non_empty_phonemes] transcripts.append(transcript) return transcripts
Converts a dense representation of model decoded output into human readable, using a mapping from indices to labels.
Below is the the instruction that describes the task: ### Input: Converts a dense representation of model decoded output into human readable, using a mapping from indices to labels. ### Response: def dense_to_human_readable(dense_repr: Sequence[Sequence[int]], index_to_label: Dict[int, str]) -> List[List[str]]: """ Converts a dense representation of model decoded output into human readable, using a mapping from indices to labels. """ transcripts = [] for dense_r in dense_repr: non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0] transcript = [index_to_label[index] for index in non_empty_phonemes] transcripts.append(transcript) return transcripts
def add_property(self, *args, **kwargs): # type: (*Any, **Any) -> Property """Add a new property to this model. See :class:`pykechain.Client.create_property` for available parameters. :return: :class:`Property` :raises APIError: in case an Error occurs """ if self.category != Category.MODEL: raise APIError("Part should be of category MODEL") return self._client.create_property(self, *args, **kwargs)
Add a new property to this model. See :class:`pykechain.Client.create_property` for available parameters. :return: :class:`Property` :raises APIError: in case an Error occurs
Below is the the instruction that describes the task: ### Input: Add a new property to this model. See :class:`pykechain.Client.create_property` for available parameters. :return: :class:`Property` :raises APIError: in case an Error occurs ### Response: def add_property(self, *args, **kwargs): # type: (*Any, **Any) -> Property """Add a new property to this model. See :class:`pykechain.Client.create_property` for available parameters. :return: :class:`Property` :raises APIError: in case an Error occurs """ if self.category != Category.MODEL: raise APIError("Part should be of category MODEL") return self._client.create_property(self, *args, **kwargs)
def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1Writer): """Write the cache represented by entries to a stream :param entries: **sorted** list of entries :param stream: stream to wrap into the AdapterStreamCls - it is used for final output. :param ShaStreamCls: Type to use when writing to the stream. It produces a sha while writing to it, before the data is passed on to the wrapped stream :param extension_data: any kind of data to write as a trailer, it must begin a 4 byte identifier, followed by its size ( 4 bytes )""" # wrap the stream into a compatible writer stream = ShaStreamCls(stream) tell = stream.tell write = stream.write # header version = 2 write(b"DIRC") write(pack(">LL", version, len(entries))) # body for entry in entries: beginoffset = tell() write(entry[4]) # ctime write(entry[5]) # mtime path = entry[3] path = force_bytes(path, encoding=defenc) plen = len(path) & CE_NAMEMASK # path length assert plen == len(path), "Path %s too long to fit into index" % entry[3] flags = plen | (entry[2] & CE_NAMEMASK_INV) # clear possible previous values write(pack(">LLLLLL20sH", entry[6], entry[7], entry[0], entry[8], entry[9], entry[10], entry[1], flags)) write(path) real_size = ((tell() - beginoffset + 8) & ~7) write(b"\0" * ((beginoffset + real_size) - tell())) # END for each entry # write previously cached extensions data if extension_data is not None: stream.write(extension_data) # write the sha over the content stream.write_sha()
Write the cache represented by entries to a stream :param entries: **sorted** list of entries :param stream: stream to wrap into the AdapterStreamCls - it is used for final output. :param ShaStreamCls: Type to use when writing to the stream. It produces a sha while writing to it, before the data is passed on to the wrapped stream :param extension_data: any kind of data to write as a trailer, it must begin a 4 byte identifier, followed by its size ( 4 bytes )
Below is the the instruction that describes the task: ### Input: Write the cache represented by entries to a stream :param entries: **sorted** list of entries :param stream: stream to wrap into the AdapterStreamCls - it is used for final output. :param ShaStreamCls: Type to use when writing to the stream. It produces a sha while writing to it, before the data is passed on to the wrapped stream :param extension_data: any kind of data to write as a trailer, it must begin a 4 byte identifier, followed by its size ( 4 bytes ) ### Response: def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1Writer): """Write the cache represented by entries to a stream :param entries: **sorted** list of entries :param stream: stream to wrap into the AdapterStreamCls - it is used for final output. :param ShaStreamCls: Type to use when writing to the stream. It produces a sha while writing to it, before the data is passed on to the wrapped stream :param extension_data: any kind of data to write as a trailer, it must begin a 4 byte identifier, followed by its size ( 4 bytes )""" # wrap the stream into a compatible writer stream = ShaStreamCls(stream) tell = stream.tell write = stream.write # header version = 2 write(b"DIRC") write(pack(">LL", version, len(entries))) # body for entry in entries: beginoffset = tell() write(entry[4]) # ctime write(entry[5]) # mtime path = entry[3] path = force_bytes(path, encoding=defenc) plen = len(path) & CE_NAMEMASK # path length assert plen == len(path), "Path %s too long to fit into index" % entry[3] flags = plen | (entry[2] & CE_NAMEMASK_INV) # clear possible previous values write(pack(">LLLLLL20sH", entry[6], entry[7], entry[0], entry[8], entry[9], entry[10], entry[1], flags)) write(path) real_size = ((tell() - beginoffset + 8) & ~7) write(b"\0" * ((beginoffset + real_size) - tell())) # END for each entry # write previously cached extensions data if extension_data is not None: stream.write(extension_data) # write the sha 
over the content stream.write_sha()
def n_tasks(dec_num): """ Takes a decimal number as input and returns the number of ones in the binary representation. This translates to the number of tasks being done by an organism with a phenotype represented as a decimal number. """ bitstring = "" try: bitstring = dec_num[2:] except: bitstring = bin(int(dec_num))[2:] # cut off 0b # print bin(int(dec_num)), bitstring return bitstring.count("1")
Takes a decimal number as input and returns the number of ones in the binary representation. This translates to the number of tasks being done by an organism with a phenotype represented as a decimal number.
Below is the the instruction that describes the task: ### Input: Takes a decimal number as input and returns the number of ones in the binary representation. This translates to the number of tasks being done by an organism with a phenotype represented as a decimal number. ### Response: def n_tasks(dec_num): """ Takes a decimal number as input and returns the number of ones in the binary representation. This translates to the number of tasks being done by an organism with a phenotype represented as a decimal number. """ bitstring = "" try: bitstring = dec_num[2:] except: bitstring = bin(int(dec_num))[2:] # cut off 0b # print bin(int(dec_num)), bitstring return bitstring.count("1")
def register(self, src, trg, trg_mask=None, src_mask=None): """ Implementation of pair-wise registration and warping using point-based matching This function estimates a number of transforms (Euler, PartialAffine and Homography) using point-based matching. Features descriptor are first extracted from the pair of images using either SIFT or SURF descriptors. A brute-force point-matching algorithm estimates matching points and a transformation is computed. All transformations use RANSAC to robustly fit a tranform to the matching points. However, the feature extraction and point matching estimation can be very poor and unstable. In those cases, an identity transform is used to warp the images instead. :param src: 2D single channel source moving image :param trg: 2D single channel target reference image :param trg_mask: Mask of target image. Not used in this method. :param src_mask: Mask of source image. Not used in this method. :return: Estimated 2D transformation matrix of shape 2x3 """ # Initialise matrix and failed registrations flag warp_matrix = None # Initiate point detector ptdt = cv2.xfeatures2d.SIFT_create() if self.params['Descriptor'] == 'SIFT' else cv2.xfeatures2d.SURF_create() # create BFMatcher object bf_matcher = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True) # find the keypoints and descriptors with SIFT kp1, des1 = ptdt.detectAndCompute(self.rescale_image(src), None) kp2, des2 = ptdt.detectAndCompute(self.rescale_image(trg), None) # Match descriptors if any are found if des1 is not None and des2 is not None: matches = bf_matcher.match(des1, des2) # Sort them in the order of their distance. 
matches = sorted(matches, key=lambda x: x.distance) src_pts = np.asarray([kp1[m.queryIdx].pt for m in matches], dtype=np.float32).reshape(-1, 2) trg_pts = np.asarray([kp2[m.trainIdx].pt for m in matches], dtype=np.float32).reshape(-1, 2) # Parse model and estimate matrix if self.params['Model'] == 'PartialAffine': warp_matrix = cv2.estimateRigidTransform(src_pts, trg_pts, fullAffine=False) elif self.params['Model'] == 'Euler': model = EstimateEulerTransformModel(src_pts, trg_pts) warp_matrix = ransac(src_pts.shape[0], model, 3, self.params['MaxIters'], 1, 5) elif self.params['Model'] == 'Homography': warp_matrix, _ = cv2.findHomography(src_pts, trg_pts, cv2.RANSAC, ransacReprojThreshold=self.params['RANSACThreshold'], maxIters=self.params['MaxIters']) if warp_matrix is not None: warp_matrix = warp_matrix[:2, :] return warp_matrix
Implementation of pair-wise registration and warping using point-based matching This function estimates a number of transforms (Euler, PartialAffine and Homography) using point-based matching. Features descriptor are first extracted from the pair of images using either SIFT or SURF descriptors. A brute-force point-matching algorithm estimates matching points and a transformation is computed. All transformations use RANSAC to robustly fit a tranform to the matching points. However, the feature extraction and point matching estimation can be very poor and unstable. In those cases, an identity transform is used to warp the images instead. :param src: 2D single channel source moving image :param trg: 2D single channel target reference image :param trg_mask: Mask of target image. Not used in this method. :param src_mask: Mask of source image. Not used in this method. :return: Estimated 2D transformation matrix of shape 2x3
Below is the the instruction that describes the task: ### Input: Implementation of pair-wise registration and warping using point-based matching This function estimates a number of transforms (Euler, PartialAffine and Homography) using point-based matching. Features descriptor are first extracted from the pair of images using either SIFT or SURF descriptors. A brute-force point-matching algorithm estimates matching points and a transformation is computed. All transformations use RANSAC to robustly fit a tranform to the matching points. However, the feature extraction and point matching estimation can be very poor and unstable. In those cases, an identity transform is used to warp the images instead. :param src: 2D single channel source moving image :param trg: 2D single channel target reference image :param trg_mask: Mask of target image. Not used in this method. :param src_mask: Mask of source image. Not used in this method. :return: Estimated 2D transformation matrix of shape 2x3 ### Response: def register(self, src, trg, trg_mask=None, src_mask=None): """ Implementation of pair-wise registration and warping using point-based matching This function estimates a number of transforms (Euler, PartialAffine and Homography) using point-based matching. Features descriptor are first extracted from the pair of images using either SIFT or SURF descriptors. A brute-force point-matching algorithm estimates matching points and a transformation is computed. All transformations use RANSAC to robustly fit a tranform to the matching points. However, the feature extraction and point matching estimation can be very poor and unstable. In those cases, an identity transform is used to warp the images instead. :param src: 2D single channel source moving image :param trg: 2D single channel target reference image :param trg_mask: Mask of target image. Not used in this method. :param src_mask: Mask of source image. Not used in this method. 
:return: Estimated 2D transformation matrix of shape 2x3 """ # Initialise matrix and failed registrations flag warp_matrix = None # Initiate point detector ptdt = cv2.xfeatures2d.SIFT_create() if self.params['Descriptor'] == 'SIFT' else cv2.xfeatures2d.SURF_create() # create BFMatcher object bf_matcher = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True) # find the keypoints and descriptors with SIFT kp1, des1 = ptdt.detectAndCompute(self.rescale_image(src), None) kp2, des2 = ptdt.detectAndCompute(self.rescale_image(trg), None) # Match descriptors if any are found if des1 is not None and des2 is not None: matches = bf_matcher.match(des1, des2) # Sort them in the order of their distance. matches = sorted(matches, key=lambda x: x.distance) src_pts = np.asarray([kp1[m.queryIdx].pt for m in matches], dtype=np.float32).reshape(-1, 2) trg_pts = np.asarray([kp2[m.trainIdx].pt for m in matches], dtype=np.float32).reshape(-1, 2) # Parse model and estimate matrix if self.params['Model'] == 'PartialAffine': warp_matrix = cv2.estimateRigidTransform(src_pts, trg_pts, fullAffine=False) elif self.params['Model'] == 'Euler': model = EstimateEulerTransformModel(src_pts, trg_pts) warp_matrix = ransac(src_pts.shape[0], model, 3, self.params['MaxIters'], 1, 5) elif self.params['Model'] == 'Homography': warp_matrix, _ = cv2.findHomography(src_pts, trg_pts, cv2.RANSAC, ransacReprojThreshold=self.params['RANSACThreshold'], maxIters=self.params['MaxIters']) if warp_matrix is not None: warp_matrix = warp_matrix[:2, :] return warp_matrix
def get_query_tables(query): """ :type query str :rtype: list[str] """ tables = [] last_keyword = None last_token = None table_syntax_keywords = [ # SELECT queries 'FROM', 'WHERE', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'ON', # INSERT queries 'INTO', 'VALUES', # UPDATE queries 'UPDATE', 'SET', # Hive queries 'TABLE', # INSERT TABLE ] # print(query, get_query_tokens(query)) for token in get_query_tokens(query): # print([token, token.ttype, last_token, last_keyword]) if token.is_keyword and token.value.upper() in table_syntax_keywords: # keep the name of the last keyword, the next one can be a table name last_keyword = token.value.upper() # print('keyword', last_keyword) elif str(token) == '(': # reset the last_keyword for INSERT `foo` VALUES(id, bar) ... last_keyword = None elif token.is_keyword and str(token) in ['FORCE', 'ORDER']: # reset the last_keyword for "SELECT x FORCE INDEX" queries and "SELECT x ORDER BY" last_keyword = None elif token.is_keyword and str(token) == 'SELECT' and last_keyword in ['INTO', 'TABLE']: # reset the last_keyword for "INSERT INTO SELECT" and "INSERT TABLE SELECT" queries last_keyword = None elif token.ttype is Name or token.is_keyword: # print([last_keyword, last_token, token.value]) # analyze the name tokens, column names and where condition values if last_keyword in ['FROM', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'INTO', 'UPDATE', 'TABLE'] \ and last_token not in ['AS'] \ and token.value not in ['AS', 'SELECT']: if last_token == '.': # we have database.table notation example # append table name to the last entry of tables # as it is a database name in fact database_name = tables[-1] tables[-1] = '{}.{}'.format(database_name, token) last_keyword = None elif last_token not in [',', last_keyword]: # it's not a list of tables, e.g. SELECT * FROM foo, bar # hence, it can be the case of alias without AS, e.g. 
SELECT * FROM foo bar pass else: table_name = str(token.value.strip('`')) tables.append(table_name) last_token = token.value.upper() return unique(tables)
:type query str :rtype: list[str]
Below is the the instruction that describes the task: ### Input: :type query str :rtype: list[str] ### Response: def get_query_tables(query): """ :type query str :rtype: list[str] """ tables = [] last_keyword = None last_token = None table_syntax_keywords = [ # SELECT queries 'FROM', 'WHERE', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'ON', # INSERT queries 'INTO', 'VALUES', # UPDATE queries 'UPDATE', 'SET', # Hive queries 'TABLE', # INSERT TABLE ] # print(query, get_query_tokens(query)) for token in get_query_tokens(query): # print([token, token.ttype, last_token, last_keyword]) if token.is_keyword and token.value.upper() in table_syntax_keywords: # keep the name of the last keyword, the next one can be a table name last_keyword = token.value.upper() # print('keyword', last_keyword) elif str(token) == '(': # reset the last_keyword for INSERT `foo` VALUES(id, bar) ... last_keyword = None elif token.is_keyword and str(token) in ['FORCE', 'ORDER']: # reset the last_keyword for "SELECT x FORCE INDEX" queries and "SELECT x ORDER BY" last_keyword = None elif token.is_keyword and str(token) == 'SELECT' and last_keyword in ['INTO', 'TABLE']: # reset the last_keyword for "INSERT INTO SELECT" and "INSERT TABLE SELECT" queries last_keyword = None elif token.ttype is Name or token.is_keyword: # print([last_keyword, last_token, token.value]) # analyze the name tokens, column names and where condition values if last_keyword in ['FROM', 'JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'INTO', 'UPDATE', 'TABLE'] \ and last_token not in ['AS'] \ and token.value not in ['AS', 'SELECT']: if last_token == '.': # we have database.table notation example # append table name to the last entry of tables # as it is a database name in fact database_name = tables[-1] tables[-1] = '{}.{}'.format(database_name, token) last_keyword = None elif last_token not in [',', last_keyword]: # it's not a list of tables, e.g. 
SELECT * FROM foo, bar # hence, it can be the case of alias without AS, e.g. SELECT * FROM foo bar pass else: table_name = str(token.value.strip('`')) tables.append(table_name) last_token = token.value.upper() return unique(tables)
def add_main_manifest_entry(jar, binary): """Creates a jar manifest for the given binary. If the binary declares a main then a 'Main-Class' manifest entry will be included. """ main = binary.main if main is not None: jar.main(main)
Creates a jar manifest for the given binary. If the binary declares a main then a 'Main-Class' manifest entry will be included.
Below is the the instruction that describes the task: ### Input: Creates a jar manifest for the given binary. If the binary declares a main then a 'Main-Class' manifest entry will be included. ### Response: def add_main_manifest_entry(jar, binary): """Creates a jar manifest for the given binary. If the binary declares a main then a 'Main-Class' manifest entry will be included. """ main = binary.main if main is not None: jar.main(main)
def next(self): """ :return: The next result item. :rtype: dict :raises StopIteration: If there is no more result. """ if self._cur_item is not None: res = self._cur_item self._cur_item = None return res return next(self._ex_context)
:return: The next result item. :rtype: dict :raises StopIteration: If there is no more result.
Below is the the instruction that describes the task: ### Input: :return: The next result item. :rtype: dict :raises StopIteration: If there is no more result. ### Response: def next(self): """ :return: The next result item. :rtype: dict :raises StopIteration: If there is no more result. """ if self._cur_item is not None: res = self._cur_item self._cur_item = None return res return next(self._ex_context)
def update( self, plan=None, application_fee_percent=None, billing_cycle_anchor=None, coupon=None, prorate=djstripe_settings.PRORATION_POLICY, proration_date=None, metadata=None, quantity=None, tax_percent=None, trial_end=None, ): """ See `Customer.subscribe() <#djstripe.models.Customer.subscribe>`__ :param plan: The plan to which to subscribe the customer. :type plan: Plan or string (plan ID) :param application_fee_percent: :type application_fee_percent: :param billing_cycle_anchor: :type billing_cycle_anchor: :param coupon: :type coupon: :param prorate: Whether or not to prorate when switching plans. Default is True. :type prorate: boolean :param proration_date: If set, the proration will be calculated as though the subscription was updated at the given time. This can be used to apply exactly the same proration that was previewed with upcoming invoice endpoint. It can also be used to implement custom proration logic, such as prorating by day instead of by second, by providing the time that you wish to use for proration calculations. :type proration_date: datetime :param metadata: :type metadata: :param quantity: :type quantity: :param tax_percent: :type tax_percent: :param trial_end: :type trial_end: .. note:: The default value for ``prorate`` is the DJSTRIPE_PRORATION_POLICY setting. .. important:: Updating a subscription by changing the plan or quantity creates a new ``Subscription`` in \ Stripe (and dj-stripe). """ # Convert Plan to id if plan is not None and isinstance(plan, StripeModel): plan = plan.id kwargs = deepcopy(locals()) del kwargs["self"] stripe_subscription = self.api_retrieve() for kwarg, value in kwargs.items(): if value is not None: setattr(stripe_subscription, kwarg, value) return Subscription.sync_from_stripe_data(stripe_subscription.save())
See `Customer.subscribe() <#djstripe.models.Customer.subscribe>`__ :param plan: The plan to which to subscribe the customer. :type plan: Plan or string (plan ID) :param application_fee_percent: :type application_fee_percent: :param billing_cycle_anchor: :type billing_cycle_anchor: :param coupon: :type coupon: :param prorate: Whether or not to prorate when switching plans. Default is True. :type prorate: boolean :param proration_date: If set, the proration will be calculated as though the subscription was updated at the given time. This can be used to apply exactly the same proration that was previewed with upcoming invoice endpoint. It can also be used to implement custom proration logic, such as prorating by day instead of by second, by providing the time that you wish to use for proration calculations. :type proration_date: datetime :param metadata: :type metadata: :param quantity: :type quantity: :param tax_percent: :type tax_percent: :param trial_end: :type trial_end: .. note:: The default value for ``prorate`` is the DJSTRIPE_PRORATION_POLICY setting. .. important:: Updating a subscription by changing the plan or quantity creates a new ``Subscription`` in \ Stripe (and dj-stripe).
Below is the the instruction that describes the task: ### Input: See `Customer.subscribe() <#djstripe.models.Customer.subscribe>`__ :param plan: The plan to which to subscribe the customer. :type plan: Plan or string (plan ID) :param application_fee_percent: :type application_fee_percent: :param billing_cycle_anchor: :type billing_cycle_anchor: :param coupon: :type coupon: :param prorate: Whether or not to prorate when switching plans. Default is True. :type prorate: boolean :param proration_date: If set, the proration will be calculated as though the subscription was updated at the given time. This can be used to apply exactly the same proration that was previewed with upcoming invoice endpoint. It can also be used to implement custom proration logic, such as prorating by day instead of by second, by providing the time that you wish to use for proration calculations. :type proration_date: datetime :param metadata: :type metadata: :param quantity: :type quantity: :param tax_percent: :type tax_percent: :param trial_end: :type trial_end: .. note:: The default value for ``prorate`` is the DJSTRIPE_PRORATION_POLICY setting. .. important:: Updating a subscription by changing the plan or quantity creates a new ``Subscription`` in \ Stripe (and dj-stripe). ### Response: def update( self, plan=None, application_fee_percent=None, billing_cycle_anchor=None, coupon=None, prorate=djstripe_settings.PRORATION_POLICY, proration_date=None, metadata=None, quantity=None, tax_percent=None, trial_end=None, ): """ See `Customer.subscribe() <#djstripe.models.Customer.subscribe>`__ :param plan: The plan to which to subscribe the customer. :type plan: Plan or string (plan ID) :param application_fee_percent: :type application_fee_percent: :param billing_cycle_anchor: :type billing_cycle_anchor: :param coupon: :type coupon: :param prorate: Whether or not to prorate when switching plans. Default is True. 
:type prorate: boolean :param proration_date: If set, the proration will be calculated as though the subscription was updated at the given time. This can be used to apply exactly the same proration that was previewed with upcoming invoice endpoint. It can also be used to implement custom proration logic, such as prorating by day instead of by second, by providing the time that you wish to use for proration calculations. :type proration_date: datetime :param metadata: :type metadata: :param quantity: :type quantity: :param tax_percent: :type tax_percent: :param trial_end: :type trial_end: .. note:: The default value for ``prorate`` is the DJSTRIPE_PRORATION_POLICY setting. .. important:: Updating a subscription by changing the plan or quantity creates a new ``Subscription`` in \ Stripe (and dj-stripe). """ # Convert Plan to id if plan is not None and isinstance(plan, StripeModel): plan = plan.id kwargs = deepcopy(locals()) del kwargs["self"] stripe_subscription = self.api_retrieve() for kwarg, value in kwargs.items(): if value is not None: setattr(stripe_subscription, kwarg, value) return Subscription.sync_from_stripe_data(stripe_subscription.save())
def single_cmd(container, command, fail_nonzero=False, download_result=None, **kwargs): """ Runs a script inside a container, which is created with all its dependencies. The container is removed after it has been run, whereas the dependencies are not destroyed. The output is printed to the console. :param container: Container configuration name. :param command: Command line to run. :param fail_nonzero: Fail if the script returns with a nonzero exit code. :param download_result: Download any results that the command has written back to a temporary directory. :param kwargs: Additional keyword arguments to the run_script action. """ with temp_dir() as remote_tmp: kwargs.setdefault('command_format', ['-c', command]) results = [output.result for output in container_fabric().run_script(container, script_path=remote_tmp, **kwargs) if o.action_type == ContainerUtilAction.SCRIPT] if download_result: get(posixpath.join(remote_tmp, '*'), local_path=download_result) for res in results: puts("Exit code: {0}".format(res['exit_code'])) if res['exit_code'] == 0 or not fail_nonzero: puts(res['log']) else: error(res['log'])
Runs a script inside a container, which is created with all its dependencies. The container is removed after it has been run, whereas the dependencies are not destroyed. The output is printed to the console. :param container: Container configuration name. :param command: Command line to run. :param fail_nonzero: Fail if the script returns with a nonzero exit code. :param download_result: Download any results that the command has written back to a temporary directory. :param kwargs: Additional keyword arguments to the run_script action.
Below is the the instruction that describes the task: ### Input: Runs a script inside a container, which is created with all its dependencies. The container is removed after it has been run, whereas the dependencies are not destroyed. The output is printed to the console. :param container: Container configuration name. :param command: Command line to run. :param fail_nonzero: Fail if the script returns with a nonzero exit code. :param download_result: Download any results that the command has written back to a temporary directory. :param kwargs: Additional keyword arguments to the run_script action. ### Response: def single_cmd(container, command, fail_nonzero=False, download_result=None, **kwargs): """ Runs a script inside a container, which is created with all its dependencies. The container is removed after it has been run, whereas the dependencies are not destroyed. The output is printed to the console. :param container: Container configuration name. :param command: Command line to run. :param fail_nonzero: Fail if the script returns with a nonzero exit code. :param download_result: Download any results that the command has written back to a temporary directory. :param kwargs: Additional keyword arguments to the run_script action. """ with temp_dir() as remote_tmp: kwargs.setdefault('command_format', ['-c', command]) results = [output.result for output in container_fabric().run_script(container, script_path=remote_tmp, **kwargs) if o.action_type == ContainerUtilAction.SCRIPT] if download_result: get(posixpath.join(remote_tmp, '*'), local_path=download_result) for res in results: puts("Exit code: {0}".format(res['exit_code'])) if res['exit_code'] == 0 or not fail_nonzero: puts(res['log']) else: error(res['log'])