code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def transform(self, rules, theme_template, is_html5, context_data=None):
    """Method used to make a transformation on the content of the http
    response based on the rules and theme_templates passed as paremters

    :param rules: A file with a set of diazo rules to make a
        transformation over the original response content
    :param theme_template: A file containing the template used to format
        the original response content
    :param is_html5: A boolean parameter to identify a html5 doctype
    :param context_data: Optional context dict passed to the theme template
    :returns: A response with a content transformed based on the rules and
        theme_template
    """
    if not self.should_transform():
        self.log.info("Don't need to be transformed")
        return self.response

    # Render the theme and compile it together with the diazo rules
    # into a single XSLT transformation.
    theme = loader.render_to_string(theme_template,
                                    context=context_data,
                                    request=self.request)
    output_xslt = compile_theme(
        rules=rules,
        theme=StringIO(theme),
    )
    transform = etree.XSLT(output_xslt)
    self.log.debug("Transform: %s", transform)

    charset = get_charset(self.response.get('Content-Type'))
    try:
        decoded_response = self.response.content.decode(charset)
    except UnicodeDecodeError:
        # Best-effort fallback: drop undecodable bytes instead of failing.
        decoded_response = self.response.content.decode(charset, 'ignore')
        # BUG FIX: the original message used backslash line continuations
        # *inside* the string literal, which embedded long runs of
        # indentation spaces into the logged text. Use implicit string
        # concatenation instead.
        self.log.warning(
            "Charset is {} and type of encode used in file is "
            "different. Some unknown characteres might be "
            "ignored.".format(charset))

    content_doc = etree.fromstring(decoded_response,
                                   parser=etree.HTMLParser())
    self.response.content = transform(content_doc)

    if is_html5:
        self.set_html5_doctype()

    self.reset_headers()
    self.log.debug("Response transformer: %s", self.response)
    return self.response
Method used to make a transformation on the content of the http response based on the rules and theme_templates passed as paremters :param rules: A file with a set of diazo rules to make a transformation over the original response content :param theme_template: A file containing the template used to format the the original response content :param is_html5: A boolean parameter to identify a html5 doctype :returns: A response with a content transformed based on the rules and theme_template
Below is the the instruction that describes the task: ### Input: Method used to make a transformation on the content of the http response based on the rules and theme_templates passed as paremters :param rules: A file with a set of diazo rules to make a transformation over the original response content :param theme_template: A file containing the template used to format the the original response content :param is_html5: A boolean parameter to identify a html5 doctype :returns: A response with a content transformed based on the rules and theme_template ### Response: def transform(self, rules, theme_template, is_html5, context_data=None): """Method used to make a transformation on the content of the http response based on the rules and theme_templates passed as paremters :param rules: A file with a set of diazo rules to make a transformation over the original response content :param theme_template: A file containing the template used to format the the original response content :param is_html5: A boolean parameter to identify a html5 doctype :returns: A response with a content transformed based on the rules and theme_template """ if not self.should_transform(): self.log.info("Don't need to be transformed") return self.response theme = loader.render_to_string(theme_template, context=context_data, request=self.request) output_xslt = compile_theme( rules=rules, theme=StringIO(theme), ) transform = etree.XSLT(output_xslt) self.log.debug("Transform: %s", transform) charset = get_charset(self.response.get('Content-Type')) try: decoded_response = self.response.content.decode(charset) except UnicodeDecodeError: decoded_response = self.response.content.decode(charset, 'ignore') self.log.warning("Charset is {} and type of encode used in file is\ different. 
Some unknown characteres might be\ ignored.".format(charset)) content_doc = etree.fromstring(decoded_response, parser=etree.HTMLParser()) self.response.content = transform(content_doc) if is_html5: self.set_html5_doctype() self.reset_headers() self.log.debug("Response transformer: %s", self.response) return self.response
def query_bytes(self, transport, num_bytes, header, *data):
    """Queries for binary data

    :param transport: A transport object.
    :param num_bytes: The exact number of data bytes expected.
    :param header: The message header.
    :param data: Optional data.
    :returns: The raw unparsed data bytearray.
    """
    request = self.create_message(header, *data)
    logger.debug('SignalRecovery query bytes: %r', request)
    with transport:
        transport.write(request)
        response = transport.read_exactly(num_bytes)
        logger.debug('SignalRecovery response: %r', response)
        # Three trailing bytes follow the payload: a \0 separator,
        # then the status byte and the overload byte.
        _, status_byte, overload_byte = transport.read_exactly(3)
        logger.debug('SignalRecovery stb: %r olb: %r', status_byte, overload_byte)
        self.call_byte_handler(status_byte, overload_byte)
        # Hand back the raw, unparsed payload.
        return response
Queries for binary data :param transport: A transport object. :param num_bytes: The exact number of data bytes expected. :param header: The message header. :param data: Optional data. :returns: The raw unparsed data bytearray.
Below is the the instruction that describes the task: ### Input: Queries for binary data :param transport: A transport object. :param num_bytes: The exact number of data bytes expected. :param header: The message header. :param data: Optional data. :returns: The raw unparsed data bytearray. ### Response: def query_bytes(self, transport, num_bytes, header, *data): """Queries for binary data :param transport: A transport object. :param num_bytes: The exact number of data bytes expected. :param header: The message header. :param data: Optional data. :returns: The raw unparsed data bytearray. """ message = self.create_message(header, *data) logger.debug('SignalRecovery query bytes: %r', message) with transport: transport.write(message) response = transport.read_exactly(num_bytes) logger.debug('SignalRecovery response: %r', response) # We need to read 3 bytes, because there is a \0 character # separating the data from the status bytes. _, status_byte, overload_byte = transport.read_exactly(3) logger.debug('SignalRecovery stb: %r olb: %r', status_byte, overload_byte) self.call_byte_handler(status_byte, overload_byte) # returns raw unparsed bytes. return response
def is_transport_reaction_formulae(rxn):
    """
    Return boolean if a reaction is a transport reaction (from formulae).

    Parameters
    ----------
    rxn: cobra.Reaction
        The metabolic reaction under investigation.
    """
    # Collecting criteria to classify transporters by.
    rxn_reactants = set([met.formula for met in rxn.reactants])
    rxn_products = set([met.formula for met in rxn.products])
    # Looking for formulas that stay the same on both side of the reaction.
    transported_mets = \
        [formula for formula in rxn_reactants if formula in rxn_products]
    # Collect information on the elemental differences between
    # compartments in the reaction.
    delta_dicts = find_transported_elements(rxn)
    non_zero_array = [v for (k, v) in iteritems(delta_dicts) if v != 0]
    # Excluding reactions such as oxidoreductases where no net
    # transport of Hydrogen is occurring, but rather just an exchange of
    # electrons or charges effecting a change in protonation.
    if set(transported_mets) != set('H') and list(
        delta_dicts.keys()
    ) == ['H']:
        # BUG FIX: previously this branch used `pass` and the function
        # fell off the end, implicitly returning None instead of the
        # documented boolean. None is falsy, so callers behave the same.
        return False
    # All other reactions for which the amount of transported elements is
    # not zero, which are not part of the model's exchange nor
    # biomass reactions, are defined as transport reactions.
    # This includes reactions where the transported metabolite reacts with
    # a carrier molecule.
    if sum(non_zero_array):
        return True
    # BUG FIX: explicit boolean on the final fall-through path as well.
    return False
Return boolean if a reaction is a transport reaction (from formulae). Parameters ---------- rxn: cobra.Reaction The metabolic reaction under investigation.
Below is the the instruction that describes the task: ### Input: Return boolean if a reaction is a transport reaction (from formulae). Parameters ---------- rxn: cobra.Reaction The metabolic reaction under investigation. ### Response: def is_transport_reaction_formulae(rxn): """ Return boolean if a reaction is a transport reaction (from formulae). Parameters ---------- rxn: cobra.Reaction The metabolic reaction under investigation. """ # Collecting criteria to classify transporters by. rxn_reactants = set([met.formula for met in rxn.reactants]) rxn_products = set([met.formula for met in rxn.products]) # Looking for formulas that stay the same on both side of the reaction. transported_mets = \ [formula for formula in rxn_reactants if formula in rxn_products] # Collect information on the elemental differences between # compartments in the reaction. delta_dicts = find_transported_elements(rxn) non_zero_array = [v for (k, v) in iteritems(delta_dicts) if v != 0] # Excluding reactions such as oxidoreductases where no net # transport of Hydrogen is occurring, but rather just an exchange of # electrons or charges effecting a change in protonation. if set(transported_mets) != set('H') and list( delta_dicts.keys() ) == ['H']: pass # All other reactions for which the amount of transported elements is # not zero, which are not part of the model's exchange nor # biomass reactions, are defined as transport reactions. # This includes reactions where the transported metabolite reacts with # a carrier molecule. elif sum(non_zero_array): return True
async def read(self, *, decode: bool = False) -> Any:
    """Reads body part data.

    decode: Decodes data following by encoding method
            from Content-Encoding header. If it missed
            data remains untouched
    """
    # Nothing more to consume: return an empty bytes object.
    if self._at_eof:
        return b''
    buffer = bytearray()
    while not self._at_eof:
        chunk = await self.read_chunk(self.chunk_size)
        buffer.extend(chunk)
    return self.decode(buffer) if decode else buffer
Reads body part data. decode: Decodes data following by encoding method from Content-Encoding header. If it missed data remains untouched
Below is the the instruction that describes the task: ### Input: Reads body part data. decode: Decodes data following by encoding method from Content-Encoding header. If it missed data remains untouched ### Response: async def read(self, *, decode: bool=False) -> Any: """Reads body part data. decode: Decodes data following by encoding method from Content-Encoding header. If it missed data remains untouched """ if self._at_eof: return b'' data = bytearray() while not self._at_eof: data.extend((await self.read_chunk(self.chunk_size))) if decode: return self.decode(data) return data
def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
    """
    Print the input of the tasks to the given stream.

    Args:
        varnames: List of Abinit variables. If not None, only the variable
            in varnames are selected and printed.
        nids: List of node identifiers. By defaults all nodes are shown
        wslice: Slice object used to select works.
        stream: File-like object, Default: sys.stdout
    """
    if varnames is not None:
        # Build dictionary varname --> [(task1, value), (task2, value), ...]
        varnames = [s.strip() for s in list_strings(varnames)]
        dlist = collections.defaultdict(list)
        for task in self.select_tasks(nids=nids, wslice=wslice):
            # NOTE(review): assumes task.input.structure supports
            # as_dict(fmt="abivars") — confirm against the Task API.
            dstruct = task.input.structure.as_dict(fmt="abivars")
            for vname in varnames:
                value = task.input.get(vname, None)
                if value is None:
                    # maybe in structure?
                    value = dstruct.get(vname, None)
                if value is not None:
                    dlist[vname].append((task, value))
        for vname in varnames:
            tv_list = dlist[vname]
            if not tv_list:
                stream.write("[%s]: Found 0 tasks with this variable\n" % vname)
            else:
                stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list)))
                for i, (task, value) in enumerate(tv_list):
                    stream.write(" %s --> %s\n" % (str(value), task))
            stream.write("\n")
    else:
        # No variable filter: dump the full input of every selected task.
        lines = []
        for task in self.select_tasks(nids=nids, wslice=wslice):
            s = task.make_input(with_header=True)
            # Add info on dependencies.
            if task.deps:
                s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps)
            else:
                s += "\n\nDependencies: None"
            # Separate each task input with a ruler of '=' characters.
            lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n")
        stream.writelines(lines)
Print the input of the tasks to the given stream. Args: varnames: List of Abinit variables. If not None, only the variable in varnames are selected and printed. nids: List of node identifiers. By defaults all nodes are shown wslice: Slice object used to select works. stream: File-like object, Default: sys.stdout
Below is the the instruction that describes the task: ### Input: Print the input of the tasks to the given stream. Args: varnames: List of Abinit variables. If not None, only the variable in varnames are selected and printed. nids: List of node identifiers. By defaults all nodes are shown wslice: Slice object used to select works. stream: File-like object, Default: sys.stdout ### Response: def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout): """ Print the input of the tasks to the given stream. Args: varnames: List of Abinit variables. If not None, only the variable in varnames are selected and printed. nids: List of node identifiers. By defaults all nodes are shown wslice: Slice object used to select works. stream: File-like object, Default: sys.stdout """ if varnames is not None: # Build dictionary varname --> [(task1, value), (task2, value), ...] varnames = [s.strip() for s in list_strings(varnames)] dlist = collections.defaultdict(list) for task in self.select_tasks(nids=nids, wslice=wslice): dstruct = task.input.structure.as_dict(fmt="abivars") for vname in varnames: value = task.input.get(vname, None) if value is None: # maybe in structure? value = dstruct.get(vname, None) if value is not None: dlist[vname].append((task, value)) for vname in varnames: tv_list = dlist[vname] if not tv_list: stream.write("[%s]: Found 0 tasks with this variable\n" % vname) else: stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list))) for i, (task, value) in enumerate(tv_list): stream.write(" %s --> %s\n" % (str(value), task)) stream.write("\n") else: lines = [] for task in self.select_tasks(nids=nids, wslice=wslice): s = task.make_input(with_header=True) # Add info on dependencies. if task.deps: s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps) else: s += "\n\nDependencies: None" lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n") stream.writelines(lines)
def weighted_moving_average(data, period):
    """
    Weighted Moving Average.

    Formula:
    (P1 + 2 P2 + 3 P3 + ... + n Pn) / K
    where K = (1+2+...+n) = n(n+1)/2 and Pn is the most recent price
    """
    catch_errors.check_for_period_error(data, period)
    # K = n(n+1)/2, the sum of the weights 1..n.
    denominator = (period * (period + 1)) / 2.0
    weights = range(1, period + 1)
    wmas = []
    for start in range(len(data) - period + 1):
        window = data[start:start + period]
        weighted_sum = sum(value * weight for value, weight in zip(window, weights))
        wmas.append(weighted_sum / denominator)
    wmas = fill_for_noncomputable_vals(data, wmas)
    return wmas
Weighted Moving Average. Formula: (P1 + 2 P2 + 3 P3 + ... + n Pn) / K where K = (1+2+...+n) = n(n+1)/2 and Pn is the most recent price
Below is the the instruction that describes the task: ### Input: Weighted Moving Average. Formula: (P1 + 2 P2 + 3 P3 + ... + n Pn) / K where K = (1+2+...+n) = n(n+1)/2 and Pn is the most recent price ### Response: def weighted_moving_average(data, period): """ Weighted Moving Average. Formula: (P1 + 2 P2 + 3 P3 + ... + n Pn) / K where K = (1+2+...+n) = n(n+1)/2 and Pn is the most recent price """ catch_errors.check_for_period_error(data, period) k = (period * (period + 1)) / 2.0 wmas = [] for idx in range(0, len(data)-period+1): product = [data[idx + period_idx] * (period_idx + 1) for period_idx in range(0, period)] wma = sum(product) / k wmas.append(wma) wmas = fill_for_noncomputable_vals(data, wmas) return wmas
def unregisterFilter(self, column):
    """Unregister filter on a column of the table.

    @param column: The column header.
    """
    # BUG FIX: dict.has_key() is Python-2-only and was removed in
    # Python 3; the `in` operator is the equivalent on both versions.
    if column in self._filters:
        del self._filters[column]
Unregister filter on a column of the table. @param column: The column header.
Below is the the instruction that describes the task: ### Input: Unregister filter on a column of the table. @param column: The column header. ### Response: def unregisterFilter(self, column): """Unregister filter on a column of the table. @param column: The column header. """ if self._filters.has_key(column): del self._filters[column]
def or_(self, expression):
    ''' Adds the given expression to this instance's MongoDB ``$or``
        expression, starting a new one if one does not exist.

        **Example**: ``(User.name == 'Jeff').or_(User.name == 'Jack')``

        .. note:: The preferred usage is via an operator:
            ``User.name == 'Jeff' | User.name == 'Jack'``
    '''
    if '$or' in self.obj:
        # Already a disjunction: just append one more clause.
        self.obj['$or'].append(expression.obj)
    else:
        # Start a new $or wrapping the current expression.
        self.obj = {'$or': [self.obj, expression.obj]}
    return self
Adds the given expression to this instance's MongoDB ``$or`` expression, starting a new one if one does not exst **Example**: ``(User.name == 'Jeff').or_(User.name == 'Jack')`` .. note:: The prefered usageis via an operator: ``User.name == 'Jeff' | User.name == 'Jack'``
Below is the the instruction that describes the task: ### Input: Adds the given expression to this instance's MongoDB ``$or`` expression, starting a new one if one does not exst **Example**: ``(User.name == 'Jeff').or_(User.name == 'Jack')`` .. note:: The prefered usageis via an operator: ``User.name == 'Jeff' | User.name == 'Jack'`` ### Response: def or_(self, expression): ''' Adds the given expression to this instance's MongoDB ``$or`` expression, starting a new one if one does not exst **Example**: ``(User.name == 'Jeff').or_(User.name == 'Jack')`` .. note:: The prefered usageis via an operator: ``User.name == 'Jeff' | User.name == 'Jack'`` ''' if '$or' in self.obj: self.obj['$or'].append(expression.obj) return self self.obj = { '$or' : [self.obj, expression.obj] } return self
def dict_to_attributes_code(dict_):
    """Given a nested dict, generate a python code equivalent.

    Example:
        >>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}}
        >>> print dict_to_attributes_code(d)
        foo = 'bah'
        colors.red = 1
        colors.blue = 2

    Returns:
        str.
    """
    lines = []
    # BUG FIX: dict.iteritems() is Python-2-only; .items() iterates the
    # same pairs on both Python 2 and 3.
    for key, value in dict_.items():
        if isinstance(value, dict):
            # Recurse, then prefix each top-level sub-line with "key.".
            txt = dict_to_attributes_code(value)
            lines_ = txt.split('\n')
            for line in lines_:
                if not line.startswith(' '):
                    line = "%s.%s" % (key, line)
                lines.append(line)
        else:
            value_txt = pformat(value)
            if '\n' in value_txt:
                # Multi-line value: continue on the next line, indented.
                lines.append("%s = \\" % key)
                value_txt = indent(value_txt)
                lines.extend(value_txt.split('\n'))
            else:
                line = "%s = %s" % (key, value_txt)
                lines.append(line)

    return '\n'.join(lines)
Given a nested dict, generate a python code equivalent. Example: >>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}} >>> print dict_to_attributes_code(d) foo = 'bah' colors.red = 1 colors.blue = 2 Returns: str.
Below is the the instruction that describes the task: ### Input: Given a nested dict, generate a python code equivalent. Example: >>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}} >>> print dict_to_attributes_code(d) foo = 'bah' colors.red = 1 colors.blue = 2 Returns: str. ### Response: def dict_to_attributes_code(dict_): """Given a nested dict, generate a python code equivalent. Example: >>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}} >>> print dict_to_attributes_code(d) foo = 'bah' colors.red = 1 colors.blue = 2 Returns: str. """ lines = [] for key, value in dict_.iteritems(): if isinstance(value, dict): txt = dict_to_attributes_code(value) lines_ = txt.split('\n') for line in lines_: if not line.startswith(' '): line = "%s.%s" % (key, line) lines.append(line) else: value_txt = pformat(value) if '\n' in value_txt: lines.append("%s = \\" % key) value_txt = indent(value_txt) lines.extend(value_txt.split('\n')) else: line = "%s = %s" % (key, value_txt) lines.append(line) return '\n'.join(lines)
def midPoint(self, point):
    """identify the midpoint between two mapPoints"""
    # Average each coordinate pair to find the point halfway between.
    mid_x = (self.x + point.x) / 2.0
    mid_y = (self.y + point.y) / 2.0
    mid_z = (self.z + point.z) / 2.0
    return MapPoint(mid_x, mid_y, mid_z)
identify the midpoint between two mapPoints
Below is the the instruction that describes the task: ### Input: identify the midpoint between two mapPoints ### Response: def midPoint(self, point): """identify the midpoint between two mapPoints""" x = (self.x + point.x)/2.0 y = (self.y + point.y)/2.0 z = (self.z + point.z)/2.0 return MapPoint(x,y,z)
def plot_data(self, proj, ax):
    """
    Creates and plots the contourplot of the original data. This is done
    by evaluating the density of projected datapoints on a grid.
    """
    x, y = proj
    # Stack the two projected coordinates into a (2, N) sample matrix
    # for the kernel density estimate.
    samples = np.column_stack(
        (self.ig.independent_data[x], self.ig.dependent_data[y])
    ).T
    kde = gaussian_kde(samples)
    xx, yy = np.meshgrid(self.ig._x_points[x], self.ig._y_points[y])
    grid_points = np.column_stack((xx.flatten(), yy.flatten())).T
    density = kde.pdf(grid_points)
    # This is an fugly kludge, but it seems nescessary to make low density
    # areas show up.
    if self.ig.log_contour:
        density = np.log(density)
        vmin = -7
    else:
        vmin = None
    ax.contourf(xx, yy, density.reshape(xx.shape), 50, vmin=vmin, cmap='Blues')
Creates and plots the contourplot of the original data. This is done by evaluating the density of projected datapoints on a grid.
Below is the the instruction that describes the task: ### Input: Creates and plots the contourplot of the original data. This is done by evaluating the density of projected datapoints on a grid. ### Response: def plot_data(self, proj, ax): """ Creates and plots the contourplot of the original data. This is done by evaluating the density of projected datapoints on a grid. """ x, y = proj x_data = self.ig.independent_data[x] y_data = self.ig.dependent_data[y] projected_data = np.column_stack((x_data, y_data)).T kde = gaussian_kde(projected_data) xx, yy = np.meshgrid(self.ig._x_points[x], self.ig._y_points[y]) x_grid = xx.flatten() y_grid = yy.flatten() contour_grid = kde.pdf(np.column_stack((x_grid, y_grid)).T) # This is an fugly kludge, but it seems nescessary to make low density # areas show up. if self.ig.log_contour: contour_grid = np.log(contour_grid) vmin = -7 else: vmin = None ax.contourf(xx, yy, contour_grid.reshape(xx.shape), 50, vmin=vmin, cmap='Blues')
def authenticate_credentials(self, token):
    """
    Validate the bearer token against the OAuth provider.

    Arguments:
        token (str): Access token to validate

    Returns:
        (tuple): tuple containing:
            user (User): User associated with the access token
            access_token (str): Access token

    Raises:
        AuthenticationFailed: The user is inactive, or retrieval of user
            info failed.
    """
    try:
        user_info = self.get_user_info(token)
    except UserInfoRetrievalFailed:
        msg = 'Failed to retrieve user info. Unable to authenticate.'
        logger.error(msg)
        raise exceptions.AuthenticationFailed(msg)

    # Look up (or lazily create) the local user matching the token owner.
    user_model = get_user_model()
    user, _created = user_model.objects.get_or_create(
        username=user_info['username'], defaults=user_info)

    if not user.is_active:
        raise exceptions.AuthenticationFailed('User inactive or deleted.')

    return user, token
Validate the bearer token against the OAuth provider. Arguments: token (str): Access token to validate Returns: (tuple): tuple containing: user (User): User associated with the access token access_token (str): Access token Raises: AuthenticationFailed: The user is inactive, or retrieval of user info failed.
Below is the the instruction that describes the task: ### Input: Validate the bearer token against the OAuth provider. Arguments: token (str): Access token to validate Returns: (tuple): tuple containing: user (User): User associated with the access token access_token (str): Access token Raises: AuthenticationFailed: The user is inactive, or retrieval of user info failed. ### Response: def authenticate_credentials(self, token): """ Validate the bearer token against the OAuth provider. Arguments: token (str): Access token to validate Returns: (tuple): tuple containing: user (User): User associated with the access token access_token (str): Access token Raises: AuthenticationFailed: The user is inactive, or retrieval of user info failed. """ try: user_info = self.get_user_info(token) except UserInfoRetrievalFailed: msg = 'Failed to retrieve user info. Unable to authenticate.' logger.error(msg) raise exceptions.AuthenticationFailed(msg) user, __ = get_user_model().objects.get_or_create(username=user_info['username'], defaults=user_info) if not user.is_active: raise exceptions.AuthenticationFailed('User inactive or deleted.') return user, token
def _get_audio_filter_cmd(self): """ Return filter_complex command and output labels needed """ all_filters = [] output_labels = [] for audio_stream in self.streams['audio'].values(): if self.ffmpeg_normalize.normalization_type == 'ebu': stream_filter = audio_stream.get_second_pass_opts_ebu() else: stream_filter = audio_stream.get_second_pass_opts_peakrms() input_label = '[0:{}]'.format(audio_stream.stream_id) output_label = '[norm{}]'.format(audio_stream.stream_id) output_labels.append(output_label) all_filters.append(input_label + stream_filter + output_label) filter_complex_cmd = ';'.join(all_filters) return filter_complex_cmd, output_labels
Return filter_complex command and output labels needed
Below is the the instruction that describes the task: ### Input: Return filter_complex command and output labels needed ### Response: def _get_audio_filter_cmd(self): """ Return filter_complex command and output labels needed """ all_filters = [] output_labels = [] for audio_stream in self.streams['audio'].values(): if self.ffmpeg_normalize.normalization_type == 'ebu': stream_filter = audio_stream.get_second_pass_opts_ebu() else: stream_filter = audio_stream.get_second_pass_opts_peakrms() input_label = '[0:{}]'.format(audio_stream.stream_id) output_label = '[norm{}]'.format(audio_stream.stream_id) output_labels.append(output_label) all_filters.append(input_label + stream_filter + output_label) filter_complex_cmd = ';'.join(all_filters) return filter_complex_cmd, output_labels
def get_dimension(self, dataset, dimension):
    """The method is getting information about dimension with items"""
    endpoint = '/api/1.0/meta/dataset/{}/dimension/{}'.format(dataset, dimension)
    return self._api_get(definition.Dimension, endpoint)
The method is getting information about dimension with items
Below is the the instruction that describes the task: ### Input: The method is getting information about dimension with items ### Response: def get_dimension(self, dataset, dimension): """The method is getting information about dimension with items""" path = '/api/1.0/meta/dataset/{}/dimension/{}' return self._api_get(definition.Dimension, path.format(dataset, dimension))
def bracketOrder(
        self, action: str, quantity: float,
        limitPrice: float, takeProfitPrice: float,
        stopLossPrice: float, **kwargs) -> BracketOrder:
    """
    Create a limit order that is bracketed by a take-profit order and
    a stop-loss order. Submit the bracket like:

    .. code-block:: python

        for o in bracket:
            ib.placeOrder(contract, o)

    https://interactivebrokers.github.io/tws-api/bracket_order.html

    Args:
        action: 'BUY' or 'SELL'.
        quantity: Size of order.
        limitPrice: Limit price of entry order.
        takeProfitPrice: Limit price of profit order.
        stopLossPrice: Stop price of loss order.
    """
    assert action in ('BUY', 'SELL')
    # Child orders act in the opposite direction of the entry order.
    reverseAction = 'SELL' if action == 'BUY' else 'BUY'
    # Parent and take-profit are held back (transmit=False); only the
    # final stop-loss transmits, releasing the whole bracket at once.
    parent = LimitOrder(
        action, quantity, limitPrice,
        orderId=self.client.getReqId(),
        transmit=False,
        **kwargs)
    takeProfit = LimitOrder(
        reverseAction, quantity, takeProfitPrice,
        orderId=self.client.getReqId(),
        transmit=False,
        parentId=parent.orderId,
        **kwargs)
    stopLoss = StopOrder(
        reverseAction, quantity, stopLossPrice,
        orderId=self.client.getReqId(),
        transmit=True,
        parentId=parent.orderId,
        **kwargs)
    return BracketOrder(parent, takeProfit, stopLoss)
Create a limit order that is bracketed by a take-profit order and a stop-loss order. Submit the bracket like: .. code-block:: python for o in bracket: ib.placeOrder(contract, o) https://interactivebrokers.github.io/tws-api/bracket_order.html Args: action: 'BUY' or 'SELL'. quantity: Size of order. limitPrice: Limit price of entry order. takeProfitPrice: Limit price of profit order. stopLossPrice: Stop price of loss order.
Below is the the instruction that describes the task: ### Input: Create a limit order that is bracketed by a take-profit order and a stop-loss order. Submit the bracket like: .. code-block:: python for o in bracket: ib.placeOrder(contract, o) https://interactivebrokers.github.io/tws-api/bracket_order.html Args: action: 'BUY' or 'SELL'. quantity: Size of order. limitPrice: Limit price of entry order. takeProfitPrice: Limit price of profit order. stopLossPrice: Stop price of loss order. ### Response: def bracketOrder( self, action: str, quantity: float, limitPrice: float, takeProfitPrice: float, stopLossPrice: float, **kwargs) -> BracketOrder: """ Create a limit order that is bracketed by a take-profit order and a stop-loss order. Submit the bracket like: .. code-block:: python for o in bracket: ib.placeOrder(contract, o) https://interactivebrokers.github.io/tws-api/bracket_order.html Args: action: 'BUY' or 'SELL'. quantity: Size of order. limitPrice: Limit price of entry order. takeProfitPrice: Limit price of profit order. stopLossPrice: Stop price of loss order. """ assert action in ('BUY', 'SELL') reverseAction = 'BUY' if action == 'SELL' else 'SELL' parent = LimitOrder( action, quantity, limitPrice, orderId=self.client.getReqId(), transmit=False, **kwargs) takeProfit = LimitOrder( reverseAction, quantity, takeProfitPrice, orderId=self.client.getReqId(), transmit=False, parentId=parent.orderId, **kwargs) stopLoss = StopOrder( reverseAction, quantity, stopLossPrice, orderId=self.client.getReqId(), transmit=True, parentId=parent.orderId, **kwargs) return BracketOrder(parent, takeProfit, stopLoss)
def reset_from_scratch(self): """ restart from scratch, this is to be used if a job is restarted with more resources after a crash """ # Move output files produced in workdir to _reset otherwise check_status continues # to see the task as crashed even if the job did not run # Create reset directory if not already done. reset_dir = os.path.join(self.workdir, "_reset") reset_file = os.path.join(reset_dir, "_counter") if not os.path.exists(reset_dir): os.mkdir(reset_dir) num_reset = 1 else: with open(reset_file, "rt") as fh: num_reset = 1 + int(fh.read()) # Move files to reset and append digit with reset index. def move_file(f): if not f.exists: return try: f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset))) except OSError as exc: logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc))) for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"): move_file(getattr(self, fname)) with open(reset_file, "wt") as fh: fh.write(str(num_reset)) self.start_lockfile.remove() # Reset datetimes self.datetimes.reset() return self._restart(submit=False)
restart from scratch, this is to be used if a job is restarted with more resources after a crash
Below is the the instruction that describes the task: ### Input: restart from scratch, this is to be used if a job is restarted with more resources after a crash ### Response: def reset_from_scratch(self): """ restart from scratch, this is to be used if a job is restarted with more resources after a crash """ # Move output files produced in workdir to _reset otherwise check_status continues # to see the task as crashed even if the job did not run # Create reset directory if not already done. reset_dir = os.path.join(self.workdir, "_reset") reset_file = os.path.join(reset_dir, "_counter") if not os.path.exists(reset_dir): os.mkdir(reset_dir) num_reset = 1 else: with open(reset_file, "rt") as fh: num_reset = 1 + int(fh.read()) # Move files to reset and append digit with reset index. def move_file(f): if not f.exists: return try: f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset))) except OSError as exc: logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc))) for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"): move_file(getattr(self, fname)) with open(reset_file, "wt") as fh: fh.write(str(num_reset)) self.start_lockfile.remove() # Reset datetimes self.datetimes.reset() return self._restart(submit=False)
def get_block_from_consensus( self, consensus_hash ): """ Get the block number with the given consensus hash. Return None if there is no such block. """ query = 'SELECT block_id FROM snapshots WHERE consensus_hash = ?;' args = (consensus_hash,) con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) res = None for r in rows: res = r['block_id'] con.close() return res
Get the block number with the given consensus hash. Return None if there is no such block.
Below is the the instruction that describes the task: ### Input: Get the block number with the given consensus hash. Return None if there is no such block. ### Response: def get_block_from_consensus( self, consensus_hash ): """ Get the block number with the given consensus hash. Return None if there is no such block. """ query = 'SELECT block_id FROM snapshots WHERE consensus_hash = ?;' args = (consensus_hash,) con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) res = None for r in rows: res = r['block_id'] con.close() return res
def delete_Variable(self,name): ''' pops a variable from class and delete it from parameter list :parameter name: name of the parameter to delete ''' self.message(1,'Deleting variable {0}'.format(name)) self.par_list=self.par_list[self.par_list != name] return self.__dict__.pop(name)
pops a variable from class and delete it from parameter list :parameter name: name of the parameter to delete
Below is the the instruction that describes the task: ### Input: pops a variable from class and delete it from parameter list :parameter name: name of the parameter to delete ### Response: def delete_Variable(self,name): ''' pops a variable from class and delete it from parameter list :parameter name: name of the parameter to delete ''' self.message(1,'Deleting variable {0}'.format(name)) self.par_list=self.par_list[self.par_list != name] return self.__dict__.pop(name)
def config(show, reset, **params): """Inspect and configure parameters in your local sentinelhub configuration file \b Example: sentinelhub.config --show sentinelhub.config --instance_id <new instance id> sentinelhub.config --max_download_attempts 5 --download_sleep_time 20 --download_timeout_seconds 120 """ sh_config = SHConfig() if reset: sh_config.reset() for param, value in params.items(): if value is not None: try: value = int(value) except ValueError: if value.lower() == 'true': value = True elif value.lower() == 'false': value = False if getattr(sh_config, param) != value: setattr(sh_config, param, value) old_config = SHConfig() sh_config.save() for param in sh_config.get_params(): if sh_config[param] != old_config[param]: value = sh_config[param] if isinstance(value, str): value = "'{}'".format(value) click.echo("The value of parameter '{}' was updated to {}".format(param, value)) if show: click.echo(str(sh_config)) click.echo('Configuration file location: {}'.format(sh_config.get_config_location()))
Inspect and configure parameters in your local sentinelhub configuration file \b Example: sentinelhub.config --show sentinelhub.config --instance_id <new instance id> sentinelhub.config --max_download_attempts 5 --download_sleep_time 20 --download_timeout_seconds 120
Below is the the instruction that describes the task: ### Input: Inspect and configure parameters in your local sentinelhub configuration file \b Example: sentinelhub.config --show sentinelhub.config --instance_id <new instance id> sentinelhub.config --max_download_attempts 5 --download_sleep_time 20 --download_timeout_seconds 120 ### Response: def config(show, reset, **params): """Inspect and configure parameters in your local sentinelhub configuration file \b Example: sentinelhub.config --show sentinelhub.config --instance_id <new instance id> sentinelhub.config --max_download_attempts 5 --download_sleep_time 20 --download_timeout_seconds 120 """ sh_config = SHConfig() if reset: sh_config.reset() for param, value in params.items(): if value is not None: try: value = int(value) except ValueError: if value.lower() == 'true': value = True elif value.lower() == 'false': value = False if getattr(sh_config, param) != value: setattr(sh_config, param, value) old_config = SHConfig() sh_config.save() for param in sh_config.get_params(): if sh_config[param] != old_config[param]: value = sh_config[param] if isinstance(value, str): value = "'{}'".format(value) click.echo("The value of parameter '{}' was updated to {}".format(param, value)) if show: click.echo(str(sh_config)) click.echo('Configuration file location: {}'.format(sh_config.get_config_location()))
def fork_detached_process (): """Fork this process, creating a subprocess detached from the current context. Returns a :class:`pwkit.Holder` instance with information about what happened. Its fields are: whoami A string, either "original" or "forked" depending on which process we are. pipe An open binary file descriptor. It is readable by the original process and writable by the forked one. This can be used to pass information from the forked process to the one that launched it. forkedpid The PID of the forked process. Note that this process is *not* a child of the original one, so waitpid() and friends may not be used on it. Example:: from pwkit import cli info = cli.fork_detached_process () if info.whoami == 'original': message = info.pipe.readline ().decode ('utf-8') if not len (message): cli.die ('forked process (PID %d) appears to have died', info.forkedpid) info.pipe.close () print ('forked process said:', message) else: info.pipe.write ('hello world'.encode ('utf-8')) info.pipe.close () As always, the *vital* thing to understand is that immediately after a call to this function, you have **two** nearly-identical but **entirely independent** programs that are now both running simultaneously. Until you execute some kind of ``if`` statement, the only difference between the two processes is the value of the ``info.whoami`` field and whether ``info.pipe`` is readable or writeable. This function uses :func:`os.fork` twice and also calls :func:`os.setsid` in between the two invocations, which creates new session and process groups for the forked subprocess. It does *not* perform other operations that you might want, such as changing the current directory, dropping privileges, closing file descriptors, and so on. For more discussion of best practices when it comes to “daemonizing” processes, see (stalled) `PEP 3143`_. .. _PEP 3143: https://www.python.org/dev/peps/pep-3143/ """ import os, struct from .. 
import Holder payload = struct.Struct ('L') info = Holder () readfd, writefd = os.pipe () pid1 = os.fork () if pid1 > 0: info.whoami = 'original' info.pipe = os.fdopen (readfd, 'rb') os.close (writefd) retcode = os.waitpid (pid1, 0)[1] if retcode: raise Exception ('child process exited with error code %d' % retcode) (info.forkedpid,) = payload.unpack (info.pipe.read (payload.size)) else: # We're the intermediate child process. Start new session and process # groups, detaching us from TTY signals and whatnot. os.setsid () pid2 = os.fork () if pid2 > 0: # We're the intermediate process; we're all done os._exit (0) # If we get here, we're the detached child process. info.whoami = 'forked' info.pipe = os.fdopen (writefd, 'wb') os.close (readfd) info.forkedpid = os.getpid () info.pipe.write (payload.pack (info.forkedpid)) return info
Fork this process, creating a subprocess detached from the current context. Returns a :class:`pwkit.Holder` instance with information about what happened. Its fields are: whoami A string, either "original" or "forked" depending on which process we are. pipe An open binary file descriptor. It is readable by the original process and writable by the forked one. This can be used to pass information from the forked process to the one that launched it. forkedpid The PID of the forked process. Note that this process is *not* a child of the original one, so waitpid() and friends may not be used on it. Example:: from pwkit import cli info = cli.fork_detached_process () if info.whoami == 'original': message = info.pipe.readline ().decode ('utf-8') if not len (message): cli.die ('forked process (PID %d) appears to have died', info.forkedpid) info.pipe.close () print ('forked process said:', message) else: info.pipe.write ('hello world'.encode ('utf-8')) info.pipe.close () As always, the *vital* thing to understand is that immediately after a call to this function, you have **two** nearly-identical but **entirely independent** programs that are now both running simultaneously. Until you execute some kind of ``if`` statement, the only difference between the two processes is the value of the ``info.whoami`` field and whether ``info.pipe`` is readable or writeable. This function uses :func:`os.fork` twice and also calls :func:`os.setsid` in between the two invocations, which creates new session and process groups for the forked subprocess. It does *not* perform other operations that you might want, such as changing the current directory, dropping privileges, closing file descriptors, and so on. For more discussion of best practices when it comes to “daemonizing” processes, see (stalled) `PEP 3143`_. .. _PEP 3143: https://www.python.org/dev/peps/pep-3143/
Below is the the instruction that describes the task: ### Input: Fork this process, creating a subprocess detached from the current context. Returns a :class:`pwkit.Holder` instance with information about what happened. Its fields are: whoami A string, either "original" or "forked" depending on which process we are. pipe An open binary file descriptor. It is readable by the original process and writable by the forked one. This can be used to pass information from the forked process to the one that launched it. forkedpid The PID of the forked process. Note that this process is *not* a child of the original one, so waitpid() and friends may not be used on it. Example:: from pwkit import cli info = cli.fork_detached_process () if info.whoami == 'original': message = info.pipe.readline ().decode ('utf-8') if not len (message): cli.die ('forked process (PID %d) appears to have died', info.forkedpid) info.pipe.close () print ('forked process said:', message) else: info.pipe.write ('hello world'.encode ('utf-8')) info.pipe.close () As always, the *vital* thing to understand is that immediately after a call to this function, you have **two** nearly-identical but **entirely independent** programs that are now both running simultaneously. Until you execute some kind of ``if`` statement, the only difference between the two processes is the value of the ``info.whoami`` field and whether ``info.pipe`` is readable or writeable. This function uses :func:`os.fork` twice and also calls :func:`os.setsid` in between the two invocations, which creates new session and process groups for the forked subprocess. It does *not* perform other operations that you might want, such as changing the current directory, dropping privileges, closing file descriptors, and so on. For more discussion of best practices when it comes to “daemonizing” processes, see (stalled) `PEP 3143`_. .. 
_PEP 3143: https://www.python.org/dev/peps/pep-3143/ ### Response: def fork_detached_process (): """Fork this process, creating a subprocess detached from the current context. Returns a :class:`pwkit.Holder` instance with information about what happened. Its fields are: whoami A string, either "original" or "forked" depending on which process we are. pipe An open binary file descriptor. It is readable by the original process and writable by the forked one. This can be used to pass information from the forked process to the one that launched it. forkedpid The PID of the forked process. Note that this process is *not* a child of the original one, so waitpid() and friends may not be used on it. Example:: from pwkit import cli info = cli.fork_detached_process () if info.whoami == 'original': message = info.pipe.readline ().decode ('utf-8') if not len (message): cli.die ('forked process (PID %d) appears to have died', info.forkedpid) info.pipe.close () print ('forked process said:', message) else: info.pipe.write ('hello world'.encode ('utf-8')) info.pipe.close () As always, the *vital* thing to understand is that immediately after a call to this function, you have **two** nearly-identical but **entirely independent** programs that are now both running simultaneously. Until you execute some kind of ``if`` statement, the only difference between the two processes is the value of the ``info.whoami`` field and whether ``info.pipe`` is readable or writeable. This function uses :func:`os.fork` twice and also calls :func:`os.setsid` in between the two invocations, which creates new session and process groups for the forked subprocess. It does *not* perform other operations that you might want, such as changing the current directory, dropping privileges, closing file descriptors, and so on. For more discussion of best practices when it comes to “daemonizing” processes, see (stalled) `PEP 3143`_. .. 
_PEP 3143: https://www.python.org/dev/peps/pep-3143/ """ import os, struct from .. import Holder payload = struct.Struct ('L') info = Holder () readfd, writefd = os.pipe () pid1 = os.fork () if pid1 > 0: info.whoami = 'original' info.pipe = os.fdopen (readfd, 'rb') os.close (writefd) retcode = os.waitpid (pid1, 0)[1] if retcode: raise Exception ('child process exited with error code %d' % retcode) (info.forkedpid,) = payload.unpack (info.pipe.read (payload.size)) else: # We're the intermediate child process. Start new session and process # groups, detaching us from TTY signals and whatnot. os.setsid () pid2 = os.fork () if pid2 > 0: # We're the intermediate process; we're all done os._exit (0) # If we get here, we're the detached child process. info.whoami = 'forked' info.pipe = os.fdopen (writefd, 'wb') os.close (readfd) info.forkedpid = os.getpid () info.pipe.write (payload.pack (info.forkedpid)) return info
def O2_sat(P_air, temp): """Calculate saturaed oxygen concentration in mg/L for 278 K < T < 318 K :param P_air: Air pressure with appropriate units :type P_air: float :param temp: Water temperature with appropriate units :type temp: float :return: Saturated oxygen concentration in mg/L :rtype: float :Examples: >>> from aguaclara.research.environmental_processes_analysis import O2_sat >>> from aguaclara.core.units import unit_registry as u >>> round(O2_sat(1*u.atm , 300*u.kelvin), 7) <Quantity(8.0931572, 'milligram / liter')> """ fraction_O2 = 0.21 P_O2 = P_air * fraction_O2 return ((P_O2.to(u.atm).magnitude) * u.mg/u.L*np.exp(1727 / temp.to(u.K).magnitude - 2.105))
Calculate saturaed oxygen concentration in mg/L for 278 K < T < 318 K :param P_air: Air pressure with appropriate units :type P_air: float :param temp: Water temperature with appropriate units :type temp: float :return: Saturated oxygen concentration in mg/L :rtype: float :Examples: >>> from aguaclara.research.environmental_processes_analysis import O2_sat >>> from aguaclara.core.units import unit_registry as u >>> round(O2_sat(1*u.atm , 300*u.kelvin), 7) <Quantity(8.0931572, 'milligram / liter')>
Below is the the instruction that describes the task: ### Input: Calculate saturaed oxygen concentration in mg/L for 278 K < T < 318 K :param P_air: Air pressure with appropriate units :type P_air: float :param temp: Water temperature with appropriate units :type temp: float :return: Saturated oxygen concentration in mg/L :rtype: float :Examples: >>> from aguaclara.research.environmental_processes_analysis import O2_sat >>> from aguaclara.core.units import unit_registry as u >>> round(O2_sat(1*u.atm , 300*u.kelvin), 7) <Quantity(8.0931572, 'milligram / liter')> ### Response: def O2_sat(P_air, temp): """Calculate saturaed oxygen concentration in mg/L for 278 K < T < 318 K :param P_air: Air pressure with appropriate units :type P_air: float :param temp: Water temperature with appropriate units :type temp: float :return: Saturated oxygen concentration in mg/L :rtype: float :Examples: >>> from aguaclara.research.environmental_processes_analysis import O2_sat >>> from aguaclara.core.units import unit_registry as u >>> round(O2_sat(1*u.atm , 300*u.kelvin), 7) <Quantity(8.0931572, 'milligram / liter')> """ fraction_O2 = 0.21 P_O2 = P_air * fraction_O2 return ((P_O2.to(u.atm).magnitude) * u.mg/u.L*np.exp(1727 / temp.to(u.K).magnitude - 2.105))
def crc16_nojit(s, crc=0): """CRC16 implementation acording to CCITT standards.""" for ch in bytearray(s): # bytearray's elements are integers in both python 2 and 3 crc = ((crc << 8) & 0xFFFF) ^ _crc16_tab[((crc >> 8) & 0xFF) ^ (ch & 0xFF)] crc &= 0xFFFF return crc
CRC16 implementation acording to CCITT standards.
Below is the the instruction that describes the task: ### Input: CRC16 implementation acording to CCITT standards. ### Response: def crc16_nojit(s, crc=0): """CRC16 implementation acording to CCITT standards.""" for ch in bytearray(s): # bytearray's elements are integers in both python 2 and 3 crc = ((crc << 8) & 0xFFFF) ^ _crc16_tab[((crc >> 8) & 0xFF) ^ (ch & 0xFF)] crc &= 0xFFFF return crc
def get_msgbuf(self): '''create a binary message buffer for a message''' values = [] for i in range(len(self.fmt.columns)): if i >= len(self.fmt.msg_mults): continue mul = self.fmt.msg_mults[i] name = self.fmt.columns[i] if name == 'Mode' and 'ModeNum' in self.fmt.columns: name = 'ModeNum' v = self.__getattr__(name) if mul is not None: v /= mul values.append(v) return struct.pack("BBB", 0xA3, 0x95, self.fmt.type) + struct.pack(self.fmt.msg_struct, *values)
create a binary message buffer for a message
Below is the the instruction that describes the task: ### Input: create a binary message buffer for a message ### Response: def get_msgbuf(self): '''create a binary message buffer for a message''' values = [] for i in range(len(self.fmt.columns)): if i >= len(self.fmt.msg_mults): continue mul = self.fmt.msg_mults[i] name = self.fmt.columns[i] if name == 'Mode' and 'ModeNum' in self.fmt.columns: name = 'ModeNum' v = self.__getattr__(name) if mul is not None: v /= mul values.append(v) return struct.pack("BBB", 0xA3, 0x95, self.fmt.type) + struct.pack(self.fmt.msg_struct, *values)
def connect(self, url, **options): """ Connect to url. url is websocket url scheme. ie. ws://host:port/resource You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> ws = WebSocket() >>> ws.connect("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "suppress_origin" -> suppress outputting origin header. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. tuple of username and password. default is None "redirect_limit" -> number of redirects to follow. "subprotocols" - array of available sub protocols. default is None. "socket" - pre-initialized stream socket. """ # FIXME: "subprotocols" are getting lost, not passed down # FIXME: "header", "cookie", "origin" and "host" too self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout) self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options), options.pop('socket', None)) try: self.handshake_response = handshake(self.sock, *addrs, **options) for attempt in range(options.pop('redirect_limit', 3)): if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES: url = self.handshake_response.headers['location'] self.sock.close() self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options), options.pop('socket', None)) self.handshake_response = handshake(self.sock, *addrs, **options) self.connected = True except: if self.sock: self.sock.close() self.sock = None raise
Connect to url. url is websocket url scheme. ie. ws://host:port/resource You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> ws = WebSocket() >>> ws.connect("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "suppress_origin" -> suppress outputting origin header. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. tuple of username and password. default is None "redirect_limit" -> number of redirects to follow. "subprotocols" - array of available sub protocols. default is None. "socket" - pre-initialized stream socket.
Below is the the instruction that describes the task: ### Input: Connect to url. url is websocket url scheme. ie. ws://host:port/resource You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> ws = WebSocket() >>> ws.connect("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "suppress_origin" -> suppress outputting origin header. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. tuple of username and password. default is None "redirect_limit" -> number of redirects to follow. "subprotocols" - array of available sub protocols. default is None. "socket" - pre-initialized stream socket. ### Response: def connect(self, url, **options): """ Connect to url. url is websocket url scheme. ie. ws://host:port/resource You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> ws = WebSocket() >>> ws.connect("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "suppress_origin" -> suppress outputting origin header. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. 
tuple of username and password. default is None "redirect_limit" -> number of redirects to follow. "subprotocols" - array of available sub protocols. default is None. "socket" - pre-initialized stream socket. """ # FIXME: "subprotocols" are getting lost, not passed down # FIXME: "header", "cookie", "origin" and "host" too self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout) self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options), options.pop('socket', None)) try: self.handshake_response = handshake(self.sock, *addrs, **options) for attempt in range(options.pop('redirect_limit', 3)): if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES: url = self.handshake_response.headers['location'] self.sock.close() self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options), options.pop('socket', None)) self.handshake_response = handshake(self.sock, *addrs, **options) self.connected = True except: if self.sock: self.sock.close() self.sock = None raise
def _config_options(self): """Apply options set in attributes to Treeview""" self._config_sortable(self._sortable) self._config_drag_cols(self._drag_cols)
Apply options set in attributes to Treeview
Below is the the instruction that describes the task: ### Input: Apply options set in attributes to Treeview ### Response: def _config_options(self): """Apply options set in attributes to Treeview""" self._config_sortable(self._sortable) self._config_drag_cols(self._drag_cols)
def url_request(target_url, output_file): """ Use urllib to download the requested file from the target URL. Use the click progress bar to print download progress :param target_url: URL from which the file is to be downloaded :param output_file: Name and path of local copy of file """ # Create the request request = urllib.request.urlopen(target_url) # Open the destination file to write with open(output_file, 'wb') as targets: # Calculate the total file size - will be used by the progress bar total_length = int(request.headers.get('content-length')) # Create a click progress bar using the total length calculated above with click.progressbar(length=total_length, label='Downloading files') as bar: while True: # Break up the download into chunks of 4096 bytes data = request.read(4096) # Break the loop when the download finishes/errors if not data: break # Write the chunk to file targets.write(data) # Update the progress bar bar.update(len(data))
Use urllib to download the requested file from the target URL. Use the click progress bar to print download progress :param target_url: URL from which the file is to be downloaded :param output_file: Name and path of local copy of file
Below is the the instruction that describes the task: ### Input: Use urllib to download the requested file from the target URL. Use the click progress bar to print download progress :param target_url: URL from which the file is to be downloaded :param output_file: Name and path of local copy of file ### Response: def url_request(target_url, output_file): """ Use urllib to download the requested file from the target URL. Use the click progress bar to print download progress :param target_url: URL from which the file is to be downloaded :param output_file: Name and path of local copy of file """ # Create the request request = urllib.request.urlopen(target_url) # Open the destination file to write with open(output_file, 'wb') as targets: # Calculate the total file size - will be used by the progress bar total_length = int(request.headers.get('content-length')) # Create a click progress bar using the total length calculated above with click.progressbar(length=total_length, label='Downloading files') as bar: while True: # Break up the download into chunks of 4096 bytes data = request.read(4096) # Break the loop when the download finishes/errors if not data: break # Write the chunk to file targets.write(data) # Update the progress bar bar.update(len(data))
def get_object(self, *args, **kwargs): """ Should memoize the object to avoid multiple query if get_object is used many times in the view """ self.category_instance = get_object_or_404(Category, slug=self.kwargs['category_slug']) return get_object_or_404(Post, thread__id=self.kwargs['thread_id'], thread__category=self.category_instance, pk=self.kwargs['post_id'])
Should memoize the object to avoid multiple query if get_object is used many times in the view
Below is the the instruction that describes the task: ### Input: Should memoize the object to avoid multiple query if get_object is used many times in the view ### Response: def get_object(self, *args, **kwargs): """ Should memoize the object to avoid multiple query if get_object is used many times in the view """ self.category_instance = get_object_or_404(Category, slug=self.kwargs['category_slug']) return get_object_or_404(Post, thread__id=self.kwargs['thread_id'], thread__category=self.category_instance, pk=self.kwargs['post_id'])
def check_match(self, **kwargs): """ Check if the package name matches against a project that is blacklisted in the configuration. Parameters ========== name: str The normalized package name of the package/project to check against the blacklist. Returns ======= bool: True if it matches, False otherwise. """ if not self.whitelist_package_names: return False name = kwargs.get("name", None) if not name: return False if name in self.whitelist_package_names: logger.info(f"Package {name!r} is whitelisted") return False return True
Check if the package name matches against a project that is blacklisted in the configuration. Parameters ========== name: str The normalized package name of the package/project to check against the blacklist. Returns ======= bool: True if it matches, False otherwise.
Below is the the instruction that describes the task: ### Input: Check if the package name matches against a project that is blacklisted in the configuration. Parameters ========== name: str The normalized package name of the package/project to check against the blacklist. Returns ======= bool: True if it matches, False otherwise. ### Response: def check_match(self, **kwargs): """ Check if the package name matches against a project that is blacklisted in the configuration. Parameters ========== name: str The normalized package name of the package/project to check against the blacklist. Returns ======= bool: True if it matches, False otherwise. """ if not self.whitelist_package_names: return False name = kwargs.get("name", None) if not name: return False if name in self.whitelist_package_names: logger.info(f"Package {name!r} is whitelisted") return False return True
def clear_commentarea_cache(comment): """ Clean the plugin output cache of a rendered plugin. """ parent = comment.content_object for instance in CommentsAreaItem.objects.parent(parent): instance.clear_cache()
Clean the plugin output cache of a rendered plugin.
Below is the the instruction that describes the task: ### Input: Clean the plugin output cache of a rendered plugin. ### Response: def clear_commentarea_cache(comment): """ Clean the plugin output cache of a rendered plugin. """ parent = comment.content_object for instance in CommentsAreaItem.objects.parent(parent): instance.clear_cache()
def get_aad_token(endpoint, no_verify): #pylint: disable-msg=too-many-locals """Get AAD token""" from azure.servicefabric.service_fabric_client_ap_is import ( ServiceFabricClientAPIs ) from sfctl.auth import ClientCertAuthentication from sfctl.config import set_aad_metadata auth = ClientCertAuthentication(None, None, no_verify) client = ServiceFabricClientAPIs(auth, base_url=endpoint) aad_metadata = client.get_aad_metadata() if aad_metadata.type != "aad": raise CLIError("Not AAD cluster") aad_resource = aad_metadata.metadata tenant_id = aad_resource.tenant authority_uri = aad_resource.login + '/' + tenant_id context = adal.AuthenticationContext(authority_uri, api_version=None) cluster_id = aad_resource.cluster client_id = aad_resource.client set_aad_metadata(authority_uri, cluster_id, client_id) code = context.acquire_user_code(cluster_id, client_id) print(code['message']) token = context.acquire_token_with_device_code( cluster_id, code, client_id) print("Succeed!") return token, context.cache
Get AAD token
Below is the the instruction that describes the task: ### Input: Get AAD token ### Response: def get_aad_token(endpoint, no_verify): #pylint: disable-msg=too-many-locals """Get AAD token""" from azure.servicefabric.service_fabric_client_ap_is import ( ServiceFabricClientAPIs ) from sfctl.auth import ClientCertAuthentication from sfctl.config import set_aad_metadata auth = ClientCertAuthentication(None, None, no_verify) client = ServiceFabricClientAPIs(auth, base_url=endpoint) aad_metadata = client.get_aad_metadata() if aad_metadata.type != "aad": raise CLIError("Not AAD cluster") aad_resource = aad_metadata.metadata tenant_id = aad_resource.tenant authority_uri = aad_resource.login + '/' + tenant_id context = adal.AuthenticationContext(authority_uri, api_version=None) cluster_id = aad_resource.cluster client_id = aad_resource.client set_aad_metadata(authority_uri, cluster_id, client_id) code = context.acquire_user_code(cluster_id, client_id) print(code['message']) token = context.acquire_token_with_device_code( cluster_id, code, client_id) print("Succeed!") return token, context.cache
def send_select_and_operate_command(self, command, index,
                                    callback=asiodnp3.PrintingCommandCallback.Get(),
                                    config=opendnp3.TaskConfig().Default()):
    """
    Select and operate a single command.

    Thin wrapper that forwards the request to the underlying DNP3 master.

    :param command: command to operate
    :param index: index of the command
    :param callback: callback that will be invoked upon completion or failure
    :param config: optional configuration that controls normal callbacks and
                   allows the user to be specified for SA
    """
    # NOTE(review): both defaults are evaluated once at import time, so all
    # callers omitting callback/config share the same objects -- confirm the
    # PrintingCommandCallback/TaskConfig instances are safe to share.
    self.master.SelectAndOperate(command, index, callback, config)
Select and operate a single command :param command: command to operate :param index: index of the command :param callback: callback that will be invoked upon completion or failure :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
Below is the the instruction that describes the task: ### Input: Select and operate a single command :param command: command to operate :param index: index of the command :param callback: callback that will be invoked upon completion or failure :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA ### Response: def send_select_and_operate_command(self, command, index, callback=asiodnp3.PrintingCommandCallback.Get(), config=opendnp3.TaskConfig().Default()): """ Select and operate a single command :param command: command to operate :param index: index of the command :param callback: callback that will be invoked upon completion or failure :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA """ self.master.SelectAndOperate(command, index, callback, config)
def progressbar(index, total, percentage_step=10, logger='print',
                log_level=logging.INFO, reprint=True,
                time=True, length=20, fmt_string=None, reset=False):
    """Plots a progress bar to the given `logger` for large for loops.

    To be used inside a for-loop at the end of the loop:

    .. code-block:: python

        for irun in range(42):
            my_costly_job() # Your expensive function
            progressbar(index=irun, total=42, reprint=True)
            # shows a growing progressbar

    There is no initialisation of the progressbar necessary before the
    for-loop. The progressbar will be reset automatically if used in
    another for-loop.

    :param index: Current index of for-loop
    :param total: Total size of for-loop
    :param percentage_step: Steps with which the bar should be plotted
    :param logger:
        Logger to write to - with level INFO. If string 'print' is given,
        the print statement is used. Use ``None`` if you don't want to print
        or log the progressbar statement.
    :param log_level: Log level with which to log.
    :param reprint:
        If no new line should be plotted but carriage return
        (works only for printing)
    :param time: If the remaining time should be estimated and displayed
    :param length: Length of the bar in `=` signs.
    :param fmt_string:
        A string which contains exactly one `%s` in order to incorporate the
        progressbar. If such a string is given, ``fmt_string % progressbar``
        is printed/logged.
    :param reset:
        If the progressbar should be restarted. If progressbar is called with
        a lower index than the one before, the progressbar is automatically
        restarted.
    :return:
        The progressbar string or `None` if the string has not been updated.
    """
    # All state handling and rendering lives in the module-private
    # _progressbar; this public wrapper only forwards its arguments.
    return _progressbar(index=index, total=total,
                        percentage_step=percentage_step,
                        logger=logger,
                        log_level=log_level,
                        reprint=reprint,
                        time=time,
                        length=length,
                        fmt_string=fmt_string,
                        reset=reset)
Plots a progress bar to the given `logger` for large for loops. To be used inside a for-loop at the end of the loop: .. code-block:: python for irun in range(42): my_costly_job() # Your expensive function progressbar(index=irun, total=42, reprint=True) # shows a growing progressbar There is no initialisation of the progressbar necessary before the for-loop. The progressbar will be reset automatically if used in another for-loop. :param index: Current index of for-loop :param total: Total size of for-loop :param percentage_step: Steps with which the bar should be plotted :param logger: Logger to write to - with level INFO. If string 'print' is given, the print statement is used. Use ``None`` if you don't want to print or log the progressbar statement. :param log_level: Log level with which to log. :param reprint: If no new line should be plotted but carriage return (works only for printing) :param time: If the remaining time should be estimated and displayed :param length: Length of the bar in `=` signs. :param fmt_string: A string which contains exactly one `%s` in order to incorporate the progressbar. If such a string is given, ``fmt_string % progressbar`` is printed/logged. :param reset: If the progressbar should be restarted. If progressbar is called with a lower index than the one before, the progressbar is automatically restarted. :return: The progressbar string or `None` if the string has not been updated.
Below is the the instruction that describes the task: ### Input: Plots a progress bar to the given `logger` for large for loops. To be used inside a for-loop at the end of the loop: .. code-block:: python for irun in range(42): my_costly_job() # Your expensive function progressbar(index=irun, total=42, reprint=True) # shows a growing progressbar There is no initialisation of the progressbar necessary before the for-loop. The progressbar will be reset automatically if used in another for-loop. :param index: Current index of for-loop :param total: Total size of for-loop :param percentage_step: Steps with which the bar should be plotted :param logger: Logger to write to - with level INFO. If string 'print' is given, the print statement is used. Use ``None`` if you don't want to print or log the progressbar statement. :param log_level: Log level with which to log. :param reprint: If no new line should be plotted but carriage return (works only for printing) :param time: If the remaining time should be estimated and displayed :param length: Length of the bar in `=` signs. :param fmt_string: A string which contains exactly one `%s` in order to incorporate the progressbar. If such a string is given, ``fmt_string % progressbar`` is printed/logged. :param reset: If the progressbar should be restarted. If progressbar is called with a lower index than the one before, the progressbar is automatically restarted. :return: The progressbar string or `None` if the string has not been updated. ### Response: def progressbar(index, total, percentage_step=10, logger='print', log_level=logging.INFO, reprint=True, time=True, length=20, fmt_string=None, reset=False): """Plots a progress bar to the given `logger` for large for loops. To be used inside a for-loop at the end of the loop: .. 
code-block:: python for irun in range(42): my_costly_job() # Your expensive function progressbar(index=irun, total=42, reprint=True) # shows a growing progressbar There is no initialisation of the progressbar necessary before the for-loop. The progressbar will be reset automatically if used in another for-loop. :param index: Current index of for-loop :param total: Total size of for-loop :param percentage_step: Steps with which the bar should be plotted :param logger: Logger to write to - with level INFO. If string 'print' is given, the print statement is used. Use ``None`` if you don't want to print or log the progressbar statement. :param log_level: Log level with which to log. :param reprint: If no new line should be plotted but carriage return (works only for printing) :param time: If the remaining time should be estimated and displayed :param length: Length of the bar in `=` signs. :param fmt_string: A string which contains exactly one `%s` in order to incorporate the progressbar. If such a string is given, ``fmt_string % progressbar`` is printed/logged. :param reset: If the progressbar should be restarted. If progressbar is called with a lower index than the one before, the progressbar is automatically restarted. :return: The progressbar string or `None` if the string has not been updated. """ return _progressbar(index=index, total=total, percentage_step=percentage_step, logger=logger, log_level=log_level, reprint=reprint, time=time, length=length, fmt_string=fmt_string, reset=reset)
def named_function(name):
    """Gets a fully named module-global object.

    Resolves the dotted path *name* ("pkg.mod.func") to the function it
    names, unwrapping any ``original_func`` attributes left on it by
    decorators so the undecorated callable is returned.

    :param name: fully-qualified dotted path to the function
    :returns: the resolved (and fully unwrapped) function object
    """
    module_path, _, attr_name = name.rpartition('.')
    module = named_object(module_path)
    func = getattr(module, attr_name)
    # Follow the whole decorator chain, not just one level, so nested
    # wrappers still yield the innermost original function.
    while hasattr(func, 'original_func'):
        func = func.original_func
    return func
Gets a fully named module-global object.
Below is the instruction that describes the task: ### Input: Gets a fully named module-global object. ### Response: def named_function(name): """Gets a fully named module-global object.""" name_parts = name.split('.') module = named_object('.'.join(name_parts[:-1])) func = getattr(module, name_parts[-1]) if hasattr(func, 'original_func'): func = func.original_func return func
def _generate_timezone(date, local_tz):
    """Format the UTC offset for *date* as a ``[+-]HH:MM`` string.

    The offset in seconds comes from ``_calculate_offset(date, local_tz)``:

        0     -> '+00:00'
        1800  -> '+00:30'
        -3600 -> '-01:00'

    :param date: date/datetime to compute the offset for
    :param local_tz: bool, whether to use the local timezone
    """
    total_seconds = _calculate_offset(date, local_tz)
    sign = "-" if total_seconds < 0 else "+"
    hours, leftover = divmod(abs(total_seconds), 3600)
    minutes = leftover // 60
    return '%c%02d:%02d' % (sign, hours, minutes)
input : date : date type local_tz : bool offset generated from _calculate_offset offset in seconds offset = 0 -> +00:00 offset = 1800 -> +00:30 offset = -3600 -> -01:00
Below is the the instruction that describes the task: ### Input: input : date : date type local_tz : bool offset generated from _calculate_offset offset in seconds offset = 0 -> +00:00 offset = 1800 -> +00:30 offset = -3600 -> -01:00 ### Response: def _generate_timezone(date, local_tz): """ input : date : date type local_tz : bool offset generated from _calculate_offset offset in seconds offset = 0 -> +00:00 offset = 1800 -> +00:30 offset = -3600 -> -01:00 """ offset = _calculate_offset(date, local_tz) hour = abs(offset) // 3600 minute = abs(offset) % 3600 // 60 if offset < 0: return '%c%02d:%02d' % ("-", hour, minute) else: return '%c%02d:%02d' % ("+", hour, minute)
def play_move_msg(self, move_msg):
    """Play a move described by a text message.

    Parameters
    ----------
    move_msg : string
        a valid message should be in: "[move type]: [X], [Y]"
    """
    # Decode the message into its components, then delegate to play_move.
    kind, col, row = self.parse_move(move_msg)
    self.play_move(kind, col, row)
Another play move function for move message. Parameters ---------- move_msg : string a valid message should be in: "[move type]: [X], [Y]"
Below is the the instruction that describes the task: ### Input: Another play move function for move message. Parameters ---------- move_msg : string a valid message should be in: "[move type]: [X], [Y]" ### Response: def play_move_msg(self, move_msg): """Another play move function for move message. Parameters ---------- move_msg : string a valid message should be in: "[move type]: [X], [Y]" """ move_type, move_x, move_y = self.parse_move(move_msg) self.play_move(move_type, move_x, move_y)
def __summary(self):
    """A plaintext summary of the Action, useful for debugging."""
    parts = ["Time: %s\n" % self.when,
             "Comitter: %s\n" % self.editor]
    inst = self.timemachine.presently
    # Pick the verb matching the recorded action type.
    if self.action_type == "dl":
        verb = "Deleted"
    elif self.action_type == "cr":
        verb = "Created"
    else:
        verb = "Modified"
    parts.append("%s %s\n" % (verb, inst._object_type_text()))
    parts.append(self._details(nohtml=True))
    return "".join(parts)
A plaintext summary of the Action, useful for debugging.
Below is the the instruction that describes the task: ### Input: A plaintext summary of the Action, useful for debugging. ### Response: def __summary(self): """A plaintext summary of the Action, useful for debugging.""" text = "Time: %s\n" % self.when text += "Comitter: %s\n" % self.editor inst = self.timemachine.presently if self.action_type == "dl": text += "Deleted %s\n" % inst._object_type_text() elif self.action_type == "cr": text += "Created %s\n" % inst._object_type_text() else: text += "Modified %s\n" % inst._object_type_text() text += self._details(nohtml=True) return text
def make_signature(params, hmac_key):
    """
    Calculate a HMAC-SHA-1 (using hmac_key) of all the params except "h=".

    Parameters are serialized as "key=value" pairs in sorted key order and
    joined with "&". Accepts the key and produces the message as either
    str or bytes; str is encoded as UTF-8 (hmac requires bytes on Python 3).

    Returns the base64-encoded signature (a bytes object).
    """
    # produce a list of "key=value" for all entries in params except `h'
    pairs = [x + "=" + ''.join(params[x])
             for x in sorted(params.keys()) if x != "h"]
    message = '&'.join(pairs)
    # hmac.new rejects str under Python 3; encode transparently so both
    # str and bytes inputs keep working.
    if isinstance(message, str):
        message = message.encode('utf-8')
    if isinstance(hmac_key, str):
        hmac_key = hmac_key.encode('utf-8')
    sha = hmac.new(hmac_key, message, hashlib.sha1)
    return base64.b64encode(sha.digest())
Calculate a HMAC-SHA-1 (using hmac_key) of all the params except "h=". Returns base64 encoded signature as string.
Below is the the instruction that describes the task: ### Input: Calculate a HMAC-SHA-1 (using hmac_key) of all the params except "h=". Returns base64 encoded signature as string. ### Response: def make_signature(params, hmac_key): """ Calculate a HMAC-SHA-1 (using hmac_key) of all the params except "h=". Returns base64 encoded signature as string. """ # produce a list of "key=value" for all entries in params except `h' pairs = [x + "=" + ''.join(params[x]) for x in sorted(params.keys()) if x != "h"] sha = hmac.new(hmac_key, '&'.join(pairs), hashlib.sha1) return base64.b64encode(sha.digest())
def validate_port_or_colon_separated_port_range(port_range):
    """Accepts a port number or a single-colon separated range."""
    pieces = port_range.split(':')
    # More than two pieces means more than one colon -> invalid range.
    if len(pieces) > 2:
        raise ValidationError(_("One colon allowed in port range"))
    for piece in pieces:
        validate_port_range(piece)
Accepts a port number or a single-colon separated range.
Below is the the instruction that describes the task: ### Input: Accepts a port number or a single-colon separated range. ### Response: def validate_port_or_colon_separated_port_range(port_range): """Accepts a port number or a single-colon separated range.""" if port_range.count(':') > 1: raise ValidationError(_("One colon allowed in port range")) ports = port_range.split(':') for port in ports: validate_port_range(port)
def present(name, **kwargs):
    '''
    Ensures that the host group exists, eventually creates new host group.

    .. versionadded:: 2016.3.0

    :param name: name of the host group
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        create_testing_host_group:
            zabbix_hostgroup.present:
                - name: 'My hostgroup name'
    '''
    # Forward only explicitly supplied connection overrides to the zabbix
    # execution module; anything absent falls back to opts/pillar defaults.
    connection_args = {}
    if '_connection_user' in kwargs:
        connection_args['_connection_user'] = kwargs['_connection_user']
    if '_connection_password' in kwargs:
        connection_args['_connection_password'] = kwargs['_connection_password']
    if '_connection_url' in kwargs:
        connection_args['_connection_url'] = kwargs['_connection_url']

    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    # Comment and change messages
    comment_hostgroup_created = 'Host group {0} created.'.format(name)
    comment_hostgroup_notcreated = 'Unable to create host group: {0}. '.format(name)
    comment_hostgroup_exists = 'Host group {0} already exists.'.format(name)
    changes_hostgroup_created = {name: {'old': 'Host group {0} does not exist.'.format(name),
                                        'new': 'Host group {0} created.'.format(name),
                                        }
                                 }

    hostgroup_exists = __salt__['zabbix.hostgroup_exists'](name, **connection_args)

    # Dry run, test=true mode: report what would happen without touching
    # Zabbix. result=None marks "changes pending" for the state system.
    if __opts__['test']:
        if hostgroup_exists:
            ret['result'] = True
            ret['comment'] = comment_hostgroup_exists
        else:
            ret['result'] = None
            ret['comment'] = comment_hostgroup_created
            ret['changes'] = changes_hostgroup_created
        return ret

    if hostgroup_exists:
        ret['result'] = True
        ret['comment'] = comment_hostgroup_exists
    else:
        hostgroup_create = __salt__['zabbix.hostgroup_create'](name, **connection_args)

        if 'error' not in hostgroup_create:
            ret['result'] = True
            ret['comment'] = comment_hostgroup_created
            ret['changes'] = changes_hostgroup_created
        else:
            ret['result'] = False
            # Append the backend's error text to the generic failure message.
            ret['comment'] = comment_hostgroup_notcreated + six.text_type(hostgroup_create['error'])

    return ret
Ensures that the host group exists, eventually creates new host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml create_testing_host_group: zabbix_hostgroup.present: - name: 'My hostgroup name'
Below is the the instruction that describes the task: ### Input: Ensures that the host group exists, eventually creates new host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml create_testing_host_group: zabbix_hostgroup.present: - name: 'My hostgroup name' ### Response: def present(name, **kwargs): ''' Ensures that the host group exists, eventually creates new host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml create_testing_host_group: zabbix_hostgroup.present: - name: 'My hostgroup name' ''' connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_hostgroup_created = 'Host group {0} created.'.format(name) comment_hostgroup_notcreated = 'Unable to create host group: {0}. 
'.format(name) comment_hostgroup_exists = 'Host group {0} already exists.'.format(name) changes_hostgroup_created = {name: {'old': 'Host group {0} does not exist.'.format(name), 'new': 'Host group {0} created.'.format(name), } } hostgroup_exists = __salt__['zabbix.hostgroup_exists'](name, **connection_args) # Dry run, test=true mode if __opts__['test']: if hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_exists else: ret['result'] = None ret['comment'] = comment_hostgroup_created ret['changes'] = changes_hostgroup_created return ret if hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_exists else: hostgroup_create = __salt__['zabbix.hostgroup_create'](name, **connection_args) if 'error' not in hostgroup_create: ret['result'] = True ret['comment'] = comment_hostgroup_created ret['changes'] = changes_hostgroup_created else: ret['result'] = False ret['comment'] = comment_hostgroup_notcreated + six.text_type(hostgroup_create['error']) return ret
def addTab(self, tab, *args):
    """
    Add a tab to the tab widget and set the ``parent_tab_widget``
    attribute on the tab instance.
    """
    # Link the tab back to its container first, so the tab can already
    # reach its parent by the time Qt's addTab runs.
    tab.parent_tab_widget = self
    super(BaseTabWidget, self).addTab(tab, *args)
Adds a tab to the tab widget, this function set the parent_tab_widget attribute on the tab instance.
Below is the the instruction that describes the task: ### Input: Adds a tab to the tab widget, this function set the parent_tab_widget attribute on the tab instance. ### Response: def addTab(self, tab, *args): """ Adds a tab to the tab widget, this function set the parent_tab_widget attribute on the tab instance. """ tab.parent_tab_widget = self super(BaseTabWidget, self).addTab(tab, *args)
def __is_outside_of_builddir(project, path_to_check):
    """Check if a path lies outside of the project's build directory.

    :param project: object exposing a ``builddir`` path attribute
    :param path_to_check: path to test against the build directory
    :returns: True when *path_to_check* is not inside ``project.builddir``
    """
    bdir = project.builddir
    try:
        # commonpath compares whole path components, unlike commonprefix
        # which is character-wise and would wrongly treat "/a/build2" as
        # being inside "/a/build".
        return os.path.commonpath([path_to_check, bdir]) != os.path.normpath(bdir)
    except ValueError:
        # Mixed absolute/relative paths (or different drives on Windows)
        # cannot share a common path -> definitely outside.
        return True
Check if a project lies outside of its expected directory.
Below is the the instruction that describes the task: ### Input: Check if a project lies outside of its expected directory. ### Response: def __is_outside_of_builddir(project, path_to_check): """Check if a project lies outside of its expected directory.""" bdir = project.builddir cprefix = os.path.commonprefix([path_to_check, bdir]) return cprefix != bdir
def draw_tree(ax, tx, rmargin=.3, treecolor="k", leafcolor="k", supportcolor="k",
              outgroup=None, reroot=True, gffdir=None, sizes=None,
              trunc_name=None, SH=None, scutoff=0, barcodefile=None,
              leafcolorfile=None, leaffont=12):
    """
    main function for drawing phylogenetic tree

    Renders the Newick tree string ``tx`` onto matplotlib axes ``ax`` in
    normalized [0, 1] coordinates: leaf labels (optionally colored, barcoded,
    or truncated), branch lines, support values, a scale bar, optional exon
    structure glyphs from ``gffdir``, and an optional SH-test annotation.
    """
    t = Tree(tx)
    if reroot:
        if outgroup:
            R = t.get_common_ancestor(*outgroup)
        else:
            # Calculate the midpoint node
            R = t.get_midpoint_outgroup()
        if R != t:
            t.set_outgroup(R)

    farthest, max_dist = t.get_farthest_leaf()

    # Layout margins in normalized axes coordinates; rmargin reserves room
    # on the right for labels/glyphs.
    margin = .05
    xstart = margin
    ystart = 1 - margin
    canvas = 1 - rmargin - 2 * margin
    tip = .005
    # scale the tree
    scale = canvas / max_dist

    num_leaves = len(t.get_leaf_names())
    yinterval = canvas / (num_leaves + 1)

    # get exons structures, if any
    structures = {}
    if gffdir:
        gffiles = glob("{0}/*.gff*".format(gffdir))
        setups, ratio = get_setups(gffiles, canvas=rmargin / 2, noUTR=True)
        structures = dict((a, (b, c)) for a, b, c in setups)

    if sizes:
        sizes = Sizes(sizes).mapping

    if barcodefile:
        barcodemap = DictFile(barcodefile, delimiter="\t")

    if leafcolorfile:
        leafcolors = DictFile(leafcolorfile, delimiter="\t")

    coords = {}
    i = 0
    # Postorder traversal: children are placed before their parent, so a
    # parent's y is the mean of its children's y positions.
    for n in t.traverse("postorder"):
        dist = n.get_distance(t)
        xx = xstart + scale * dist

        if n.is_leaf():
            yy = ystart - i * yinterval
            i += 1

            if trunc_name:
                name = truncate_name(n.name, rule=trunc_name)
            else:
                name = n.name

            if barcodefile:
                name = decode_name(name, barcodemap)

            sname = name.replace("_", "-")

            try:
                lc = leafcolors[n.name]
            except Exception:
                lc = leafcolor
            else:
                # if color is given as "R,G,B"
                # NOTE(review): under Python 3 map() returns a lazy iterator;
                # matplotlib may need a tuple/list here -- verify.
                if "," in lc:
                    lc = map(float, lc.split(","))

            ax.text(xx + tip, yy, sname, va="center",
                    fontstyle="italic", size=leaffont, color=lc)

            gname = n.name.split("_")[0]
            if gname in structures:
                mrnabed, cdsbeds = structures[gname]
                ExonGlyph(ax, 1 - rmargin / 2, yy, mrnabed, cdsbeds,
                          align="right", ratio=ratio)
            if sizes and gname in sizes:
                size = sizes[gname]
                size = size / 3 - 1  # base pair converted to amino acid
                size = "{0}aa".format(size)
                ax.text(1 - rmargin / 2 + tip, yy, size, size=leaffont)

        else:
            children = [coords[x] for x in n.get_children()]
            children_x, children_y = zip(*children)
            min_y, max_y = min(children_y), max(children_y)
            # plot the vertical bar
            ax.plot((xx, xx), (min_y, max_y), "-", color=treecolor)
            # plot the horizontal bar
            for cx, cy in children:
                ax.plot((xx, cx), (cy, cy), "-", color=treecolor)
            yy = sum(children_y) * 1. / len(children_y)
            support = n.support
            # Support values > 1 are assumed to be percentages; rescale to
            # a fraction before comparing against the cutoff.
            if support > 1:
                support = support / 100.
            if not n.is_root():
                if support > scutoff / 100.:
                    ax.text(xx, yy+.005, "{0:d}".format(int(abs(support * 100))),
                            ha="right", size=leaffont, color=supportcolor)

        coords[n] = (xx, yy)

    # scale bar
    br = .1
    x1 = xstart + .1
    x2 = x1 + br * scale
    yy = ystart - i * yinterval
    ax.plot([x1, x1], [yy - tip, yy + tip], "-", color=treecolor)
    ax.plot([x2, x2], [yy - tip, yy + tip], "-", color=treecolor)
    ax.plot([x1, x2], [yy, yy], "-", color=treecolor)
    ax.text((x1 + x2) / 2, yy - tip, "{0:g}".format(br),
            va="top", ha="center", size=leaffont, color=treecolor)

    if SH is not None:
        xs = x1
        ys = (margin + yy) / 2.
        ax.text(xs, ys, "SH test against ref tree: {0}"
                .format(SH), ha="left", size=leaffont, color="g")

    normalize_axes(ax)
main function for drawing phylogenetic tree
Below is the the instruction that describes the task: ### Input: main function for drawing phylogenetic tree ### Response: def draw_tree(ax, tx, rmargin=.3, treecolor="k", leafcolor="k", supportcolor="k", outgroup=None, reroot=True, gffdir=None, sizes=None, trunc_name=None, SH=None, scutoff=0, barcodefile=None, leafcolorfile=None, leaffont=12): """ main function for drawing phylogenetic tree """ t = Tree(tx) if reroot: if outgroup: R = t.get_common_ancestor(*outgroup) else: # Calculate the midpoint node R = t.get_midpoint_outgroup() if R != t: t.set_outgroup(R) farthest, max_dist = t.get_farthest_leaf() margin = .05 xstart = margin ystart = 1 - margin canvas = 1 - rmargin - 2 * margin tip = .005 # scale the tree scale = canvas / max_dist num_leaves = len(t.get_leaf_names()) yinterval = canvas / (num_leaves + 1) # get exons structures, if any structures = {} if gffdir: gffiles = glob("{0}/*.gff*".format(gffdir)) setups, ratio = get_setups(gffiles, canvas=rmargin / 2, noUTR=True) structures = dict((a, (b, c)) for a, b, c in setups) if sizes: sizes = Sizes(sizes).mapping if barcodefile: barcodemap = DictFile(barcodefile, delimiter="\t") if leafcolorfile: leafcolors = DictFile(leafcolorfile, delimiter="\t") coords = {} i = 0 for n in t.traverse("postorder"): dist = n.get_distance(t) xx = xstart + scale * dist if n.is_leaf(): yy = ystart - i * yinterval i += 1 if trunc_name: name = truncate_name(n.name, rule=trunc_name) else: name = n.name if barcodefile: name = decode_name(name, barcodemap) sname = name.replace("_", "-") try: lc = leafcolors[n.name] except Exception: lc = leafcolor else: # if color is given as "R,G,B" if "," in lc: lc = map(float, lc.split(",")) ax.text(xx + tip, yy, sname, va="center", fontstyle="italic", size=leaffont, color=lc) gname = n.name.split("_")[0] if gname in structures: mrnabed, cdsbeds = structures[gname] ExonGlyph(ax, 1 - rmargin / 2, yy, mrnabed, cdsbeds, align="right", ratio=ratio) if sizes and gname in sizes: size = sizes[gname] size 
= size / 3 - 1 # base pair converted to amino acid size = "{0}aa".format(size) ax.text(1 - rmargin / 2 + tip, yy, size, size=leaffont) else: children = [coords[x] for x in n.get_children()] children_x, children_y = zip(*children) min_y, max_y = min(children_y), max(children_y) # plot the vertical bar ax.plot((xx, xx), (min_y, max_y), "-", color=treecolor) # plot the horizontal bar for cx, cy in children: ax.plot((xx, cx), (cy, cy), "-", color=treecolor) yy = sum(children_y) * 1. / len(children_y) support = n.support if support > 1: support = support / 100. if not n.is_root(): if support > scutoff / 100.: ax.text(xx, yy+.005, "{0:d}".format(int(abs(support * 100))), ha="right", size=leaffont, color=supportcolor) coords[n] = (xx, yy) # scale bar br = .1 x1 = xstart + .1 x2 = x1 + br * scale yy = ystart - i * yinterval ax.plot([x1, x1], [yy - tip, yy + tip], "-", color=treecolor) ax.plot([x2, x2], [yy - tip, yy + tip], "-", color=treecolor) ax.plot([x1, x2], [yy, yy], "-", color=treecolor) ax.text((x1 + x2) / 2, yy - tip, "{0:g}".format(br), va="top", ha="center", size=leaffont, color=treecolor) if SH is not None: xs = x1 ys = (margin + yy) / 2. ax.text(xs, ys, "SH test against ref tree: {0}" .format(SH), ha="left", size=leaffont, color="g") normalize_axes(ax)
def classify(X, domain=None):
    """
    Text classification

    Parameters
    ==========
    X: {unicode, str}
        raw sentence
    domain: {None, 'bank'}
        domain of text
            * None: general domain
            * bank: bank domain

    Returns
    =======
    tokens: list
        categories of sentence (``None`` for an empty input)
    """
    # Nothing to classify.
    if X == "":
        return None
    # Domain-specific classifier.
    if domain == 'bank':
        return bank.classify(X)
    # Fall through to the general-domain fastText model (singleton).
    predictor = FastTextPredictor.Instance()
    return predictor.predict(X)
Text classification Parameters ========== X: {unicode, str} raw sentence domain: {None, 'bank'} domain of text * None: general domain * bank: bank domain Returns ======= tokens: list categories of sentence Examples -------- >>> # -*- coding: utf-8 -*- >>> from underthesea import classify >>> sentence = "HLV ngoại đòi gần tỷ mỗi tháng dẫn dắt tuyển Việt Nam" >>> classify(sentence) ['The thao'] >>> sentence = "Tôi rất thích cách phục vụ của nhân viên BIDV" >>> classify(sentence, domain='bank') ('CUSTOMER SUPPORT',)
Below is the the instruction that describes the task: ### Input: Text classification Parameters ========== X: {unicode, str} raw sentence domain: {None, 'bank'} domain of text * None: general domain * bank: bank domain Returns ======= tokens: list categories of sentence Examples -------- >>> # -*- coding: utf-8 -*- >>> from underthesea import classify >>> sentence = "HLV ngoại đòi gần tỷ mỗi tháng dẫn dắt tuyển Việt Nam" >>> classify(sentence) ['The thao'] >>> sentence = "Tôi rất thích cách phục vụ của nhân viên BIDV" >>> classify(sentence, domain='bank') ('CUSTOMER SUPPORT',) ### Response: def classify(X, domain=None): """ Text classification Parameters ========== X: {unicode, str} raw sentence domain: {None, 'bank'} domain of text * None: general domain * bank: bank domain Returns ======= tokens: list categories of sentence Examples -------- >>> # -*- coding: utf-8 -*- >>> from underthesea import classify >>> sentence = "HLV ngoại đòi gần tỷ mỗi tháng dẫn dắt tuyển Việt Nam" >>> classify(sentence) ['The thao'] >>> sentence = "Tôi rất thích cách phục vụ của nhân viên BIDV" >>> classify(sentence, domain='bank') ('CUSTOMER SUPPORT',) """ if X == "": return None if domain == 'bank': return bank.classify(X) # domain is general clf = FastTextPredictor.Instance() return clf.predict(X)
def fromstring(cls, dis_string):
    """Create a DisRSTTree instance from a string containing a *.dis parse.

    The string is written to a temporary file (text or binary mode,
    matching the type of *dis_string*), the tree is parsed from that file,
    and the file is removed afterwards -- even if parsing fails.
    """
    # NamedTemporaryFile defaults to binary mode; pick the mode matching
    # the input so both str and bytes payloads work on Python 3.
    mode = 'wb' if isinstance(dis_string, bytes) else 'w'
    temp = tempfile.NamedTemporaryFile(mode=mode, delete=False)
    try:
        temp.write(dis_string)
        temp.close()
        dis_tree = cls(dis_filepath=temp.name)
    finally:
        temp.close()  # no-op if already closed; needed if write() raised
        os.unlink(temp.name)
    return dis_tree
Create a DisRSTTree instance from a string containing a *.dis parse.
Below is the the instruction that describes the task: ### Input: Create a DisRSTTree instance from a string containing a *.dis parse. ### Response: def fromstring(cls, dis_string): """Create a DisRSTTree instance from a string containing a *.dis parse.""" temp = tempfile.NamedTemporaryFile(delete=False) temp.write(dis_string) temp.close() dis_tree = cls(dis_filepath=temp.name) os.unlink(temp.name) return dis_tree
def soup(self):
    '''
    Returns HTML as a BeautifulSoup element.
    '''
    root = Tag(name=self.tagname, builder=BUILDER)
    root.attrs = self.attributes
    for child in flatten(self.components):
        if hasattr(child, 'soup'):
            # Nested component: recurse into its own soup tree.
            root.append(child.soup())
        elif type(child) is str:
            # Plain text node.
            root.append(str(child))
        # Anything else is deliberately not integrated.
    return root
Returns HTML as a BeautifulSoup element.
Below is the the instruction that describes the task: ### Input: Returns HTML as a BeautifulSoup element. ### Response: def soup(self): ''' Returns HTML as a BeautifulSoup element. ''' components_soup = Tag(name=self.tagname, builder=BUILDER) components_soup.attrs = self.attributes for c in flatten(self.components): if hasattr(c, 'soup'): components_soup.append(c.soup()) elif type(c) in (str, ): # components_soup.append(BeautifulSoup(str(c))) components_soup.append(str(c)) # else: # Component should not be integrated # pass return components_soup
def perform_command(self): """ Perform command and return the appropriate exit code. :rtype: int """ self.log(u"This function should be overloaded in derived classes") self.log([u"Invoked with %s", self.actual_arguments]) return self.NO_ERROR_EXIT_CODE
Perform command and return the appropriate exit code. :rtype: int
Below is the the instruction that describes the task: ### Input: Perform command and return the appropriate exit code. :rtype: int ### Response: def perform_command(self): """ Perform command and return the appropriate exit code. :rtype: int """ self.log(u"This function should be overloaded in derived classes") self.log([u"Invoked with %s", self.actual_arguments]) return self.NO_ERROR_EXIT_CODE
def has_next(self): """ Checks for one more item than last on this page. """ try: next_item = self.paginator.object_list[self.paginator.per_page] except IndexError: return False return True
Checks for one more item than last on this page.
Below is the the instruction that describes the task: ### Input: Checks for one more item than last on this page. ### Response: def has_next(self): """ Checks for one more item than last on this page. """ try: next_item = self.paginator.object_list[self.paginator.per_page] except IndexError: return False return True
def multi_load(inputs, ac_parser=None, ac_template=False, ac_context=None, **options): r""" Load multiple config files. .. note:: :func:`load` is a preferable alternative and this API should be used only if there is a need to emphasize given inputs are multiple ones. The first argument 'inputs' may be a list of a file paths or a glob pattern specifying them or a pathlib.Path object represents file[s] or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from. About glob patterns, for example, is, if a.yml, b.yml and c.yml are in the dir /etc/foo/conf.d/, the followings give same results:: multi_load(["/etc/foo/conf.d/a.yml", "/etc/foo/conf.d/b.yml", "/etc/foo/conf.d/c.yml", ]) multi_load("/etc/foo/conf.d/*.yml") :param inputs: A list of file path or a glob pattern such as r'/a/b/\*.json'to list of files, file or file-like object or pathlib.Path object represents the file or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from :param ac_parser: Forced parser type or parser object :param ac_template: Assume configuration file may be a template file and try to compile it AAR if True :param ac_context: Mapping object presents context to instantiate template :param options: Optional keyword arguments: - ac_dict, ac_ordered, ac_schema and ac_query are the options common in :func:`single_load`, :func:`multi_load`, :func:`load`: and :func:`loads`. See the descriptions of them in :func:`single_load`. - Options specific to this function and :func:`load`: - ac_merge (merge): Specify strategy of how to merge results loaded from multiple configuration files. See the doc of :mod:`anyconfig.dicts` for more details of strategies. The default is anyconfig.dicts.MS_DICTS. - ac_marker (marker): Globbing marker to detect paths patterns. - Common backend options: - ignore_missing: Ignore and just return empty result if given file 'path' does not exist. 
- Backend specific options such as {"indent": 2} for JSON backend :return: Mapping object or any query result might be primitive objects :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError """ marker = options.setdefault("ac_marker", options.get("marker", '*')) schema = _maybe_schema(ac_template=ac_template, ac_context=ac_context, **options) options["ac_schema"] = None # Avoid to load schema more than twice. paths = anyconfig.utils.expand_paths(inputs, marker=marker) if anyconfig.utils.are_same_file_types(paths): ac_parser = find(paths[0], forced_type=ac_parser) cnf = ac_context for path in paths: opts = options.copy() cups = _single_load(path, ac_parser=ac_parser, ac_template=ac_template, ac_context=cnf, **opts) if cups: if cnf is None: cnf = cups else: merge(cnf, cups, **options) if cnf is None: return anyconfig.dicts.convert_to({}, **options) cnf = _try_validate(cnf, schema, **options) return anyconfig.query.query(cnf, **options)
r""" Load multiple config files. .. note:: :func:`load` is a preferable alternative and this API should be used only if there is a need to emphasize given inputs are multiple ones. The first argument 'inputs' may be a list of a file paths or a glob pattern specifying them or a pathlib.Path object represents file[s] or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from. About glob patterns, for example, is, if a.yml, b.yml and c.yml are in the dir /etc/foo/conf.d/, the followings give same results:: multi_load(["/etc/foo/conf.d/a.yml", "/etc/foo/conf.d/b.yml", "/etc/foo/conf.d/c.yml", ]) multi_load("/etc/foo/conf.d/*.yml") :param inputs: A list of file path or a glob pattern such as r'/a/b/\*.json'to list of files, file or file-like object or pathlib.Path object represents the file or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from :param ac_parser: Forced parser type or parser object :param ac_template: Assume configuration file may be a template file and try to compile it AAR if True :param ac_context: Mapping object presents context to instantiate template :param options: Optional keyword arguments: - ac_dict, ac_ordered, ac_schema and ac_query are the options common in :func:`single_load`, :func:`multi_load`, :func:`load`: and :func:`loads`. See the descriptions of them in :func:`single_load`. - Options specific to this function and :func:`load`: - ac_merge (merge): Specify strategy of how to merge results loaded from multiple configuration files. See the doc of :mod:`anyconfig.dicts` for more details of strategies. The default is anyconfig.dicts.MS_DICTS. - ac_marker (marker): Globbing marker to detect paths patterns. - Common backend options: - ignore_missing: Ignore and just return empty result if given file 'path' does not exist. 
- Backend specific options such as {"indent": 2} for JSON backend :return: Mapping object or any query result might be primitive objects :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
Below is the the instruction that describes the task: ### Input: r""" Load multiple config files. .. note:: :func:`load` is a preferable alternative and this API should be used only if there is a need to emphasize given inputs are multiple ones. The first argument 'inputs' may be a list of a file paths or a glob pattern specifying them or a pathlib.Path object represents file[s] or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from. About glob patterns, for example, is, if a.yml, b.yml and c.yml are in the dir /etc/foo/conf.d/, the followings give same results:: multi_load(["/etc/foo/conf.d/a.yml", "/etc/foo/conf.d/b.yml", "/etc/foo/conf.d/c.yml", ]) multi_load("/etc/foo/conf.d/*.yml") :param inputs: A list of file path or a glob pattern such as r'/a/b/\*.json'to list of files, file or file-like object or pathlib.Path object represents the file or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from :param ac_parser: Forced parser type or parser object :param ac_template: Assume configuration file may be a template file and try to compile it AAR if True :param ac_context: Mapping object presents context to instantiate template :param options: Optional keyword arguments: - ac_dict, ac_ordered, ac_schema and ac_query are the options common in :func:`single_load`, :func:`multi_load`, :func:`load`: and :func:`loads`. See the descriptions of them in :func:`single_load`. - Options specific to this function and :func:`load`: - ac_merge (merge): Specify strategy of how to merge results loaded from multiple configuration files. See the doc of :mod:`anyconfig.dicts` for more details of strategies. The default is anyconfig.dicts.MS_DICTS. - ac_marker (marker): Globbing marker to detect paths patterns. - Common backend options: - ignore_missing: Ignore and just return empty result if given file 'path' does not exist. 
- Backend specific options such as {"indent": 2} for JSON backend :return: Mapping object or any query result might be primitive objects :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError ### Response: def multi_load(inputs, ac_parser=None, ac_template=False, ac_context=None, **options): r""" Load multiple config files. .. note:: :func:`load` is a preferable alternative and this API should be used only if there is a need to emphasize given inputs are multiple ones. The first argument 'inputs' may be a list of a file paths or a glob pattern specifying them or a pathlib.Path object represents file[s] or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from. About glob patterns, for example, is, if a.yml, b.yml and c.yml are in the dir /etc/foo/conf.d/, the followings give same results:: multi_load(["/etc/foo/conf.d/a.yml", "/etc/foo/conf.d/b.yml", "/etc/foo/conf.d/c.yml", ]) multi_load("/etc/foo/conf.d/*.yml") :param inputs: A list of file path or a glob pattern such as r'/a/b/\*.json'to list of files, file or file-like object or pathlib.Path object represents the file or a namedtuple 'anyconfig.globals.IOInfo' object represents some inputs to load some data from :param ac_parser: Forced parser type or parser object :param ac_template: Assume configuration file may be a template file and try to compile it AAR if True :param ac_context: Mapping object presents context to instantiate template :param options: Optional keyword arguments: - ac_dict, ac_ordered, ac_schema and ac_query are the options common in :func:`single_load`, :func:`multi_load`, :func:`load`: and :func:`loads`. See the descriptions of them in :func:`single_load`. - Options specific to this function and :func:`load`: - ac_merge (merge): Specify strategy of how to merge results loaded from multiple configuration files. See the doc of :mod:`anyconfig.dicts` for more details of strategies. The default is anyconfig.dicts.MS_DICTS. 
- ac_marker (marker): Globbing marker to detect paths patterns. - Common backend options: - ignore_missing: Ignore and just return empty result if given file 'path' does not exist. - Backend specific options such as {"indent": 2} for JSON backend :return: Mapping object or any query result might be primitive objects :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError """ marker = options.setdefault("ac_marker", options.get("marker", '*')) schema = _maybe_schema(ac_template=ac_template, ac_context=ac_context, **options) options["ac_schema"] = None # Avoid to load schema more than twice. paths = anyconfig.utils.expand_paths(inputs, marker=marker) if anyconfig.utils.are_same_file_types(paths): ac_parser = find(paths[0], forced_type=ac_parser) cnf = ac_context for path in paths: opts = options.copy() cups = _single_load(path, ac_parser=ac_parser, ac_template=ac_template, ac_context=cnf, **opts) if cups: if cnf is None: cnf = cups else: merge(cnf, cups, **options) if cnf is None: return anyconfig.dicts.convert_to({}, **options) cnf = _try_validate(cnf, schema, **options) return anyconfig.query.query(cnf, **options)
def create_db_in_shard(db_name, shard, client=None): """ In a sharded cluster, create a database in a particular shard. """ client = client or pymongo.MongoClient() # flush the router config to ensure it's not stale res = client.admin.command('flushRouterConfig') if not res.get('ok'): raise RuntimeError("unable to flush router config") if shard not in get_ids(client.config.shards): raise ValueError(f"Unknown shard {shard}") if db_name in get_ids(client.config.databases): raise ValueError("database already exists") # MongoDB doesn't have a 'create database' command, so insert an # item into a collection and then drop the collection. client[db_name].foo.insert({'foo': 1}) client[db_name].foo.drop() if client[db_name].collection_names(): raise ValueError("database has collections") primary = client['config'].databases.find_one(db_name)['primary'] if primary != shard: res = client.admin.command( 'movePrimary', value=db_name, to=shard) if not res.get('ok'): raise RuntimeError(str(res)) return ( f"Successfully created {db_name} in {shard} via {client.nodes} " f"from {hostname}")
In a sharded cluster, create a database in a particular shard.
Below is the the instruction that describes the task: ### Input: In a sharded cluster, create a database in a particular shard. ### Response: def create_db_in_shard(db_name, shard, client=None): """ In a sharded cluster, create a database in a particular shard. """ client = client or pymongo.MongoClient() # flush the router config to ensure it's not stale res = client.admin.command('flushRouterConfig') if not res.get('ok'): raise RuntimeError("unable to flush router config") if shard not in get_ids(client.config.shards): raise ValueError(f"Unknown shard {shard}") if db_name in get_ids(client.config.databases): raise ValueError("database already exists") # MongoDB doesn't have a 'create database' command, so insert an # item into a collection and then drop the collection. client[db_name].foo.insert({'foo': 1}) client[db_name].foo.drop() if client[db_name].collection_names(): raise ValueError("database has collections") primary = client['config'].databases.find_one(db_name)['primary'] if primary != shard: res = client.admin.command( 'movePrimary', value=db_name, to=shard) if not res.get('ok'): raise RuntimeError(str(res)) return ( f"Successfully created {db_name} in {shard} via {client.nodes} " f"from {hostname}")
def batch_call(self, calls): """Experimental, use at your own peril.""" req = self.protocol.create_batch_request() for call_args in calls: req.append(self.protocol.create_request(*call_args)) return self._send_and_handle_reply(req)
Experimental, use at your own peril.
Below is the the instruction that describes the task: ### Input: Experimental, use at your own peril. ### Response: def batch_call(self, calls): """Experimental, use at your own peril.""" req = self.protocol.create_batch_request() for call_args in calls: req.append(self.protocol.create_request(*call_args)) return self._send_and_handle_reply(req)
def authorize(self, cidr_ip=None, ec2_group=None): """ Add a new rule to this DBSecurity group. You need to pass in either a CIDR block to authorize or and EC2 SecurityGroup. @type cidr_ip: string @param cidr_ip: A valid CIDR IP range to authorize @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` @rtype: bool @return: True if successful. """ if isinstance(ec2_group, SecurityGroup): group_name = ec2_group.name group_owner_id = ec2_group.owner_id else: group_name = None group_owner_id = None return self.connection.authorize_dbsecurity_group(self.name, cidr_ip, group_name, group_owner_id)
Add a new rule to this DBSecurity group. You need to pass in either a CIDR block to authorize or and EC2 SecurityGroup. @type cidr_ip: string @param cidr_ip: A valid CIDR IP range to authorize @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` @rtype: bool @return: True if successful.
Below is the the instruction that describes the task: ### Input: Add a new rule to this DBSecurity group. You need to pass in either a CIDR block to authorize or and EC2 SecurityGroup. @type cidr_ip: string @param cidr_ip: A valid CIDR IP range to authorize @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` @rtype: bool @return: True if successful. ### Response: def authorize(self, cidr_ip=None, ec2_group=None): """ Add a new rule to this DBSecurity group. You need to pass in either a CIDR block to authorize or and EC2 SecurityGroup. @type cidr_ip: string @param cidr_ip: A valid CIDR IP range to authorize @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` @rtype: bool @return: True if successful. """ if isinstance(ec2_group, SecurityGroup): group_name = ec2_group.name group_owner_id = ec2_group.owner_id else: group_name = None group_owner_id = None return self.connection.authorize_dbsecurity_group(self.name, cidr_ip, group_name, group_owner_id)
def get_input(prompt, default=None, choices=None, option_value=None): """ If option_value is not None, then return it. Otherwise get the result from input. """ if option_value is not None: return option_value choices = choices or [] while 1: r = input(prompt+' ').strip() if not r and default is not None: return default if choices: if r not in choices: r = None else: break else: break return r
If option_value is not None, then return it. Otherwise get the result from input.
Below is the the instruction that describes the task: ### Input: If option_value is not None, then return it. Otherwise get the result from input. ### Response: def get_input(prompt, default=None, choices=None, option_value=None): """ If option_value is not None, then return it. Otherwise get the result from input. """ if option_value is not None: return option_value choices = choices or [] while 1: r = input(prompt+' ').strip() if not r and default is not None: return default if choices: if r not in choices: r = None else: break else: break return r
def _check_quantiles(self, val): """ Validate the quantiles passed in. Returns the np array if valid. """ if len(val) != 3 or not is_monotonic(val) or not np.all(val < 1): raise YellowbrickValueError( "quantiles must be a sequence of three " "monotonically increasing values less than 1" ) return np.asarray(val)
Validate the quantiles passed in. Returns the np array if valid.
Below is the the instruction that describes the task: ### Input: Validate the quantiles passed in. Returns the np array if valid. ### Response: def _check_quantiles(self, val): """ Validate the quantiles passed in. Returns the np array if valid. """ if len(val) != 3 or not is_monotonic(val) or not np.all(val < 1): raise YellowbrickValueError( "quantiles must be a sequence of three " "monotonically increasing values less than 1" ) return np.asarray(val)
def tokenize(self, token_list): """Produces the list of integer indices corresponding to a token list.""" return [ self._vocab_dict.get(token, self._vocab_dict[self.UNK]) for token in token_list ]
Produces the list of integer indices corresponding to a token list.
Below is the the instruction that describes the task: ### Input: Produces the list of integer indices corresponding to a token list. ### Response: def tokenize(self, token_list): """Produces the list of integer indices corresponding to a token list.""" return [ self._vocab_dict.get(token, self._vocab_dict[self.UNK]) for token in token_list ]
def _normalize_bbox(self, bbox, size): """Returns this bbox normalized to match the ratio of the given size.""" bbox_ratio = float(bbox.width) / float(bbox.height) size_ratio = float(size[0]) / float(size[1]) if round(size_ratio, 4) == round(bbox_ratio, 4): return bbox else: if bbox.height * size_ratio >= bbox.width: diff = bbox.height*size_ratio - bbox.width return BBox((bbox.xmin - diff/2, bbox.ymin, bbox.xmax + diff/2, bbox.ymax), bbox.projection) else: diff = abs(bbox.width/size_ratio - bbox.height) return BBox((bbox.xmin, bbox.ymin - diff/2, bbox.xmax, bbox.ymax + diff/2), bbox.projection)
Returns this bbox normalized to match the ratio of the given size.
Below is the the instruction that describes the task: ### Input: Returns this bbox normalized to match the ratio of the given size. ### Response: def _normalize_bbox(self, bbox, size): """Returns this bbox normalized to match the ratio of the given size.""" bbox_ratio = float(bbox.width) / float(bbox.height) size_ratio = float(size[0]) / float(size[1]) if round(size_ratio, 4) == round(bbox_ratio, 4): return bbox else: if bbox.height * size_ratio >= bbox.width: diff = bbox.height*size_ratio - bbox.width return BBox((bbox.xmin - diff/2, bbox.ymin, bbox.xmax + diff/2, bbox.ymax), bbox.projection) else: diff = abs(bbox.width/size_ratio - bbox.height) return BBox((bbox.xmin, bbox.ymin - diff/2, bbox.xmax, bbox.ymax + diff/2), bbox.projection)
def _maybe_run_matchers(self, text, run_matchers): """ OverlayedText should be smart enough to not run twice the same matchers but this is an extra handle of control over that. """ if run_matchers is True or \ (run_matchers is not False and text not in self._overlayed_already): text.overlay(self.matchers) self._overlayed_already.append(text)
OverlayedText should be smart enough to not run twice the same matchers but this is an extra handle of control over that.
Below is the the instruction that describes the task: ### Input: OverlayedText should be smart enough to not run twice the same matchers but this is an extra handle of control over that. ### Response: def _maybe_run_matchers(self, text, run_matchers): """ OverlayedText should be smart enough to not run twice the same matchers but this is an extra handle of control over that. """ if run_matchers is True or \ (run_matchers is not False and text not in self._overlayed_already): text.overlay(self.matchers) self._overlayed_already.append(text)
def get_transaction(self, transaction_id, **params): """https://developers.coinbase.com/api/v2#show-a-transaction""" return self.api_client.get_transaction(self.id, transaction_id, **params)
https://developers.coinbase.com/api/v2#show-a-transaction
Below is the the instruction that describes the task: ### Input: https://developers.coinbase.com/api/v2#show-a-transaction ### Response: def get_transaction(self, transaction_id, **params): """https://developers.coinbase.com/api/v2#show-a-transaction""" return self.api_client.get_transaction(self.id, transaction_id, **params)
def stream_info(self, stream_id): ''' get stream info ''' response, status_code = self.__pod__.Streams.get_v2_room_id_info( sessionToken=self.__session__, id=stream_id ).result() self.logger.debug('%s: %s' % (status_code, response)) return status_code, response
get stream info
Below is the the instruction that describes the task: ### Input: get stream info ### Response: def stream_info(self, stream_id): ''' get stream info ''' response, status_code = self.__pod__.Streams.get_v2_room_id_info( sessionToken=self.__session__, id=stream_id ).result() self.logger.debug('%s: %s' % (status_code, response)) return status_code, response
def UpdateFlow(self, client_id, flow_id, flow_obj=db.Database.unchanged, flow_state=db.Database.unchanged, client_crash_info=db.Database.unchanged, pending_termination=db.Database.unchanged, processing_on=db.Database.unchanged, processing_since=db.Database.unchanged, processing_deadline=db.Database.unchanged, cursor=None): """Updates flow objects in the database.""" updates = [] args = [] if flow_obj != db.Database.unchanged: updates.append("flow=%s") args.append(flow_obj.SerializeToString()) updates.append("flow_state=%s") args.append(int(flow_obj.flow_state)) updates.append("user_cpu_time_used_micros=%s") args.append( db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time)) updates.append("system_cpu_time_used_micros=%s") args.append( db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time)) updates.append("network_bytes_sent=%s") args.append(flow_obj.network_bytes_sent) updates.append("num_replies_sent=%s") args.append(flow_obj.num_replies_sent) if flow_state != db.Database.unchanged: updates.append("flow_state=%s") args.append(int(flow_state)) if client_crash_info != db.Database.unchanged: updates.append("client_crash_info=%s") args.append(client_crash_info.SerializeToString()) if pending_termination != db.Database.unchanged: updates.append("pending_termination=%s") args.append(pending_termination.SerializeToString()) if processing_on != db.Database.unchanged: updates.append("processing_on=%s") args.append(processing_on) if processing_since != db.Database.unchanged: updates.append("processing_since=FROM_UNIXTIME(%s)") args.append(mysql_utils.RDFDatetimeToTimestamp(processing_since)) if processing_deadline != db.Database.unchanged: updates.append("processing_deadline=FROM_UNIXTIME(%s)") args.append(mysql_utils.RDFDatetimeToTimestamp(processing_deadline)) if not updates: return query = "UPDATE flows SET last_update=NOW(6), " query += ", ".join(updates) query += " WHERE client_id=%s AND flow_id=%s" args.append(db_utils.ClientIDToInt(client_id)) 
args.append(db_utils.FlowIDToInt(flow_id)) updated = cursor.execute(query, args) if updated == 0: raise db.UnknownFlowError(client_id, flow_id)
Updates flow objects in the database.
Below is the the instruction that describes the task: ### Input: Updates flow objects in the database. ### Response: def UpdateFlow(self, client_id, flow_id, flow_obj=db.Database.unchanged, flow_state=db.Database.unchanged, client_crash_info=db.Database.unchanged, pending_termination=db.Database.unchanged, processing_on=db.Database.unchanged, processing_since=db.Database.unchanged, processing_deadline=db.Database.unchanged, cursor=None): """Updates flow objects in the database.""" updates = [] args = [] if flow_obj != db.Database.unchanged: updates.append("flow=%s") args.append(flow_obj.SerializeToString()) updates.append("flow_state=%s") args.append(int(flow_obj.flow_state)) updates.append("user_cpu_time_used_micros=%s") args.append( db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time)) updates.append("system_cpu_time_used_micros=%s") args.append( db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time)) updates.append("network_bytes_sent=%s") args.append(flow_obj.network_bytes_sent) updates.append("num_replies_sent=%s") args.append(flow_obj.num_replies_sent) if flow_state != db.Database.unchanged: updates.append("flow_state=%s") args.append(int(flow_state)) if client_crash_info != db.Database.unchanged: updates.append("client_crash_info=%s") args.append(client_crash_info.SerializeToString()) if pending_termination != db.Database.unchanged: updates.append("pending_termination=%s") args.append(pending_termination.SerializeToString()) if processing_on != db.Database.unchanged: updates.append("processing_on=%s") args.append(processing_on) if processing_since != db.Database.unchanged: updates.append("processing_since=FROM_UNIXTIME(%s)") args.append(mysql_utils.RDFDatetimeToTimestamp(processing_since)) if processing_deadline != db.Database.unchanged: updates.append("processing_deadline=FROM_UNIXTIME(%s)") args.append(mysql_utils.RDFDatetimeToTimestamp(processing_deadline)) if not updates: return query = "UPDATE flows SET last_update=NOW(6), " query += 
", ".join(updates) query += " WHERE client_id=%s AND flow_id=%s" args.append(db_utils.ClientIDToInt(client_id)) args.append(db_utils.FlowIDToInt(flow_id)) updated = cursor.execute(query, args) if updated == 0: raise db.UnknownFlowError(client_id, flow_id)
def _check_items_limit(self): """ Raise ItemsLimitReached if the writer reached the configured items limit. """ if self.items_limit and self.items_limit == self.get_metadata('items_count'): raise ItemsLimitReached('Finishing job after items_limit reached:' ' {} items written.'.format(self.get_metadata('items_count')))
Raise ItemsLimitReached if the writer reached the configured items limit.
Below is the the instruction that describes the task: ### Input: Raise ItemsLimitReached if the writer reached the configured items limit. ### Response: def _check_items_limit(self): """ Raise ItemsLimitReached if the writer reached the configured items limit. """ if self.items_limit and self.items_limit == self.get_metadata('items_count'): raise ItemsLimitReached('Finishing job after items_limit reached:' ' {} items written.'.format(self.get_metadata('items_count')))
def has_role(user, *roles, **kwargs): """ Judge is the user belongs to the role, and if does, then return the role object if not then return False. kwargs will be passed to role_func. """ Role = get_model('role') if isinstance(user, (unicode, str)): User = get_model('user') user = User.get(User.c.username==user) for role in roles: if isinstance(role, (str, unicode)): role = Role.get(Role.c.name==role) if not role: continue name = role.name func = __role_funcs__.get(name, None) if func: if isinstance(func, (unicode, str)): func = import_attr(func) assert callable(func) para = kwargs.copy() para['user'] = user flag = call_func(func, para) if flag: return role flag = role.users.has(user) if flag: return role flag = role.usergroups_has_user(user) if flag: return role return False
Judge is the user belongs to the role, and if does, then return the role object if not then return False. kwargs will be passed to role_func.
Below is the the instruction that describes the task: ### Input: Judge is the user belongs to the role, and if does, then return the role object if not then return False. kwargs will be passed to role_func. ### Response: def has_role(user, *roles, **kwargs): """ Judge is the user belongs to the role, and if does, then return the role object if not then return False. kwargs will be passed to role_func. """ Role = get_model('role') if isinstance(user, (unicode, str)): User = get_model('user') user = User.get(User.c.username==user) for role in roles: if isinstance(role, (str, unicode)): role = Role.get(Role.c.name==role) if not role: continue name = role.name func = __role_funcs__.get(name, None) if func: if isinstance(func, (unicode, str)): func = import_attr(func) assert callable(func) para = kwargs.copy() para['user'] = user flag = call_func(func, para) if flag: return role flag = role.users.has(user) if flag: return role flag = role.usergroups_has_user(user) if flag: return role return False
def get_loss_maps(dstore, kind): """ :param dstore: a DataStore instance :param kind: 'rlzs' or 'stats' """ oq = dstore['oqparam'] name = 'loss_maps-%s' % kind if name in dstore: # event_based risk return _to_loss_maps(dstore[name].value, oq.loss_maps_dt()) name = 'loss_curves-%s' % kind if name in dstore: # classical_risk # the loss maps are built on the fly from the loss curves loss_curves = dstore[name] loss_maps = scientific.broadcast( scientific.loss_maps, loss_curves, oq.conditional_loss_poes) return loss_maps raise KeyError('loss_maps/loss_curves missing in %s' % dstore)
:param dstore: a DataStore instance :param kind: 'rlzs' or 'stats'
Below is the the instruction that describes the task: ### Input: :param dstore: a DataStore instance :param kind: 'rlzs' or 'stats' ### Response: def get_loss_maps(dstore, kind): """ :param dstore: a DataStore instance :param kind: 'rlzs' or 'stats' """ oq = dstore['oqparam'] name = 'loss_maps-%s' % kind if name in dstore: # event_based risk return _to_loss_maps(dstore[name].value, oq.loss_maps_dt()) name = 'loss_curves-%s' % kind if name in dstore: # classical_risk # the loss maps are built on the fly from the loss curves loss_curves = dstore[name] loss_maps = scientific.broadcast( scientific.loss_maps, loss_curves, oq.conditional_loss_poes) return loss_maps raise KeyError('loss_maps/loss_curves missing in %s' % dstore)
def clear(self): """ This is a convenience method to clear a batch statement for reuse. *Note:* it should not be used concurrently with uncompleted execution futures executing the same ``BatchStatement``. """ del self._statements_and_parameters[:] self.keyspace = None self.routing_key = None if self.custom_payload: self.custom_payload.clear()
This is a convenience method to clear a batch statement for reuse. *Note:* it should not be used concurrently with uncompleted execution futures executing the same ``BatchStatement``.
Below is the the instruction that describes the task: ### Input: This is a convenience method to clear a batch statement for reuse. *Note:* it should not be used concurrently with uncompleted execution futures executing the same ``BatchStatement``. ### Response: def clear(self): """ This is a convenience method to clear a batch statement for reuse. *Note:* it should not be used concurrently with uncompleted execution futures executing the same ``BatchStatement``. """ del self._statements_and_parameters[:] self.keyspace = None self.routing_key = None if self.custom_payload: self.custom_payload.clear()
def enable_hostgroup_host_notifications(self, hostgroup): """Enable host notifications for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None """ for host_id in hostgroup.get_hosts(): if host_id in self.daemon.hosts: self.enable_host_notifications(self.daemon.hosts[host_id])
Enable host notifications for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None
Below is the the instruction that describes the task: ### Input: Enable host notifications for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ### Response: def enable_hostgroup_host_notifications(self, hostgroup): """Enable host notifications for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None """ for host_id in hostgroup.get_hosts(): if host_id in self.daemon.hosts: self.enable_host_notifications(self.daemon.hosts[host_id])
def get_collections(self): """Helper function for determining what the clusters/chains/other collections are.""" if not self.quiet: print print "Finding " + self.current_collection_type + "s..." self.compute_collections() if not self.quiet: print self.current_similarity_measure, self.current_collection_type, "information:" table_contents = [("Collection","Indices","Size")] for (i, j, k) in zip(self.collection_indices,self.collection_sizes,self.collection_list): table_contents.append(([unit.text for unit in k], i, j)) print_table(table_contents)
Helper function for determining what the clusters/chains/other collections are.
Below is the the instruction that describes the task: ### Input: Helper function for determining what the clusters/chains/other collections are. ### Response: def get_collections(self): """Helper function for determining what the clusters/chains/other collections are.""" if not self.quiet: print print "Finding " + self.current_collection_type + "s..." self.compute_collections() if not self.quiet: print self.current_similarity_measure, self.current_collection_type, "information:" table_contents = [("Collection","Indices","Size")] for (i, j, k) in zip(self.collection_indices,self.collection_sizes,self.collection_list): table_contents.append(([unit.text for unit in k], i, j)) print_table(table_contents)
def potential_cloud_pixels(self): """Determine potential cloud pixels (PCPs) Combine basic spectral testsr to get a premliminary cloud mask First pass, section 3.1.1 in Zhu and Woodcock 2012 Equation 6 (Zhu and Woodcock, 2012) Parameters ---------- ndvi: ndarray ndsi: ndarray blue: ndarray green: ndarray red: ndarray nir: ndarray swir1: ndarray swir2: ndarray cirrus: ndarray tirs1: ndarray Output ------ ndarray: potential cloud mask, boolean """ eq1 = self.basic_test() eq2 = self.whiteness_test() eq3 = self.hot_test() eq4 = self.nirswir_test() if self.sat == 'LC8': cir = self.cirrus_test() return (eq1 & eq2 & eq3 & eq4) | cir else: return eq1 & eq2 & eq3 & eq4
Determine potential cloud pixels (PCPs) Combine basic spectral testsr to get a premliminary cloud mask First pass, section 3.1.1 in Zhu and Woodcock 2012 Equation 6 (Zhu and Woodcock, 2012) Parameters ---------- ndvi: ndarray ndsi: ndarray blue: ndarray green: ndarray red: ndarray nir: ndarray swir1: ndarray swir2: ndarray cirrus: ndarray tirs1: ndarray Output ------ ndarray: potential cloud mask, boolean
Below is the the instruction that describes the task: ### Input: Determine potential cloud pixels (PCPs) Combine basic spectral testsr to get a premliminary cloud mask First pass, section 3.1.1 in Zhu and Woodcock 2012 Equation 6 (Zhu and Woodcock, 2012) Parameters ---------- ndvi: ndarray ndsi: ndarray blue: ndarray green: ndarray red: ndarray nir: ndarray swir1: ndarray swir2: ndarray cirrus: ndarray tirs1: ndarray Output ------ ndarray: potential cloud mask, boolean ### Response: def potential_cloud_pixels(self): """Determine potential cloud pixels (PCPs) Combine basic spectral testsr to get a premliminary cloud mask First pass, section 3.1.1 in Zhu and Woodcock 2012 Equation 6 (Zhu and Woodcock, 2012) Parameters ---------- ndvi: ndarray ndsi: ndarray blue: ndarray green: ndarray red: ndarray nir: ndarray swir1: ndarray swir2: ndarray cirrus: ndarray tirs1: ndarray Output ------ ndarray: potential cloud mask, boolean """ eq1 = self.basic_test() eq2 = self.whiteness_test() eq3 = self.hot_test() eq4 = self.nirswir_test() if self.sat == 'LC8': cir = self.cirrus_test() return (eq1 & eq2 & eq3 & eq4) | cir else: return eq1 & eq2 & eq3 & eq4
def transfer_file(self): """SCP transfer file.""" if self.direction == "put": self.put_file() elif self.direction == "get": self.get_file()
SCP transfer file.
Below is the the instruction that describes the task: ### Input: SCP transfer file. ### Response: def transfer_file(self): """SCP transfer file.""" if self.direction == "put": self.put_file() elif self.direction == "get": self.get_file()
def cols_rename(df,col_names, new_col_names): """ Rename a set of columns in a DataFrame Parameters: df - DataFrame DataFrame to operate on col_names - list of strings names of columns to change new_col_names - list of strings new names for old columns (order should be same as col_names) """ assert len(col_names) == len(new_col_names) for old_name,new_name in zip(col_names,new_col_names): col_rename(df,old_name,new_name)
Rename a set of columns in a DataFrame Parameters: df - DataFrame DataFrame to operate on col_names - list of strings names of columns to change new_col_names - list of strings new names for old columns (order should be same as col_names)
Below is the the instruction that describes the task: ### Input: Rename a set of columns in a DataFrame Parameters: df - DataFrame DataFrame to operate on col_names - list of strings names of columns to change new_col_names - list of strings new names for old columns (order should be same as col_names) ### Response: def cols_rename(df,col_names, new_col_names): """ Rename a set of columns in a DataFrame Parameters: df - DataFrame DataFrame to operate on col_names - list of strings names of columns to change new_col_names - list of strings new names for old columns (order should be same as col_names) """ assert len(col_names) == len(new_col_names) for old_name,new_name in zip(col_names,new_col_names): col_rename(df,old_name,new_name)
def register_up(self): """Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Increment the running-thread count. If we are the last thread to start, set status to 'up'. This allows startall() to complete if it was called with wait=True. """ with self.regcond: self.runningcount += 1 tid = thread.get_ident() self.tids.append(tid) self.logger.debug("register_up: (%d) count is %d" % (tid, self.runningcount)) if self.runningcount == self.numthreads: self.status = 'up' self.regcond.notify()
Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Increment the running-thread count. If we are the last thread to start, set status to 'up'. This allows startall() to complete if it was called with wait=True.
Below is the the instruction that describes the task: ### Input: Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Increment the running-thread count. If we are the last thread to start, set status to 'up'. This allows startall() to complete if it was called with wait=True. ### Response: def register_up(self): """Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Increment the running-thread count. If we are the last thread to start, set status to 'up'. This allows startall() to complete if it was called with wait=True. """ with self.regcond: self.runningcount += 1 tid = thread.get_ident() self.tids.append(tid) self.logger.debug("register_up: (%d) count is %d" % (tid, self.runningcount)) if self.runningcount == self.numthreads: self.status = 'up' self.regcond.notify()
def start_group(self, scol, typ): """Start a new group""" return Group(parent=self, level=scol, typ=typ)
Start a new group
Below is the the instruction that describes the task: ### Input: Start a new group ### Response: def start_group(self, scol, typ): """Start a new group""" return Group(parent=self, level=scol, typ=typ)
def _GetMessage(self, event_object): """Returns a properly formatted message string. Args: event_object: the event object (instance od EventObject). Returns: A formatted message string. """ # TODO: move this somewhere where the mediator can be instantiated once. formatter_mediator = formatters_mediator.FormatterMediator() result = '' try: result, _ = formatters_manager.FormattersManager.GetMessageStrings( formatter_mediator, event_object) except KeyError as exception: logging.warning( 'Unable to correctly assemble event with error: {0!s}'.format( exception)) return result
Returns a properly formatted message string. Args: event_object: the event object (instance od EventObject). Returns: A formatted message string.
Below is the the instruction that describes the task: ### Input: Returns a properly formatted message string. Args: event_object: the event object (instance od EventObject). Returns: A formatted message string. ### Response: def _GetMessage(self, event_object): """Returns a properly formatted message string. Args: event_object: the event object (instance od EventObject). Returns: A formatted message string. """ # TODO: move this somewhere where the mediator can be instantiated once. formatter_mediator = formatters_mediator.FormatterMediator() result = '' try: result, _ = formatters_manager.FormattersManager.GetMessageStrings( formatter_mediator, event_object) except KeyError as exception: logging.warning( 'Unable to correctly assemble event with error: {0!s}'.format( exception)) return result
def get(self, chargeback_id, **params): """Verify the chargeback ID and retrieve the chargeback from the API.""" if not chargeback_id or not chargeback_id.startswith(self.RESOURCE_ID_PREFIX): raise IdentifierError( "Invalid chargeback ID: '{id}'. A chargeback ID should start with '{prefix}'.".format( id=chargeback_id, prefix=self.RESOURCE_ID_PREFIX) ) return super(Chargebacks, self).get(chargeback_id, **params)
Verify the chargeback ID and retrieve the chargeback from the API.
Below is the the instruction that describes the task: ### Input: Verify the chargeback ID and retrieve the chargeback from the API. ### Response: def get(self, chargeback_id, **params): """Verify the chargeback ID and retrieve the chargeback from the API.""" if not chargeback_id or not chargeback_id.startswith(self.RESOURCE_ID_PREFIX): raise IdentifierError( "Invalid chargeback ID: '{id}'. A chargeback ID should start with '{prefix}'.".format( id=chargeback_id, prefix=self.RESOURCE_ID_PREFIX) ) return super(Chargebacks, self).get(chargeback_id, **params)
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: OutgoingCallerIdContext for this OutgoingCallerIdInstance :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext """ if self._context is None: self._context = OutgoingCallerIdContext( self._version, account_sid=self._solution['account_sid'], sid=self._solution['sid'], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: OutgoingCallerIdContext for this OutgoingCallerIdInstance :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
Below is the the instruction that describes the task: ### Input: Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: OutgoingCallerIdContext for this OutgoingCallerIdInstance :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext ### Response: def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: OutgoingCallerIdContext for this OutgoingCallerIdInstance :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext """ if self._context is None: self._context = OutgoingCallerIdContext( self._version, account_sid=self._solution['account_sid'], sid=self._solution['sid'], ) return self._context
def handleHttpPost(request, endpoint): """ Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class. """ if request.mimetype and request.mimetype != MIMETYPE: raise exceptions.UnsupportedMediaTypeException() request = request.get_data() if request == '' or request is None: request = '{}' responseStr = endpoint(request) return getFlaskResponse(responseStr)
Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class.
Below is the the instruction that describes the task: ### Input: Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class. ### Response: def handleHttpPost(request, endpoint): """ Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class. """ if request.mimetype and request.mimetype != MIMETYPE: raise exceptions.UnsupportedMediaTypeException() request = request.get_data() if request == '' or request is None: request = '{}' responseStr = endpoint(request) return getFlaskResponse(responseStr)
def aghmean(nums): """Return arithmetic-geometric-harmonic mean. Iterates over arithmetic, geometric, & harmonic means until they converge to a single value (rounded to 12 digits), following the method described in :cite:`Raissouli:2009`. Parameters ---------- nums : list A series of numbers Returns ------- float The arithmetic-geometric-harmonic mean of nums Examples -------- >>> aghmean([1, 2, 3, 4]) 2.198327159900212 >>> aghmean([1, 2]) 1.4142135623731884 >>> aghmean([0, 5, 1000]) 335.0 """ m_a = amean(nums) m_g = gmean(nums) m_h = hmean(nums) if math.isnan(m_a) or math.isnan(m_g) or math.isnan(m_h): return float('nan') while round(m_a, 12) != round(m_g, 12) and round(m_g, 12) != round( m_h, 12 ): m_a, m_g, m_h = ( (m_a + m_g + m_h) / 3, (m_a * m_g * m_h) ** (1 / 3), 3 / (1 / m_a + 1 / m_g + 1 / m_h), ) return m_a
Return arithmetic-geometric-harmonic mean. Iterates over arithmetic, geometric, & harmonic means until they converge to a single value (rounded to 12 digits), following the method described in :cite:`Raissouli:2009`. Parameters ---------- nums : list A series of numbers Returns ------- float The arithmetic-geometric-harmonic mean of nums Examples -------- >>> aghmean([1, 2, 3, 4]) 2.198327159900212 >>> aghmean([1, 2]) 1.4142135623731884 >>> aghmean([0, 5, 1000]) 335.0
Below is the the instruction that describes the task: ### Input: Return arithmetic-geometric-harmonic mean. Iterates over arithmetic, geometric, & harmonic means until they converge to a single value (rounded to 12 digits), following the method described in :cite:`Raissouli:2009`. Parameters ---------- nums : list A series of numbers Returns ------- float The arithmetic-geometric-harmonic mean of nums Examples -------- >>> aghmean([1, 2, 3, 4]) 2.198327159900212 >>> aghmean([1, 2]) 1.4142135623731884 >>> aghmean([0, 5, 1000]) 335.0 ### Response: def aghmean(nums): """Return arithmetic-geometric-harmonic mean. Iterates over arithmetic, geometric, & harmonic means until they converge to a single value (rounded to 12 digits), following the method described in :cite:`Raissouli:2009`. Parameters ---------- nums : list A series of numbers Returns ------- float The arithmetic-geometric-harmonic mean of nums Examples -------- >>> aghmean([1, 2, 3, 4]) 2.198327159900212 >>> aghmean([1, 2]) 1.4142135623731884 >>> aghmean([0, 5, 1000]) 335.0 """ m_a = amean(nums) m_g = gmean(nums) m_h = hmean(nums) if math.isnan(m_a) or math.isnan(m_g) or math.isnan(m_h): return float('nan') while round(m_a, 12) != round(m_g, 12) and round(m_g, 12) != round( m_h, 12 ): m_a, m_g, m_h = ( (m_a + m_g + m_h) / 3, (m_a * m_g * m_h) ** (1 / 3), 3 / (1 / m_a + 1 / m_g + 1 / m_h), ) return m_a
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False): """Configure environment for DeepMind-style Atari. """ if episode_life: env = EpisodicLifeEnv(env) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if scale: env = ScaledFloatFrame(env) if clip_rewards: env = ClipRewardEnv(env) if frame_stack: env = FrameStack(env, 4) return env
Configure environment for DeepMind-style Atari.
Below is the the instruction that describes the task: ### Input: Configure environment for DeepMind-style Atari. ### Response: def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False): """Configure environment for DeepMind-style Atari. """ if episode_life: env = EpisodicLifeEnv(env) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if scale: env = ScaledFloatFrame(env) if clip_rewards: env = ClipRewardEnv(env) if frame_stack: env = FrameStack(env, 4) return env
def acl(self): """Get the access control list for this workspace.""" r = fapi.get_workspace_acl(self.namespace, self.name, self.api_url) fapi._check_response_code(r, 200) return r.json()
Get the access control list for this workspace.
Below is the the instruction that describes the task: ### Input: Get the access control list for this workspace. ### Response: def acl(self): """Get the access control list for this workspace.""" r = fapi.get_workspace_acl(self.namespace, self.name, self.api_url) fapi._check_response_code(r, 200) return r.json()
def remote_upload(self, remote_url, folder_id=None, headers=None): """Used to make a remote file upload to openload.co Note: If folder_id is not provided, the file will be uploaded to ``Home`` folder. Args: remote_url (str): direct link of file to be remotely downloaded. folder_id (:obj:`str`, optional): folder-ID to upload to. headers (:obj:`dict`, optional): additional HTTP headers (e.g. Cookies or HTTP Basic-Auth) Returns: dict: dictionary containing ("id": uploaded file id, "folderid"). :: { "id": "12", "folderid": "4248" } """ kwargs = {'folder': folder_id, 'headers': headers} params = {'url': remote_url} params.update({key: value for key, value in kwargs.items() if value}) return self._get('remotedl/add', params=params)
Used to make a remote file upload to openload.co Note: If folder_id is not provided, the file will be uploaded to ``Home`` folder. Args: remote_url (str): direct link of file to be remotely downloaded. folder_id (:obj:`str`, optional): folder-ID to upload to. headers (:obj:`dict`, optional): additional HTTP headers (e.g. Cookies or HTTP Basic-Auth) Returns: dict: dictionary containing ("id": uploaded file id, "folderid"). :: { "id": "12", "folderid": "4248" }
Below is the the instruction that describes the task: ### Input: Used to make a remote file upload to openload.co Note: If folder_id is not provided, the file will be uploaded to ``Home`` folder. Args: remote_url (str): direct link of file to be remotely downloaded. folder_id (:obj:`str`, optional): folder-ID to upload to. headers (:obj:`dict`, optional): additional HTTP headers (e.g. Cookies or HTTP Basic-Auth) Returns: dict: dictionary containing ("id": uploaded file id, "folderid"). :: { "id": "12", "folderid": "4248" } ### Response: def remote_upload(self, remote_url, folder_id=None, headers=None): """Used to make a remote file upload to openload.co Note: If folder_id is not provided, the file will be uploaded to ``Home`` folder. Args: remote_url (str): direct link of file to be remotely downloaded. folder_id (:obj:`str`, optional): folder-ID to upload to. headers (:obj:`dict`, optional): additional HTTP headers (e.g. Cookies or HTTP Basic-Auth) Returns: dict: dictionary containing ("id": uploaded file id, "folderid"). :: { "id": "12", "folderid": "4248" } """ kwargs = {'folder': folder_id, 'headers': headers} params = {'url': remote_url} params.update({key: value for key, value in kwargs.items() if value}) return self._get('remotedl/add', params=params)
def compute_bounds(feed: "Feed") -> Tuple: """ Return the tuple (min longitude, min latitude, max longitude, max latitude) where the longitudes and latitude vary across all the Feed's stop coordinates. """ lons, lats = feed.stops["stop_lon"], feed.stops["stop_lat"] return lons.min(), lats.min(), lons.max(), lats.max()
Return the tuple (min longitude, min latitude, max longitude, max latitude) where the longitudes and latitude vary across all the Feed's stop coordinates.
Below is the the instruction that describes the task: ### Input: Return the tuple (min longitude, min latitude, max longitude, max latitude) where the longitudes and latitude vary across all the Feed's stop coordinates. ### Response: def compute_bounds(feed: "Feed") -> Tuple: """ Return the tuple (min longitude, min latitude, max longitude, max latitude) where the longitudes and latitude vary across all the Feed's stop coordinates. """ lons, lats = feed.stops["stop_lon"], feed.stops["stop_lat"] return lons.min(), lats.min(), lons.max(), lats.max()
def connect( self, host_or_hosts, port_or_ports=7051, rpc_timeout=None, admin_timeout=None, ): """ Pass-through connection interface to the Kudu client Parameters ---------- host_or_hosts : string or list of strings If you have multiple Kudu masters for HA, pass a list port_or_ports : int or list of int, default 7051 If you pass multiple host names, pass multiple ports rpc_timeout : kudu.TimeDelta See Kudu client documentation for details admin_timeout : kudu.TimeDelta See Kudu client documentation for details Returns ------- None """ self.client = kudu.connect( host_or_hosts, port_or_ports, rpc_timeout_ms=rpc_timeout, admin_timeout_ms=admin_timeout, )
Pass-through connection interface to the Kudu client Parameters ---------- host_or_hosts : string or list of strings If you have multiple Kudu masters for HA, pass a list port_or_ports : int or list of int, default 7051 If you pass multiple host names, pass multiple ports rpc_timeout : kudu.TimeDelta See Kudu client documentation for details admin_timeout : kudu.TimeDelta See Kudu client documentation for details Returns ------- None
Below is the the instruction that describes the task: ### Input: Pass-through connection interface to the Kudu client Parameters ---------- host_or_hosts : string or list of strings If you have multiple Kudu masters for HA, pass a list port_or_ports : int or list of int, default 7051 If you pass multiple host names, pass multiple ports rpc_timeout : kudu.TimeDelta See Kudu client documentation for details admin_timeout : kudu.TimeDelta See Kudu client documentation for details Returns ------- None ### Response: def connect( self, host_or_hosts, port_or_ports=7051, rpc_timeout=None, admin_timeout=None, ): """ Pass-through connection interface to the Kudu client Parameters ---------- host_or_hosts : string or list of strings If you have multiple Kudu masters for HA, pass a list port_or_ports : int or list of int, default 7051 If you pass multiple host names, pass multiple ports rpc_timeout : kudu.TimeDelta See Kudu client documentation for details admin_timeout : kudu.TimeDelta See Kudu client documentation for details Returns ------- None """ self.client = kudu.connect( host_or_hosts, port_or_ports, rpc_timeout_ms=rpc_timeout, admin_timeout_ms=admin_timeout, )
async def print_what_is_playing(loop): """Find a device and print what is playing.""" print('Discovering devices on network...') atvs = await pyatv.scan_for_apple_tvs(loop, timeout=5) if not atvs: print('no device found', file=sys.stderr) return print('Connecting to {0}'.format(atvs[0].address)) atv = pyatv.connect_to_apple_tv(atvs[0], loop) try: playing = await atv.metadata.playing() print('Currently playing:') print(playing) finally: # Do not forget to logout await atv.logout()
Find a device and print what is playing.
Below is the the instruction that describes the task: ### Input: Find a device and print what is playing. ### Response: async def print_what_is_playing(loop): """Find a device and print what is playing.""" print('Discovering devices on network...') atvs = await pyatv.scan_for_apple_tvs(loop, timeout=5) if not atvs: print('no device found', file=sys.stderr) return print('Connecting to {0}'.format(atvs[0].address)) atv = pyatv.connect_to_apple_tv(atvs[0], loop) try: playing = await atv.metadata.playing() print('Currently playing:') print(playing) finally: # Do not forget to logout await atv.logout()
def convert_dict_to_compatible_tensor(values, targets): """Converts dict `values` in tensors that are compatible with `targets`. Args: values: A dict to objects to convert with same keys as `targets`. targets: A dict returned by `parse_tensor_info_map`. Returns: A map with the same keys as `values` but values converted into Tensor/SparseTensors that can be fed into `protomap`. Raises: TypeError: If it fails to convert. """ result = {} for key, value in sorted(values.items()): result[key] = _convert_to_compatible_tensor( value, targets[key], error_prefix="Can't convert %r" % key) return result
Converts dict `values` in tensors that are compatible with `targets`. Args: values: A dict to objects to convert with same keys as `targets`. targets: A dict returned by `parse_tensor_info_map`. Returns: A map with the same keys as `values` but values converted into Tensor/SparseTensors that can be fed into `protomap`. Raises: TypeError: If it fails to convert.
Below is the the instruction that describes the task: ### Input: Converts dict `values` in tensors that are compatible with `targets`. Args: values: A dict to objects to convert with same keys as `targets`. targets: A dict returned by `parse_tensor_info_map`. Returns: A map with the same keys as `values` but values converted into Tensor/SparseTensors that can be fed into `protomap`. Raises: TypeError: If it fails to convert. ### Response: def convert_dict_to_compatible_tensor(values, targets): """Converts dict `values` in tensors that are compatible with `targets`. Args: values: A dict to objects to convert with same keys as `targets`. targets: A dict returned by `parse_tensor_info_map`. Returns: A map with the same keys as `values` but values converted into Tensor/SparseTensors that can be fed into `protomap`. Raises: TypeError: If it fails to convert. """ result = {} for key, value in sorted(values.items()): result[key] = _convert_to_compatible_tensor( value, targets[key], error_prefix="Can't convert %r" % key) return result
def _maybe_add_read_preference(spec, read_preference): """Add $readPreference to spec when appropriate.""" mode = read_preference.mode tag_sets = read_preference.tag_sets max_staleness = read_preference.max_staleness # Only add $readPreference if it's something other than primary to avoid # problems with mongos versions that don't support read preferences. Also, # for maximum backwards compatibility, don't add $readPreference for # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting # the slaveOkay bit has the same effect). if mode and ( mode != ReadPreference.SECONDARY_PREFERRED.mode or tag_sets != [{}] or max_staleness != -1): if "$query" not in spec: spec = SON([("$query", spec)]) spec["$readPreference"] = read_preference.document return spec
Add $readPreference to spec when appropriate.
Below is the the instruction that describes the task: ### Input: Add $readPreference to spec when appropriate. ### Response: def _maybe_add_read_preference(spec, read_preference): """Add $readPreference to spec when appropriate.""" mode = read_preference.mode tag_sets = read_preference.tag_sets max_staleness = read_preference.max_staleness # Only add $readPreference if it's something other than primary to avoid # problems with mongos versions that don't support read preferences. Also, # for maximum backwards compatibility, don't add $readPreference for # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting # the slaveOkay bit has the same effect). if mode and ( mode != ReadPreference.SECONDARY_PREFERRED.mode or tag_sets != [{}] or max_staleness != -1): if "$query" not in spec: spec = SON([("$query", spec)]) spec["$readPreference"] = read_preference.document return spec
def arcsine(x, null=(-np.inf, np.inf)): ''' arcsine(x) is equivalent to asin(x) except that it also works on sparse arrays. The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what value(s) should be assigned when x < -1 or x > 1. If only one number is given, then it is used for both values; otherwise the first value corresponds to <-1 and the second to >1. If null is None, then an error is raised when invalid values are encountered. ''' if sps.issparse(x): x = x.copy() x.data = arcsine(x.data, null=null, rtol=rtol, atol=atol) return x else: x = np.asarray(x) try: (nln,nlp) = null except Exception: (nln,nlp) = (null,null) ii = None if nln is None else np.where(x < -1) jj = None if nlp is None else np.where(x > 1) if ii: x[ii] = 0 if jj: x[jj] = 0 x = np.arcsin(x) if ii: x[ii] = nln if jj: x[jj] = nlp return x
arcsine(x) is equivalent to asin(x) except that it also works on sparse arrays. The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what value(s) should be assigned when x < -1 or x > 1. If only one number is given, then it is used for both values; otherwise the first value corresponds to <-1 and the second to >1. If null is None, then an error is raised when invalid values are encountered.
Below is the the instruction that describes the task: ### Input: arcsine(x) is equivalent to asin(x) except that it also works on sparse arrays. The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what value(s) should be assigned when x < -1 or x > 1. If only one number is given, then it is used for both values; otherwise the first value corresponds to <-1 and the second to >1. If null is None, then an error is raised when invalid values are encountered. ### Response: def arcsine(x, null=(-np.inf, np.inf)): ''' arcsine(x) is equivalent to asin(x) except that it also works on sparse arrays. The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what value(s) should be assigned when x < -1 or x > 1. If only one number is given, then it is used for both values; otherwise the first value corresponds to <-1 and the second to >1. If null is None, then an error is raised when invalid values are encountered. ''' if sps.issparse(x): x = x.copy() x.data = arcsine(x.data, null=null, rtol=rtol, atol=atol) return x else: x = np.asarray(x) try: (nln,nlp) = null except Exception: (nln,nlp) = (null,null) ii = None if nln is None else np.where(x < -1) jj = None if nlp is None else np.where(x > 1) if ii: x[ii] = 0 if jj: x[jj] = 0 x = np.arcsin(x) if ii: x[ii] = nln if jj: x[jj] = nlp return x
def acceptable(value, capitalize=False):
    """Convert a string into something that can be used as a valid python variable name"""
    cleaned = regexes['punctuation'].sub("", regexes['joins'].sub("_", value))
    # Collapse runs of underscores and trim any at the ends.
    cleaned = regexes['repeated_underscore'].sub("_", cleaned.strip('_'))
    if capitalize:
        # str.capitalize would lowercase everything after the first char, so
        # uppercase only each word's first character and keep the rest as-is.
        cleaned = ''.join(word[0].upper() + word[1:]
                          for word in cleaned.split('_'))
    return cleaned
Convert a string into something that can be used as a valid python variable name
Below is the the instruction that describes the task: ### Input: Convert a string into something that can be used as a valid python variable name ### Response: def acceptable(value, capitalize=False): """Convert a string into something that can be used as a valid python variable name""" name = regexes['punctuation'].sub("", regexes['joins'].sub("_", value)) # Clean up irregularities in underscores. name = regexes['repeated_underscore'].sub("_", name.strip('_')) if capitalize: # We don't use python's built in capitalize method here because it # turns all upper chars into lower chars if not at the start of # the string and we only want to change the first character. name_parts = [] for word in name.split('_'): name_parts.append(word[0].upper()) if len(word) > 1: name_parts.append(word[1:]) name = ''.join(name_parts) return name
def GetFileEntryByPathSpec(self, path_spec):
    """Retrieves a file entry for a path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      TSKFileEntry: a file entry or None if not available.
    """
    inode = getattr(path_spec, 'inode', None)
    location = getattr(path_spec, 'location', None)
    root_inode = self.GetRootInode()

    # The filesystem root gets special treatment so the returned entry is
    # flagged as the root entry.
    is_root = (
        location == self.LOCATION_ROOT or (
            inode is not None and root_inode is not None and
            inode == root_inode))
    if is_root:
        tsk_file = self._tsk_file_system.open(self.LOCATION_ROOT)
        return tsk_file_entry.TSKFileEntry(
            self._resolver_context, self, path_spec, tsk_file=tsk_file,
            is_root=True)

    # Opening a file by inode number is faster than opening by location.
    tsk_file = None
    try:
        if inode is not None:
            tsk_file = self._tsk_file_system.open_meta(inode=inode)
        elif location is not None:
            tsk_file = self._tsk_file_system.open(location)
    except IOError:
        tsk_file = None

    if tsk_file is None:
        return None

    # TODO: is there a way to determine the parent inode number here?
    return tsk_file_entry.TSKFileEntry(
        self._resolver_context, self, path_spec, tsk_file=tsk_file)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): path specification. Returns: TSKFileEntry: a file entry or None if not available.
Below is the the instruction that describes the task: ### Input: Retrieves a file entry for a path specification. Args: path_spec (PathSpec): path specification. Returns: TSKFileEntry: a file entry or None if not available. ### Response: def GetFileEntryByPathSpec(self, path_spec): """Retrieves a file entry for a path specification. Args: path_spec (PathSpec): path specification. Returns: TSKFileEntry: a file entry or None if not available. """ # Opening a file by inode number is faster than opening a file by location. tsk_file = None inode = getattr(path_spec, 'inode', None) location = getattr(path_spec, 'location', None) root_inode = self.GetRootInode() if (location == self.LOCATION_ROOT or (inode is not None and root_inode is not None and inode == root_inode)): tsk_file = self._tsk_file_system.open(self.LOCATION_ROOT) return tsk_file_entry.TSKFileEntry( self._resolver_context, self, path_spec, tsk_file=tsk_file, is_root=True) try: if inode is not None: tsk_file = self._tsk_file_system.open_meta(inode=inode) elif location is not None: tsk_file = self._tsk_file_system.open(location) except IOError: pass if tsk_file is None: return None # TODO: is there a way to determine the parent inode number here? return tsk_file_entry.TSKFileEntry( self._resolver_context, self, path_spec, tsk_file=tsk_file)
def __load(arff):
    """
    Convert a liac-arff dictionary into a pandas DataFrame.

    :param dict arff: arff dict created by liac-arff
    :rtype: DataFrame
    :return: pandas DataFrame
    """
    column_names = []
    for attribute in arff['attributes']:
        name, arff_type = attribute[0], attribute[1]
        if isinstance(arff_type, list):
            # Nominal attributes encode their value set: "name@{a,b,c}".
            column_names.append("%s@{%s}" % (name, ','.join(arff_type)))
        else:
            column_names.append("%s@%s" % (name, arff_type))
    return pd.DataFrame(data=arff['data'], columns=column_names)
load liac-arff to pandas DataFrame :param dict arff:arff dict created liac-arff :rtype: DataFrame :return: pandas DataFrame
Below is the the instruction that describes the task: ### Input: load liac-arff to pandas DataFrame :param dict arff:arff dict created liac-arff :rtype: DataFrame :return: pandas DataFrame ### Response: def __load(arff): """ load liac-arff to pandas DataFrame :param dict arff:arff dict created liac-arff :rtype: DataFrame :return: pandas DataFrame """ attrs = arff['attributes'] attrs_t = [] for attr in attrs: if isinstance(attr[1], list): attrs_t.append("%s@{%s}" % (attr[0], ','.join(attr[1]))) else: attrs_t.append("%s@%s" % (attr[0], attr[1])) df = pd.DataFrame(data=arff['data'], columns=attrs_t) return df
def Dict(self, name, initial=None, **extra):
    """The dictionary datatype (Hash).

    :param name: The name of the dictionary.
    :keyword initial: Initial contents.
    :keyword \*\*extra: Initial contents as keyword arguments.

    The ``initial``, and ``**extra`` keyword arguments will be merged
    (keyword arguments has priority).

    See :class:`redish.types.Dict`.

    """
    # Delegate to the backing types.Dict, binding it to this client's API
    # connection; initial/extra seed the hash contents.
    return types.Dict(name, self.api, initial=initial, **extra)
The dictionary datatype (Hash). :param name: The name of the dictionary. :keyword initial: Initial contents. :keyword \*\*extra: Initial contents as keyword arguments. The ``initial``, and ``**extra`` keyword arguments will be merged (keyword arguments has priority). See :class:`redish.types.Dict`.
Below is the the instruction that describes the task: ### Input: The dictionary datatype (Hash). :param name: The name of the dictionary. :keyword initial: Initial contents. :keyword \*\*extra: Initial contents as keyword arguments. The ``initial``, and ``**extra`` keyword arguments will be merged (keyword arguments has priority). See :class:`redish.types.Dict`. ### Response: def Dict(self, name, initial=None, **extra): """The dictionary datatype (Hash). :param name: The name of the dictionary. :keyword initial: Initial contents. :keyword \*\*extra: Initial contents as keyword arguments. The ``initial``, and ``**extra`` keyword arguments will be merged (keyword arguments has priority). See :class:`redish.types.Dict`. """ return types.Dict(name, self.api, initial=initial, **extra)
def load(self, game_json=None, mode=None):
    """
    Load a game from a serialized JSON representation, or start a new one.

    When ``game_json`` is None a fresh game is created (optionally in the
    given ``mode``, which may be a mode name or a GameMode); otherwise
    ``game_json`` must be a JSON string with the following well defined
    structure:

    '{
        "guesses_made": int,
        "key": "str:a 4 word",
        "status": "str: one of playing, won, lost",
        "mode": {
            "digits": int,
            "digit_type": DigitWord.DIGIT | DigitWord.HEXDIGIT,
            "mode": GameMode(),
            "priority": int,
            "help_text": str,
            "instruction_text": str,
            "guesses_allowed": int
        },
        "ttl": int,
        "answer": [int|str0, int|str1, ..., int|strN]
    }'

    * "mode" will be cast to a GameMode object
    * "answer" will be cast to a DigitWord object

    :param game_json: The source JSON - MUST be a string
    :param mode: A mode (str or GameMode) for the game being loaded
    :return: A game object
    """
    if game_json is not None:
        # Resume an existing game from its serialized form.
        if not isinstance(game_json, str):
            raise TypeError("Game must be passed as a serialized JSON string.")
        game_dict = json.loads(game_json)
        if 'mode' not in game_dict:
            raise ValueError("Mode is not provided in JSON; game_json cannot be loaded!")
        game_object = GameObject(
            mode=GameMode(**game_dict["mode"]), source_game=game_dict)
    else:
        # No JSON supplied: create a brand new game in the requested mode
        # (or the first configured mode by default).
        if mode is None:
            game_object = GameObject(mode=self._game_modes[0])
        elif isinstance(mode, str):
            game_object = GameObject(mode=self._match_mode(mode=mode))
        elif isinstance(mode, GameMode):
            game_object = GameObject(mode=mode)
        else:
            raise TypeError("Game mode must be a GameMode or string")
        game_object.status = self.GAME_PLAYING
    self.game = copy.deepcopy(game_object)
Load a game from a serialized JSON representation. The game expects a well defined structure as follows (Note JSON string format): '{ "guesses_made": int, "key": "str:a 4 word", "status": "str: one of playing, won, lost", "mode": { "digits": int, "digit_type": DigitWord.DIGIT | DigitWord.HEXDIGIT, "mode": GameMode(), "priority": int, "help_text": str, "instruction_text": str, "guesses_allowed": int }, "ttl": int, "answer": [int|str0, int|str1, ..., int|strN] }' * "mode" will be cast to a GameMode object * "answer" will be cast to a DigitWord object :param game_json: The source JSON - MUST be a string :param mode: A mode (str or GameMode) for the game being loaded :return: A game object
Below is the the instruction that describes the task: ### Input: Load a game from a serialized JSON representation. The game expects a well defined structure as follows (Note JSON string format): '{ "guesses_made": int, "key": "str:a 4 word", "status": "str: one of playing, won, lost", "mode": { "digits": int, "digit_type": DigitWord.DIGIT | DigitWord.HEXDIGIT, "mode": GameMode(), "priority": int, "help_text": str, "instruction_text": str, "guesses_allowed": int }, "ttl": int, "answer": [int|str0, int|str1, ..., int|strN] }' * "mode" will be cast to a GameMode object * "answer" will be cast to a DigitWord object :param game_json: The source JSON - MUST be a string :param mode: A mode (str or GameMode) for the game being loaded :return: A game object ### Response: def load(self, game_json=None, mode=None): """ Load a game from a serialized JSON representation. The game expects a well defined structure as follows (Note JSON string format): '{ "guesses_made": int, "key": "str:a 4 word", "status": "str: one of playing, won, lost", "mode": { "digits": int, "digit_type": DigitWord.DIGIT | DigitWord.HEXDIGIT, "mode": GameMode(), "priority": int, "help_text": str, "instruction_text": str, "guesses_allowed": int }, "ttl": int, "answer": [int|str0, int|str1, ..., int|strN] }' * "mode" will be cast to a GameMode object * "answer" will be cast to a DigitWord object :param game_json: The source JSON - MUST be a string :param mode: A mode (str or GameMode) for the game being loaded :return: A game object """ if game_json is None: # New game_json if mode is not None: if isinstance(mode, str): _game_object = GameObject(mode=self._match_mode(mode=mode)) elif isinstance(mode, GameMode): _game_object = GameObject(mode=mode) else: raise TypeError("Game mode must be a GameMode or string") else: _game_object = GameObject(mode=self._game_modes[0]) _game_object.status = self.GAME_PLAYING else: if not isinstance(game_json, str): raise TypeError("Game must be passed as a serialized JSON 
string.") game_dict = json.loads(game_json) if not 'mode' in game_dict: raise ValueError("Mode is not provided in JSON; game_json cannot be loaded!") _mode = GameMode(**game_dict["mode"]) _game_object = GameObject(mode=_mode, source_game=game_dict) self.game = copy.deepcopy(_game_object)
def set_header(self, msg):
    """ Set second head line text """
    # Row 1 is the second line of the curses screen (row 0 is the first).
    self.s.move(1, 0)
    # overwrite_line (defined elsewhere on this class) writes msg at the
    # cursor position; A_NORMAL keeps the default video attribute.
    self.overwrite_line(msg, attr=curses.A_NORMAL)
Set second head line text
Below is the instruction that describes the task:
### Input:
Set second head line text
### Response:
def set_header(self, msg):
        """ Set second head line text """
        self.s.move(1, 0)
        self.overwrite_line(msg, attr=curses.A_NORMAL)
def get_stp_mst_detail_output_msti_port_oper_bpdu_guard(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") msti = ET.SubElement(output, "msti") instance_id_key = ET.SubElement(msti, "instance-id") instance_id_key.text = kwargs.pop('instance_id') port = ET.SubElement(msti, "port") oper_bpdu_guard = ET.SubElement(port, "oper-bpdu-guard") oper_bpdu_guard.text = kwargs.pop('oper_bpdu_guard') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def get_stp_mst_detail_output_msti_port_oper_bpdu_guard(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") msti = ET.SubElement(output, "msti") instance_id_key = ET.SubElement(msti, "instance-id") instance_id_key.text = kwargs.pop('instance_id') port = ET.SubElement(msti, "port") oper_bpdu_guard = ET.SubElement(port, "oper-bpdu-guard") oper_bpdu_guard.text = kwargs.pop('oper_bpdu_guard') callback = kwargs.pop('callback', self._callback) return callback(config)
def in_query(expression):
    """Match any of the values that exist in an array specified in query."""
    def _in(index, expression=expression):
        """Return store key for documents that satisfy expression."""
        values = expression() if callable(expression) else expression
        try:
            iter(values)
        except TypeError:
            raise AttributeError('$in argument must be an iterable!')
        store_keys = set()
        for value in values:
            # Hash each candidate value and union the store keys indexed
            # under that hash.
            store_keys.update(index.get_keys_for(index.get_hash_for(value)))
        return list(store_keys)
    return _in
Match any of the values that exist in an array specified in query.
Below is the the instruction that describes the task: ### Input: Match any of the values that exist in an array specified in query. ### Response: def in_query(expression): """Match any of the values that exist in an array specified in query.""" def _in(index, expression=expression): """Return store key for documents that satisfy expression.""" ev = expression() if callable(expression) else expression try: iter(ev) except TypeError: raise AttributeError('$in argument must be an iterable!') hashed_ev = [index.get_hash_for(v) for v in ev] store_keys = set() for value in hashed_ev: store_keys |= set(index.get_keys_for(value)) return list(store_keys) return _in
def export_to_tf_tensor(self, x, laid_out_x):
    """Turn a Tensor into a tf.Tensor.

    Args:
      x: a Tensor
      laid_out_x: a LaidOutTensor

    Returns:
      a tf.Tensor
    """
    # Delegate to combine_slices to reassemble laid_out_x's per-slice data
    # into a single tensor with x's shape.
    return self.combine_slices(laid_out_x.all_slices, x.shape)
Turn a Tensor into a tf.Tensor. Args: x: a Tensor laid_out_x: a LaidOutTensor Returns: a tf.Tensor
Below is the the instruction that describes the task: ### Input: Turn a Tensor into a tf.Tensor. Args: x: a Tensor laid_out_x: a LaidOutTensor Returns: a tf.Tensor ### Response: def export_to_tf_tensor(self, x, laid_out_x): """Turn a Tensor into a tf.Tensor. Args: x: a Tensor laid_out_x: a LaidOutTensor Returns: a tf.Tensor """ return self.combine_slices(laid_out_x.all_slices, x.shape)
def to_view(self, view_name): """ Create a View from this Query. Args: view_name: the name of the View either as a string or a 3-part tuple (projectid, datasetid, name). Returns: A View for the Query. """ # Do the import here to avoid circular dependencies at top-level. from . import _view return _view.View(view_name, self._context).create(self._sql)
Create a View from this Query. Args: view_name: the name of the View either as a string or a 3-part tuple (projectid, datasetid, name). Returns: A View for the Query.
Below is the the instruction that describes the task: ### Input: Create a View from this Query. Args: view_name: the name of the View either as a string or a 3-part tuple (projectid, datasetid, name). Returns: A View for the Query. ### Response: def to_view(self, view_name): """ Create a View from this Query. Args: view_name: the name of the View either as a string or a 3-part tuple (projectid, datasetid, name). Returns: A View for the Query. """ # Do the import here to avoid circular dependencies at top-level. from . import _view return _view.View(view_name, self._context).create(self._sql)
def subelement(element, xpath, tag, text, **kwargs):
    """
    Find the element matching *xpath* under *element* and overwrite its
    *tag*, *text* and attributes (from *kwargs*).  When nothing matches,
    a new child element named *tag* is appended instead.

    Returns the found/created element.
    """
    target = element.find(xpath)
    if target is not None:
        # Reuse the existing element, just renaming it.
        target.tag = tag
    else:
        target = etree.SubElement(element, tag)
    target.text = text
    for attr_name, attr_value in kwargs.items():
        target.set(attr_name, attr_value)
    return target
Searches element matching the *xpath* in *parent* and replaces it's *tag*, *text* and *kwargs* attributes. If the element in *xpath* is not found a new child element is created with *kwargs* attributes and added. Returns the found/created element.
Below is the the instruction that describes the task: ### Input: Searches element matching the *xpath* in *parent* and replaces it's *tag*, *text* and *kwargs* attributes. If the element in *xpath* is not found a new child element is created with *kwargs* attributes and added. Returns the found/created element. ### Response: def subelement(element, xpath, tag, text, **kwargs): """ Searches element matching the *xpath* in *parent* and replaces it's *tag*, *text* and *kwargs* attributes. If the element in *xpath* is not found a new child element is created with *kwargs* attributes and added. Returns the found/created element. """ subelm = element.find(xpath) if subelm is None: subelm = etree.SubElement(element, tag) else: subelm.tag = tag subelm.text = text for attr, value in kwargs.items(): subelm.set(attr, value) return subelm
def remove(self, transport):
    """ removes a transport if a member of this group """
    # dict.pop with a default is a no-op when the uid is absent, which
    # matches the original membership-check-then-delete behaviour.
    self.transports.pop(transport.uid, None)
removes a transport if a member of this group
Below is the the instruction that describes the task: ### Input: removes a transport if a member of this group ### Response: def remove(self, transport): """ removes a transport if a member of this group """ if transport.uid in self.transports: del (self.transports[transport.uid])
def _paint_carto_legend(ax, values, legend_values, legend_labels, scale_func, legend_kwargs):
    """
    Draw a legend of scaled point sizes on *ax*.

    Intended for use when a plot method is called with ``legend=True``.

    Parameters
    ----------
    ax : matplotlib.Axes instance
        Axes instance the legend is attached to.
    values : list
        The values being plotted (as returned by ``_discrete_colorize``).
    legend_values : list, optional
        Explicit values to represent in the legend; when None, five evenly
        spaced values spanning the data range are used.
    legend_labels : list, optional
        Explicit labels for the legend entries; defaults to the values.
    scale_func : ufunc
        The same scaling function applied to the plotted points.
    legend_kwargs : dict
        Extra keyword arguments forwarded to matplotlib's legend; useful
        for fine-tuning placement.

    Returns
    -------
    None.
    """
    # Choose the values represented in the legend.
    if legend_values is not None:
        display_values = legend_values
    else:
        display_values = np.linspace(np.max(values), np.min(values), num=5)
    display_labels = display_values if legend_labels is None else legend_labels

    # One hollow circular marker per legend entry.  markersize is a linear
    # dimension, so taking the square root makes the marker *area* scale
    # with scale_func(value).
    markers = [
        mpl.lines.Line2D(
            [0], [0], linestyle='None', marker="o",
            markersize=(20 * scale_func(value)) ** (1 / 2),
            markerfacecolor='None')
        for value in display_values
    ]

    extra_kwargs = {} if legend_kwargs is None else legend_kwargs
    ax.legend(markers, display_labels, numpoints=1, fancybox=True, **extra_kwargs)
Creates a legend and attaches it to the axis. Meant to be used when a ``legend=True`` parameter is passed. Parameters ---------- ax : matplotlib.Axes instance The ``matplotlib.Axes`` instance on which a legend is being painted. values : list A list of values being plotted. May be either a list of int types or a list of unique entities in the data column (e.g. as generated via ``numpy.unique(data)``. This parameter is meant to be the same as that returned by the ``_discrete_colorize`` method. legend_values : list, optional If a legend is specified, equal intervals will be used for the "points" in the legend by default. However, particularly if your scale is non-linear, oftentimes this isn't what you want. If this variable is provided as well, the values included in the input will be used by the legend instead. legend_labels : list, optional If a legend is specified, this parameter can be used to control what names will be attached to scale_func : ufunc The scaling function being used. legend_kwargs : dict Keyword arguments which will be passed to the matplotlib legend instance on initialization. This parameter is provided to allow fine-tuning of legend placement at the top level of a plot method, as legends are very finicky. Returns ------- None.
Below is the the instruction that describes the task: ### Input: Creates a legend and attaches it to the axis. Meant to be used when a ``legend=True`` parameter is passed. Parameters ---------- ax : matplotlib.Axes instance The ``matplotlib.Axes`` instance on which a legend is being painted. values : list A list of values being plotted. May be either a list of int types or a list of unique entities in the data column (e.g. as generated via ``numpy.unique(data)``. This parameter is meant to be the same as that returned by the ``_discrete_colorize`` method. legend_values : list, optional If a legend is specified, equal intervals will be used for the "points" in the legend by default. However, particularly if your scale is non-linear, oftentimes this isn't what you want. If this variable is provided as well, the values included in the input will be used by the legend instead. legend_labels : list, optional If a legend is specified, this parameter can be used to control what names will be attached to scale_func : ufunc The scaling function being used. legend_kwargs : dict Keyword arguments which will be passed to the matplotlib legend instance on initialization. This parameter is provided to allow fine-tuning of legend placement at the top level of a plot method, as legends are very finicky. Returns ------- None. ### Response: def _paint_carto_legend(ax, values, legend_values, legend_labels, scale_func, legend_kwargs): """ Creates a legend and attaches it to the axis. Meant to be used when a ``legend=True`` parameter is passed. Parameters ---------- ax : matplotlib.Axes instance The ``matplotlib.Axes`` instance on which a legend is being painted. values : list A list of values being plotted. May be either a list of int types or a list of unique entities in the data column (e.g. as generated via ``numpy.unique(data)``. This parameter is meant to be the same as that returned by the ``_discrete_colorize`` method. 
legend_values : list, optional If a legend is specified, equal intervals will be used for the "points" in the legend by default. However, particularly if your scale is non-linear, oftentimes this isn't what you want. If this variable is provided as well, the values included in the input will be used by the legend instead. legend_labels : list, optional If a legend is specified, this parameter can be used to control what names will be attached to scale_func : ufunc The scaling function being used. legend_kwargs : dict Keyword arguments which will be passed to the matplotlib legend instance on initialization. This parameter is provided to allow fine-tuning of legend placement at the top level of a plot method, as legends are very finicky. Returns ------- None. """ # Set up the legend values. if legend_values is not None: display_values = legend_values else: display_values = np.linspace(np.max(values), np.min(values), num=5) display_labels = legend_labels if (legend_labels is not None) else display_values # Paint patches. patches = [] for value in display_values: patches.append(mpl.lines.Line2D([0], [0], linestyle='None', marker="o", markersize=(20*scale_func(value))**(1/2), markerfacecolor='None')) if legend_kwargs is None: legend_kwargs = dict() ax.legend(patches, display_labels, numpoints=1, fancybox=True, **legend_kwargs)
def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
    """Find least orbits for a centering atom"""
    orbits = _get_orbits(atom_index, cell, site_symmetry, symprec)
    # Start with every atom as its own representative.
    representatives = np.arange(cell.get_number_of_atoms())
    for index, orbit in enumerate(orbits):
        # representatives[index] cannot change inside its own orbit loop
        # (x > x is never true), so it is safe to read it once up front.
        rep = representatives[index]
        for member in np.unique(orbit):
            if representatives[member] > rep:
                representatives[member] = rep
    return np.unique(representatives)
Find least orbits for a centering atom
Below is the the instruction that describes the task: ### Input: Find least orbits for a centering atom ### Response: def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5): """Find least orbits for a centering atom""" orbits = _get_orbits(atom_index, cell, site_symmetry, symprec) mapping = np.arange(cell.get_number_of_atoms()) for i, orb in enumerate(orbits): for num in np.unique(orb): if mapping[num] > mapping[i]: mapping[num] = mapping[i] return np.unique(mapping)
def get(self, name, handler, request=None):
    """Begin Fetch of current value of a PV

    :param name: A single name string or list of name strings
    :param request: A :py:class:`p4p.Value` or string to qualify this request,
                    or None to use a default.
    :param callable handler: Completion notification.  Called with a Value,
                             RemoteError, or Cancelled
    :returns: A object with a method cancel() which may be used to abort the operation.
    """
    channel = self._channel(name)
    wrapped_handler = unwrapHandler(handler, self._nt)
    pv_request = wrapRequest(request)
    # get=True/put=False marks this as a read-only fetch operation.
    return _p4p.ClientOperation(channel,
                                handler=wrapped_handler,
                                pvRequest=pv_request,
                                get=True, put=False)
Begin Fetch of current value of a PV :param name: A single name string or list of name strings :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled :returns: A object with a method cancel() which may be used to abort the operation.
Below is the the instruction that describes the task: ### Input: Begin Fetch of current value of a PV :param name: A single name string or list of name strings :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled :returns: A object with a method cancel() which may be used to abort the operation. ### Response: def get(self, name, handler, request=None): """Begin Fetch of current value of a PV :param name: A single name string or list of name strings :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled :returns: A object with a method cancel() which may be used to abort the operation. """ chan = self._channel(name) return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt), pvRequest=wrapRequest(request), get=True, put=False)
def get_text_content_for_pmids(pmids):
    """Get text content for articles given a list of their pmids

    Full text XML is retrieved from PMC for every pmid with full text
    available there; all remaining pmids fall back to their PubMed
    abstracts.  Entries for which no content could be retrieved are
    dropped from the result.

    Parameters
    ----------
    pmids : list of str

    Returns
    -------
    text_content : list of str
    """
    pmc_pmids = set(pmc_client.filter_pmids(pmids, source_type='fulltext'))

    pmc_xmls = []
    # Fix: the original iterated pmc_pmids while discarding from it (which
    # raises RuntimeError in Python 3 as soon as any pmid lacks a PMC id)
    # and, in a dead branch, called .append() on a set with a stale loop
    # variable.  Iterate a snapshot and mutate the live set instead.
    for pmid in list(pmc_pmids):
        pmc_id = pmc_client.id_lookup(pmid, idtype='pmid')['pmcid']
        if pmc_id:
            pmc_xmls.append(pmc_client.get_xml(pmc_id))
        else:
            # No PMC id available: fall back to the abstract path below.
            pmc_pmids.discard(pmid)
        # Rate-limit the remote calls, as the original did.
        time.sleep(0.5)

    abstracts = []
    for pmid in set(pmids) - pmc_pmids:
        abstracts.append(pubmed_client.get_abstract(pmid))
        time.sleep(0.5)

    return [text_content
            for source in (pmc_xmls, abstracts)
            for text_content in source
            if text_content is not None]
Get text content for articles given a list of their pmids Parameters ---------- pmids : list of str Returns ------- text_content : list of str
Below is the the instruction that describes the task: ### Input: Get text content for articles given a list of their pmids Parameters ---------- pmids : list of str Returns ------- text_content : list of str ### Response: def get_text_content_for_pmids(pmids): """Get text content for articles given a list of their pmids Parameters ---------- pmids : list of str Returns ------- text_content : list of str """ pmc_pmids = set(pmc_client.filter_pmids(pmids, source_type='fulltext')) pmc_ids = [] for pmid in pmc_pmids: pmc_id = pmc_client.id_lookup(pmid, idtype='pmid')['pmcid'] if pmc_id: pmc_ids.append(pmc_id) else: pmc_pmids.discard(pmid) pmc_xmls = [] failed = set() for pmc_id in pmc_ids: if pmc_id is not None: pmc_xmls.append(pmc_client.get_xml(pmc_id)) else: failed.append(pmid) time.sleep(0.5) remaining_pmids = set(pmids) - pmc_pmids | failed abstracts = [] for pmid in remaining_pmids: abstract = pubmed_client.get_abstract(pmid) abstracts.append(abstract) time.sleep(0.5) return [text_content for source in (pmc_xmls, abstracts) for text_content in source if text_content is not None]