Dataset columns: code (string, lengths 75 to 104k), docstring (string, lengths 1 to 46.9k), text (string, lengths 164 to 112k).
def execute_job(self, obj):
    """
    Execute the BMDS model and parse outputs if successful.
    """
    # get executable path
    exe = session.BMDS.get_model(obj["bmds_version"], obj["model_name"]).get_exe_path()

    # write dfile
    dfile = self.tempfiles.get_tempfile(prefix="bmds-dfile-", suffix=".(d)")
    with open(dfile, "w") as f:
        f.write(obj["dfile"])

    outfile = self.get_outfile(dfile, obj["model_name"])
    oo2 = outfile.replace(".out", ".002")

    proc = subprocess.Popen([exe, dfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = None
    stdout = ""
    stderr = ""
    try:
        stdout, stderr = proc.communicate(timeout=settings.BMDS_MODEL_TIMEOUT_SECONDS)
        if os.path.exists(outfile):
            with open(outfile, "r") as f:
                output = f.read()
        status = RunStatus.SUCCESS.value
        stdout = stdout.decode().strip()
        stderr = stderr.decode().strip()
    except subprocess.TimeoutExpired:
        proc.kill()
        status = RunStatus.FAILURE.value
        stdout, stderr = proc.communicate()
    finally:
        if os.path.exists(outfile):
            self.tempfiles.append(outfile)
        if os.path.exists(oo2):
            self.tempfiles.append(oo2)
        self.tempfiles.cleanup()

    return dict(status=status, output=output, stdout=stdout, stderr=stderr)
Execute the BMDS model and parse outputs if successful.
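The shape of the job payload is not shown in this row; a hedged sketch based on the key lookups above (version and model names are illustrative, and runner stands for an instance of the class that defines execute_job):

obj = {
    "bmds_version": "BMDS270",   # illustrative version tag
    "model_name": "Multistage",  # illustrative model name
    "dfile": "<(d) file contents>",
}
result = runner.execute_job(obj)
print(result["status"], result["stdout"])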
def _preprocess_data_for_tabular_explain(self, df, categories):
    """Get preprocessed training set in numpy array, and categorical names from raw training data.

    LIME tabular explainer requires a training set to know the distribution of numeric
    and categorical values. The training set has to be numpy arrays, with all categorical
    values converted to indices. It also requires list of names for each category.
    """
    df = df.copy()

    # Remove non tabular columns (text, image).
    for col in list(df.columns):
        if col not in (self._categorical_columns + self._numeric_columns):
            del df[col]

    # Convert categorical values into indices.
    for col_name, col_categories in zip(self._categorical_columns, categories):
        df[col_name] = df[col_name].apply(
            lambda x: col_categories.index(str(x))
            if str(x) in col_categories else len(col_categories) - 1)

    # Make sure numeric values are really numeric
    for numeric_col in self._numeric_columns:
        df[numeric_col] = df[numeric_col].apply(lambda x: float(x))

    return df.as_matrix(self._categorical_columns + self._numeric_columns)
Get preprocessed training set in numpy array, and categorical names from raw training data. LIME tabular explainer requires a training set to know the distribution of numeric and categorical values. The training set has to be numpy arrays, with all categorical values converted to indices. It also requires list of names for each category.
def stack_hist(ax, stacked_data, sty_cycle, bottoms=None,
               hist_func=None, labels=None,
               plot_func=None, plot_kwargs=None):
    """
    ax : axes.Axes
        The axes to add artists to

    stacked_data : array or Mapping
        A (N, M) shaped array.  The first dimension will be iterated
        over to compute histograms row-wise

    sty_cycle : Cycler or operable of dict
        Style to apply to each set

    bottoms : array, optional
        The initial positions of the bottoms, defaults to 0

    hist_func : callable, optional
        Must have signature `bin_vals, bin_edges = f(data)`.
        `bin_edges` expected to be one longer than `bin_vals`

    labels : list of str, optional
        The label for each set.

        If not given and stacked data is an array defaults to 'default set {n}'

        If stacked_data is a mapping, and labels is None, default to the keys
        (which may come out in a random order).

        If stacked_data is a mapping and labels is given then only
        the columns listed will be plotted.

    plot_func : callable, optional
        Function to call to draw the histogram must have signature:

          ret = plot_func(ax, edges, top, bottoms=bottoms,
                          label=label, **kwargs)

    plot_kwargs : dict, optional
        Any extra kwargs to pass through to the plotting function.  This
        will be the same for all calls to the plotting function and will
        over-ride the values in cycle.

    Returns
    -------
    arts : dict
        Dictionary of artists keyed on their labels
    """
    # deal with default binning function
    if hist_func is None:
        hist_func = np.histogram

    # deal with default plotting function
    if plot_func is None:
        plot_func = filled_hist

    # deal with default
    if plot_kwargs is None:
        plot_kwargs = {}
    print(plot_kwargs)

    try:
        l_keys = stacked_data.keys()
        label_data = True
        if labels is None:
            labels = l_keys
    except AttributeError:
        label_data = False
        if labels is None:
            labels = itertools.repeat(None)

    if label_data:
        loop_iter = enumerate((stacked_data[lab], lab, s)
                              for lab, s in zip(labels, sty_cycle))
    else:
        loop_iter = enumerate(zip(stacked_data, labels, sty_cycle))

    arts = {}
    for j, (data, label, sty) in loop_iter:
        if label is None:
            label = 'dflt set {n}'.format(n=j)
        label = sty.pop('label', label)
        vals, edges = hist_func(data)
        if bottoms is None:
            bottoms = np.zeros_like(vals)
        top = bottoms + vals
        print(sty)
        sty.update(plot_kwargs)
        print(sty)
        ret = plot_func(ax, edges, top, bottoms=bottoms,
                        label=label, **sty)
        bottoms = top
        arts[label] = ret
    ax.legend(fontsize=10)
    return arts
ax : axes.Axes The axes to add artists to stacked_data : array or Mapping A (N, M) shaped array. The first dimension will be iterated over to compute histograms row-wise sty_cycle : Cycler or operable of dict Style to apply to each set bottoms : array, optional The initial positions of the bottoms, defaults to 0 hist_func : callable, optional Must have signature `bin_vals, bin_edges = f(data)`. `bin_edges` expected to be one longer than `bin_vals` labels : list of str, optional The label for each set. If not given and stacked data is an array defaults to 'default set {n}' If stacked_data is a mapping, and labels is None, default to the keys (which may come out in a random order). If stacked_data is a mapping and labels is given then only the columns listed will be plotted. plot_func : callable, optional Function to call to draw the histogram must have signature: ret = plot_func(ax, edges, top, bottoms=bottoms, label=label, **kwargs) plot_kwargs : dict, optional Any extra kwargs to pass through to the plotting function. This will be the same for all calls to the plotting function and will over-ride the values in cycle. Returns ------- arts : dict Dictionary of artists keyed on their labels
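A hedged usage sketch for the helper above; it assumes the filled_hist plotting function and the cycler package from the original matplotlib example are available, and the data values are illustrative:

import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler

np.random.seed(0)
stack_data = np.random.randn(4, 12250)  # four layers to stack

# one style dict per layer; 'facecolor' is assumed to be accepted by filled_hist
sty_cycle = cycler(facecolor=['C0', 'C1', 'C2', 'C3'])

fig, ax = plt.subplots()
arts = stack_hist(ax, stack_data, sty_cycle,
                  hist_func=lambda d: np.histogram(d, bins=25))
plt.show()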
def sampling_params(self):
    """Returns the sampling parameters.

    If ``sampling_transforms`` is None, this is the same as the
    ``variable_params``.
    """
    if self.sampling_transforms is None:
        sampling_params = self.variable_params
    else:
        sampling_params = self.sampling_transforms.sampling_params
    return sampling_params
Returns the sampling parameters. If ``sampling_transforms`` is None, this is the same as the ``variable_params``.
def load(self):
    """
    We load the data from the key itself instead of fetching from some
    external data store. Opposite of _get_session_key(), raises BadSignature
    if signature fails.
    """
    session_data = {}
    try:
        session_data = decode(self.session_key, settings.DJAODJIN_SECRET_KEY)
        self._session_key_data.update(session_data)
        LOGGER.debug("session data (from proxy): %s", session_data)
        # We have been able to decode the session data, let's
        # create Users and session keys expected by Django
        # contrib.auth backend.
        if 'username' in session_data:
            user = authenticate(
                request=session_data,
                remote_user=session_data['username'])
            if not user:
                raise ValueError("Cannot authenticate user.")
            session_data[SESSION_KEY] = user.id
            session_data[BACKEND_SESSION_KEY] = user.backend
            session_data[HASH_SESSION_KEY] = user.get_session_auth_hash()
        if self._local:
            session_data_local = self._local.load()
            LOGGER.debug("session data (local): %s", session_data_local)
            session_data.update(session_data_local)
    except Exception as err:  #pylint:disable=broad-except
        LOGGER.debug("error: while loading session, %s", err)
        return {}
    return session_data
We load the data from the key itself instead of fetching from some external data store. Opposite of _get_session_key(), raises BadSignature if signature fails.
def dispatch_to_index_op(op, left, right, index_class):
    """
    Wrap Series left in the given index_class to delegate the operation op
    to the index implementation.  DatetimeIndex and TimedeltaIndex perform
    type checking, timezone handling, overflow checks, etc.

    Parameters
    ----------
    op : binary operator (operator.add, operator.sub, ...)
    left : Series
    right : object
    index_class : DatetimeIndex or TimedeltaIndex

    Returns
    -------
    result : object, usually DatetimeIndex, TimedeltaIndex, or Series
    """
    left_idx = index_class(left)

    # avoid accidentally allowing integer add/sub.  For datetime64[tz] dtypes,
    # left_idx may inherit a freq from a cached DatetimeIndex.
    # See discussion in GH#19147.
    if getattr(left_idx, 'freq', None) is not None:
        left_idx = left_idx._shallow_copy(freq=None)
    try:
        result = op(left_idx, right)
    except NullFrequencyError:
        # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
        # on add/sub of integers (or int-like).  We re-raise as a TypeError.
        raise TypeError('incompatible type for a datetime/timedelta '
                        'operation [{name}]'.format(name=op.__name__))
    return result
Wrap Series left in the given index_class to delegate the operation op to the index implementation. DatetimeIndex and TimedeltaIndex perform type checking, timezone handling, overflow checks, etc. Parameters ---------- op : binary operator (operator.add, operator.sub, ...) left : Series right : object index_class : DatetimeIndex or TimedeltaIndex Returns ------- result : object, usually DatetimeIndex, TimedeltaIndex, or Series
def add_gauge(self, gauge):
    """Add `gauge` to the registry.

    Raises a `ValueError` if another gauge with the same name already
    exists in the registry.

    :type gauge: class:`LongGauge`, class:`DoubleGauge`,
        :class:`opencensus.metrics.export.cumulative.LongCumulative`,
        :class:`opencensus.metrics.export.cumulative.DoubleCumulative`,
        :class:`DerivedLongGauge`, :class:`DerivedDoubleGauge`
        :class:`opencensus.metrics.export.cumulative.DerivedLongCumulative`, or
        :class:`opencensus.metrics.export.cumulative.DerivedDoubleCumulative`
    :param gauge: The gauge to add to the registry.
    """
    if gauge is None:
        raise ValueError
    name = gauge.descriptor.name
    with self._gauges_lock:
        if name in self.gauges:
            raise ValueError(
                'Another gauge named "{}" is already registered'
                .format(name))
        self.gauges[name] = gauge
Add `gauge` to the registry. Raises a `ValueError` if another gauge with the same name already exists in the registry. :type gauge: class:`LongGauge`, class:`DoubleGauge`, :class:`opencensus.metrics.export.cumulative.LongCumulative`, :class:`opencensus.metrics.export.cumulative.DoubleCumulative`, :class:`DerivedLongGauge`, :class:`DerivedDoubleGauge` :class:`opencensus.metrics.export.cumulative.DerivedLongCumulative`, or :class:`opencensus.metrics.export.cumulative.DerivedDoubleCumulative` :param gauge: The gauge to add to the registry.
def _parameterize_string(raw):
    """Substitute placeholders in a string using CloudFormation references

    Args:
        raw (`str`): String to be processed. Byte strings are not
            supported; decode them before passing them to this function.

    Returns:
        `str` | :class:`troposphere.GenericHelperFn`: An expression with
            placeholders from the input replaced, suitable to be passed to
            Troposphere to be included in CloudFormation template. This will
            be the input string without modification if no substitutions are
            found, and a composition of CloudFormation calls otherwise.
    """
    parts = []
    s_index = 0

    for match in _PARAMETER_PATTERN.finditer(raw):
        parts.append(raw[s_index:match.start()])
        parts.append({u"Ref": match.group(1)})
        s_index = match.end()

    if not parts:
        return GenericHelperFn(raw)

    parts.append(raw[s_index:])
    return GenericHelperFn({u"Fn::Join": [u"", parts]})
Substitute placeholders in a string using CloudFormation references Args: raw (`str`): String to be processed. Byte strings are not supported; decode them before passing them to this function. Returns: `str` | :class:`troposphere.GenericHelperFn`: An expression with placeholders from the input replaced, suitable to be passed to Troposphere to be included in CloudFormation template. This will be the input string without modification if no substitutions are found, and a composition of CloudFormation calls otherwise.
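A rough illustration of the substitution, under the assumption that _PARAMETER_PATTERN (not shown in this row) matches ${...} placeholders:

result = _parameterize_string("prefix-${BucketName}-suffix")
# roughly equivalent CloudFormation expression:
#   {"Fn::Join": ["", ["prefix-", {"Ref": "BucketName"}, "-suffix"]]}
plain = _parameterize_string("no placeholders here")  # returned unchanged, wrapped in GenericHelperFn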
def handle_logout_request(self, request, name_id, binding, sign=False,
                          sign_alg=None, relay_state=""):
    """ Deal with a LogoutRequest

    :param request: The request as text string
    :param name_id: The id of the current user
    :param binding: Which binding the message came in over
    :param sign: Whether the response will be signed or not
    :return: Keyword arguments which can be used to send the response
        what's returned follows different patterns for different bindings.

        If the binding is BINDING_SOAP, what is returned looks like this::

            {
                "data": <the SOAP enveloped response>
                "url": "",
                'headers': [('content-type', 'application/soap+xml')]
                'method': "POST"
            }
    """
    logger.info("logout request: %s", request)

    _req = self._parse_request(request, LogoutRequest,
                               "single_logout_service", binding)

    if _req.message.name_id == name_id:
        try:
            if self.local_logout(name_id):
                status = success_status_factory()
            else:
                status = status_message_factory("Server error",
                                                STATUS_REQUEST_DENIED)
        except KeyError:
            status = status_message_factory("Server error",
                                            STATUS_REQUEST_DENIED)
    else:
        status = status_message_factory("Wrong user",
                                        STATUS_UNKNOWN_PRINCIPAL)

    if binding == BINDING_SOAP:
        response_bindings = [BINDING_SOAP]
    elif binding == BINDING_HTTP_POST or BINDING_HTTP_REDIRECT:
        response_bindings = [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT]
    else:
        response_bindings = self.config.preferred_binding[
            "single_logout_service"]

    response = self.create_logout_response(_req.message, response_bindings,
                                           status, sign, sign_alg=sign_alg)
    rinfo = self.response_args(_req.message, response_bindings)

    return self.apply_binding(rinfo["binding"], response,
                              rinfo["destination"], relay_state,
                              response=True, sign=sign)
Deal with a LogoutRequest :param request: The request as text string :param name_id: The id of the current user :param binding: Which binding the message came in over :param sign: Whether the response will be signed or not :return: Keyword arguments which can be used to send the response what's returned follows different patterns for different bindings. If the binding is BINDING_SOAP, what is returned looks like this:: { "data": <the SOAP enveloped response> "url": "", 'headers': [('content-type', 'application/soap+xml')] 'method': "POST" }
def _frontend_url():
    '''
    Tries to guess the url of zabbix frontend.

    .. versionadded:: 2016.3.0
    '''
    hostname = socket.gethostname()
    frontend_url = 'http://' + hostname + '/zabbix/api_jsonrpc.php'
    try:
        try:
            response = salt.utils.http.query(frontend_url)
            error = response['error']
        except HTTPError as http_e:
            error = six.text_type(http_e)
        if error.find('412: Precondition Failed'):
            return frontend_url
        else:
            raise KeyError
    except (ValueError, KeyError):
        return False
Tries to guess the url of zabbix frontend. .. versionadded:: 2016.3.0
def cmd_generate(args):
    """Generate images.

    Parameters
    ----------
    args : `argparse.Namespace`
        Command arguments.
    """
    check_output_format(args.output, args.count)

    markov = load(MarkovImage, args.state, args)

    if args.size is None:
        if markov.scanner.resize is None:
            print('Unknown output image size', file=stderr)
            exit(1)
        width, height = markov.scanner.resize
    else:
        width, height = args.size

    if args.level is None:
        scale = markov.scanner.min_size
    else:
        scale = reduce(
            lambda x, y: x * y,
            islice(markov.scanner.level_scale, 0, args.level - 1),
            1
        )

    width, height = width // scale, height // scale

    markov.scanner.traversal[0].show_progress = args.progress

    for fname in outfiles(markov, args.output, args.count, args.progress):
        img = markov(
            width, height,
            state_size=args.state_size,
            levels=args.level
        )
        save_image(img, fname)
Generate images. Parameters ---------- args : `argparse.Namespace` Command arguments.
def format_pkg_list(packages, versions_as_list, attr):
    '''
    Formats packages according to parameters for list_pkgs.
    '''
    ret = copy.deepcopy(packages)
    if attr:
        requested_attr = {'epoch', 'version', 'release', 'arch',
                          'install_date', 'install_date_time_t'}

        if attr != 'all':
            requested_attr &= set(attr + ['version'])

        for name in ret:
            versions = []
            for all_attr in ret[name]:
                filtered_attr = {}
                for key in requested_attr:
                    if all_attr[key]:
                        filtered_attr[key] = all_attr[key]
                versions.append(filtered_attr)
            ret[name] = versions
        return ret

    for name in ret:
        ret[name] = [format_version(d['epoch'], d['version'], d['release'])
                     for d in ret[name]]
    if not versions_as_list:
        stringify(ret)
    return ret
Formats packages according to parameters for list_pkgs.
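A small illustration of the two output shapes, using a hypothetical package map; format_version and stringify are assumed to be defined in the same module:

pkgs = {
    'zsh': [{'epoch': None, 'version': '5.8', 'release': '1.el8', 'arch': 'x86_64',
             'install_date': None, 'install_date_time_t': None}],
}

# attr=['arch'] keeps per-version dicts restricted to arch and version
detailed = format_pkg_list(pkgs, versions_as_list=True, attr=['arch'])

# attr=None collapses each entry to an "epoch:version-release"-style string via format_version
flat = format_pkg_list(pkgs, versions_as_list=True, attr=None)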
def send(self, data, sample_rate=1):
    """
    Squirt the metrics over UDP
    """
    if self.prefix:
        data = dict((".".join((self.prefix, stat)), value)
                    for stat, value in data.items())

    if sample_rate < 1:
        if random.random() > sample_rate:
            return
        sampled_data = dict((stat, "%s|@%s" % (value, sample_rate))
                            for stat, value in data.items())
    else:
        sampled_data = data

    try:
        [self.udp_sock.sendto(bytes(bytearray("%s:%s" % (stat, value), "utf-8")), self.addr)
         for stat, value in sampled_data.items()]
    except:
        self.log.exception("unexpected error")
Squirt the metrics over UDP
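A hypothetical usage sketch; StatsClient stands in for whatever class defines send() and is assumed to set up prefix, udp_sock, addr, and log:

client = StatsClient(prefix='myapp', host='127.0.0.1', port=8125)  # hypothetical constructor
client.send({'requests': '1|c'})                    # always sent
client.send({'latency': '12|ms'}, sample_rate=0.1)  # sent ~10% of the time, tagged with |@0.1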
def setupFeatures(self):
    """
    Make the features source.

    **This should not be called externally.** Subclasses
    may override this method to handle the file creation in a different
    way if desired.
    """
    if self.featureWriters:
        featureFile = parseLayoutFeatures(self.ufo)

        for writer in self.featureWriters:
            writer.write(self.ufo, featureFile, compiler=self)

        # stringify AST to get correct line numbers in error messages
        self.features = featureFile.asFea()
    else:
        # no featureWriters, simply read existing features' text
        self.features = tounicode(self.ufo.features.text or "", "utf-8")
Make the features source. **This should not be called externally.** Subclasses may override this method to handle the file creation in a different way if desired.
def line_oriented(cls, line_oriented_options, console):
    """Given Goal.Options and a Console, yields functions for writing to
    stdout and stderr, respectively.

    The passed options instance will generally be the `Goal.Options` of a
    `LineOriented` `Goal`.
    """
    if type(line_oriented_options) != cls.Options:
        raise AssertionError(
            'Expected Options for `{}`, got: {}'.format(cls.__name__, line_oriented_options))

    output_file = line_oriented_options.values.output_file
    sep = line_oriented_options.values.sep.encode('utf-8').decode('unicode_escape')

    stdout, stderr = console.stdout, console.stderr
    if output_file:
        stdout = open(output_file, 'w')

    try:
        print_stdout = lambda msg: print(msg, file=stdout, end=sep)
        print_stderr = lambda msg: print(msg, file=stderr)
        yield print_stdout, print_stderr
    finally:
        if output_file:
            stdout.close()
        else:
            stdout.flush()
        stderr.flush()
Given Goal.Options and a Console, yields functions for writing to stdout and stderr, respectively. The passed options instance will generally be the `Goal.Options` of a `LineOriented` `Goal`.
def add(self, fact):
    """Create a VALID token and send it to all children."""
    token = Token.valid(fact)
    MATCHER.debug("<BusNode> added %r", token)
    for child in self.children:
        child.callback(token)
Create a VALID token and send it to all children.
def friendly_type_name(raw_type: typing.Type) -> str:
    """
    Returns a user-friendly type name

    :param raw_type: raw type (str, int, ...)
    :return: user friendly type as string
    """
    try:
        return _TRANSLATE_TYPE[raw_type]
    except KeyError:
        LOGGER.error('unmanaged value type: %s', raw_type)
        return str(raw_type)
Returns a user-friendly type name :param raw_type: raw type (str, int, ...) :return: user friendly type as string
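A minimal sketch of how the lookup behaves, assuming a small _TRANSLATE_TYPE mapping (the real table is not shown in this row):

_TRANSLATE_TYPE = {str: 'string', int: 'integer', bool: 'boolean'}  # assumed mapping

friendly_type_name(str)    # -> 'string'
friendly_type_name(float)  # not in the table: logs an error and returns "<class 'float'>"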
def restore(self):
    """
    Set the values of whatever attributes are recoverable
    from the pickle file.

    Populate the attributes (the __dict__) of the EgStore object from
    the attributes (the __dict__) of the pickled object.

    If the pickled object has attributes that have been initialized
    in the EgStore object, then those attributes of the EgStore object
    will be replaced by the values of the corresponding attributes
    in the pickled object.

    If the pickled object is missing some attributes that have
    been initialized in the EgStore object, then those attributes
    of the EgStore object will retain the values that they were
    initialized with.

    If the pickled object has some attributes that were not
    initialized in the EgStore object, then those attributes
    will be ignored.

    IN SUMMARY: After the recover() operation, the EgStore object
    will have all, and only, the attributes that it had when it
    was initialized.

    Where possible, those attributes will have values recovered
    from the pickled object.
    """
    if not os.path.exists(self.filename):
        return self
    if not os.path.isfile(self.filename):
        return self

    try:
        with open(self.filename, "rb") as f:
            unpickledObject = pickle.load(f)

        for key in list(self.__dict__.keys()):
            default = self.__dict__[key]
            self.__dict__[key] = unpickledObject.__dict__.get(key, default)
    except:
        pass

    return self
Set the values of whatever attributes are recoverable from the pickle file. Populate the attributes (the __dict__) of the EgStore object from the attributes (the __dict__) of the pickled object. If the pickled object has attributes that have been initialized in the EgStore object, then those attributes of the EgStore object will be replaced by the values of the corresponding attributes in the pickled object. If the pickled object is missing some attributes that have been initialized in the EgStore object, then those attributes of the EgStore object will retain the values that they were initialized with. If the pickled object has some attributes that were not initialized in the EgStore object, then those attributes will be ignored. IN SUMMARY: After the recover() operation, the EgStore object will have all, and only, the attributes that it had when it was initialized. Where possible, those attributes will have values recovered from the pickled object.
def get_version(version=None):
    """Returns a tuple of the django version. If version argument is
    non-empty, then checks for correctness of the tuple provided.
    """
    if version[4] > 0:
        # 0.2.1-alpha.1
        return "%s.%s.%s-%s.%s" % (version[0], version[1], version[2], version[3], version[4])
    elif version[3] != '':
        # 0.2.1-alpha
        return "%s.%s.%s-%s" % (version[0], version[1], version[2], version[3])
    elif version[2] > 0:
        # 0.2.1
        return "%s.%s.%s" % (version[0], version[1], version[2])
    else:
        # 0.2
        return "%s.%s" % (version[0], version[1])
Returns a tuple of the django version. If version argument is non-empty, then checks for correctness of the tuple provided.
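The four branches above correspond to these outputs; the tuple layout (major, minor, micro, pre-release tag, pre-release number) is inferred from the code:

get_version((0, 2, 1, 'alpha', 1))  # -> '0.2.1-alpha.1'
get_version((0, 2, 1, 'alpha', 0))  # -> '0.2.1-alpha'
get_version((0, 2, 1, '', 0))       # -> '0.2.1'
get_version((0, 2, 0, '', 0))       # -> '0.2'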
def items(self):
    """
    Request URL and parse response. Yield a ``Torrent`` for every torrent
    on page.

    If in multipage mode, Torrents from next pages are automatically chained.
    """
    if self._multipage:
        while True:
            # Pool for more torrents
            items = super(Paginated, self).items()
            # Stop if no more torrents
            first = next(items, None)
            if first is None:
                raise StopIteration()
            # Yield them if not
            else:
                yield first
                for item in items:
                    yield item
            # Go to the next page
            self.next()
    else:
        for item in super(Paginated, self).items():
            yield item
Request URL and parse response. Yield a ``Torrent`` for every torrent on page. If in multipage mode, Torrents from next pages are automatically chained.
def copy_wildcard(src_folder, dst_folder, glob):
    """copy
    """
    create_dir(dst_folder)
    for sname in iglob(os.path.join(src_folder, glob)):
        rname = os.path.relpath(sname, src_folder)
        dname = os.path.join(dst_folder, rname)
        create_dir(dname)
        shutil.copy(sname, dname)
copy
def get_server(self, datacenter_id, server_id, depth=1):
    """
    Retrieves a server by its ID.

    :param datacenter_id: The unique ID of the data center.
    :type datacenter_id: ``str``

    :param server_id: The unique ID of the server.
    :type server_id: ``str``

    :param depth: The depth of the response data.
    :type depth: ``int``
    """
    response = self._perform_request(
        '/datacenters/%s/servers/%s?depth=%s' % (
            datacenter_id,
            server_id,
            str(depth)))

    return response
Retrieves a server by its ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` :param depth: The depth of the response data. :type depth: ``int``
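A hypothetical call, assuming client is an instance of the API class that defines get_server and _perform_request:

server = client.get_server(
    datacenter_id='7b4a9d2e-...',  # illustrative IDs
    server_id='3f1c8a6b-...',
    depth=2,
)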
def resolve_workdir_path(cls, start_path=os.curdir):
    """
    Look for an existing workdir in the given path, in a path/.lago dir,
    or in a .lago dir under any of it's parent directories

    Args:
        start_path (str): path to start the search from, if None passed,
            it will use the current dir

    Returns:
        str: path to the found prefix

    Raises:
        LagoUserException: if no prefix was found
    """
    if start_path == 'auto':
        start_path = os.curdir

    cur_path = start_path

    LOGGER.debug(
        'Checking if %s is a workdir',
        os.path.abspath(cur_path),
    )
    if cls.is_workdir(cur_path):
        return os.path.abspath(cur_path)

    # now search for a .lago directory that's a workdir on any parent dir
    cur_path = os.path.join(start_path, '.lago')
    while not cls.is_workdir(cur_path):
        LOGGER.debug('%s is not a workdir', cur_path)
        cur_path = os.path.normpath(
            os.path.join(cur_path, '..', '..', '.lago')
        )
        LOGGER.debug('Checking %s for a workdir', cur_path)

        if os.path.realpath(os.path.join(cur_path, '..')) == '/':
            # no workdir found - look workdirs up the current path + 1,
            # print informative message and exit.
            candidates = []
            for path in os.listdir(os.curdir):
                if os.path.isdir(path):
                    dirs = os.listdir(path)
                    if 'current' in dirs:
                        candidates.append(
                            os.path.abspath(os.path.join(os.curdir, path))
                        )
                    elif '.lago' in dirs:
                        candidates.append(
                            os.path.abspath(
                                os.path.join(os.curdir, path, '.lago')
                            )
                        )

            candidates = filter(Workdir.is_possible_workdir, candidates)
            for idx in range(len(candidates)):
                if os.path.split(candidates[idx])[1] == '.lago':
                    candidates[idx] = os.path.dirname(candidates[idx])

            msg = 'Unable to find workdir in {0}'.format(
                os.path.abspath(start_path)
            )
            if candidates:
                msg += '\nFound possible workdirs in: {0}'.format(
                    ', '.join(candidates)
                )
            raise LagoUserException(msg)

    return os.path.abspath(cur_path)
Look for an existing workdir in the given path, in a path/.lago dir, or in a .lago dir under any of it's parent directories Args: start_path (str): path to start the search from, if None passed, it will use the current dir Returns: str: path to the found prefix Raises: LagoUserException: if no prefix was found
def precursor_sequence(loci, reference):
    """Get sequence from genome"""
    region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4])
    precursor = pybedtools.BedTool(str(region), from_string=True).sequence(fi=reference, s=True)
    return open(precursor.seqfn).read().split("\n")[1]
Get sequence from genome
def id_generator(start=0):
    """
    Generator for sequential numeric numbers.
    """
    count = start
    while True:
        send_value = (yield count)
        if not send_value is None:
            if send_value < count:
                raise ValueError('Values from ID generator must increase '
                                 'monotonically (current value: %d; value '
                                 'sent to generator: %d).'
                                 % (count, send_value))
            count = send_value
        else:
            count += 1
Generator for sequential numeric numbers.
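A quick interaction sketch with the generator above:

gen = id_generator(start=100)
next(gen)       # -> 100
next(gen)       # -> 101
gen.send(200)   # jump ahead: -> 200
next(gen)       # -> 201
# gen.send(150) would raise ValueError, since values must increase monotonically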
def _handle_poll(self, relpath, params):
    """Handle poll requests for raw file contents."""
    request = json.loads(params.get('q')[0])
    ret = {}
    # request is a polling request for multiple files. For each file:
    #  - id is some identifier assigned by the client, used to differentiate the results.
    #  - path is the file to poll.
    #  - pos is the last byte position in that file seen by the client.
    for poll in request:
        _id = poll.get('id', None)
        path = poll.get('path', None)
        pos = poll.get('pos', 0)
        if path:
            abspath = os.path.normpath(os.path.join(self._root, path))
            if os.path.isfile(abspath):
                with open(abspath, 'rb') as infile:
                    if pos:
                        infile.seek(pos)
                    content = infile.read()
                ret[_id] = content.decode("utf-8")
    content = json.dumps(ret).encode("utf-8")
    self._send_content(content, 'application/json')
Handle poll requests for raw file contents.
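A hedged sketch of the poll payload the handler expects (ids, paths, and offsets are illustrative):

import json

query = json.dumps([
    {"id": "build-log", "path": "logs/build.log", "pos": 2048},  # resume from byte 2048
    {"id": "test-log", "path": "logs/test.log"},                 # no pos: read from the start
])
# the handler replies with JSON mapping each id to the newly read contents, e.g.
# {"build-log": "...new bytes...", "test-log": "...whole file..."}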
def run_count(bam_file, dexseq_gff, stranded, out_file, data): """ run dexseq_count on a BAM file """ assert file_exists(bam_file), "%s does not exist." % bam_file sort_order = bam._get_sort_order(bam_file, {}) assert sort_order, "Cannot determine sort order of %s." % bam_file strand_flag = _strand_flag(stranded) assert strand_flag, "%s is not a valid strandedness value." % stranded if not dexseq_gff: logger.info("No DEXSeq GFF file was found, skipping exon-level counting.") return None elif not file_exists(dexseq_gff): logger.info("%s was not found, so exon-level counting is being " "skipped." % dexseq_gff) return None dexseq_count = _dexseq_count_path() if not dexseq_count: logger.info("DEXseq is not installed, skipping exon-level counting.") return None if dd.get_aligner(data) == "bwa": logger.info("Can't use DEXSeq with bwa alignments, skipping exon-level counting.") return None sort_flag = "name" if sort_order == "queryname" else "pos" is_paired = bam.is_paired(bam_file) paired_flag = "yes" if is_paired else "no" bcbio_python = sys.executable if file_exists(out_file): return out_file cmd = ("{bcbio_python} {dexseq_count} -f bam -r {sort_flag} -p {paired_flag} " "-s {strand_flag} {dexseq_gff} {bam_file} {tx_out_file}") message = "Counting exon-level counts with %s and %s." % (bam_file, dexseq_gff) with file_transaction(data, out_file) as tx_out_file: do.run(cmd.format(**locals()), message) return out_file
run dexseq_count on a BAM file
Below is the instruction that describes the task:
### Input:
run dexseq_count on a BAM file
### Response:
def run_count(bam_file, dexseq_gff, stranded, out_file, data):
    """
    run dexseq_count on a BAM file
    """
    assert file_exists(bam_file), "%s does not exist." % bam_file
    sort_order = bam._get_sort_order(bam_file, {})
    assert sort_order, "Cannot determine sort order of %s." % bam_file
    strand_flag = _strand_flag(stranded)
    assert strand_flag, "%s is not a valid strandedness value." % stranded
    if not dexseq_gff:
        logger.info("No DEXSeq GFF file was found, skipping exon-level counting.")
        return None
    elif not file_exists(dexseq_gff):
        logger.info("%s was not found, so exon-level counting is being "
                    "skipped." % dexseq_gff)
        return None
    dexseq_count = _dexseq_count_path()
    if not dexseq_count:
        logger.info("DEXseq is not installed, skipping exon-level counting.")
        return None
    if dd.get_aligner(data) == "bwa":
        logger.info("Can't use DEXSeq with bwa alignments, skipping exon-level counting.")
        return None
    sort_flag = "name" if sort_order == "queryname" else "pos"
    is_paired = bam.is_paired(bam_file)
    paired_flag = "yes" if is_paired else "no"
    bcbio_python = sys.executable
    if file_exists(out_file):
        return out_file
    cmd = ("{bcbio_python} {dexseq_count} -f bam -r {sort_flag} -p {paired_flag} "
           "-s {strand_flag} {dexseq_gff} {bam_file} {tx_out_file}")
    message = "Counting exon-level counts with %s and %s." % (bam_file, dexseq_gff)
    with file_transaction(data, out_file) as tx_out_file:
        do.run(cmd.format(**locals()), message)
    return out_file
def reject_insert(self, s_pappid, order_id, reason, redirect_url=None):
    """
    Reject a user's invoicing request
    For details, see
    https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2

    :param s_pappid: identifier of the invoicing platform on WeChat; merchants need to obtain it from the invoicing platform
    :param order_id: order ID, the unique identifier of a single invoicing request within the merchant's system
    :param reason: reason for the rejection
    :param redirect_url: redirect link
    """
    return self._post(
        'rejectinsert',
        data={
            's_pappid': s_pappid,
            'order_id': order_id,
            'reason': reason,
            'url': redirect_url,
        },
    )
Reject a user's invoicing request For details, see https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 :param s_pappid: identifier of the invoicing platform on WeChat; merchants need to obtain it from the invoicing platform :param order_id: order ID, the unique identifier of a single invoicing request within the merchant's system :param reason: reason for the rejection :param redirect_url: redirect link
Below is the instruction that describes the task:
### Input:
Reject a user's invoicing request For details, see https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 :param s_pappid: identifier of the invoicing platform on WeChat; merchants need to obtain it from the invoicing platform :param order_id: order ID, the unique identifier of a single invoicing request within the merchant's system :param reason: reason for the rejection :param redirect_url: redirect link
### Response:
def reject_insert(self, s_pappid, order_id, reason, redirect_url=None):
    """
    Reject a user's invoicing request
    For details, see
    https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2

    :param s_pappid: identifier of the invoicing platform on WeChat; merchants need to obtain it from the invoicing platform
    :param order_id: order ID, the unique identifier of a single invoicing request within the merchant's system
    :param reason: reason for the rejection
    :param redirect_url: redirect link
    """
    return self._post(
        'rejectinsert',
        data={
            's_pappid': s_pappid,
            'order_id': order_id,
            'reason': reason,
            'url': redirect_url,
        },
    )
def get_subsites(self): """ Returns a list of subsites defined for this site :rtype: list[Site] """ url = self.build_url( self._endpoints.get('get_subsites').format(id=self.object_id)) response = self.con.get(url) if not response: return [] data = response.json() # Everything received from cloud must be passed as self._cloud_data_key return [self.__class__(parent=self, **{self._cloud_data_key: site}) for site in data.get('value', [])]
Returns a list of subsites defined for this site :rtype: list[Site]
Below is the instruction that describes the task:
### Input:
Returns a list of subsites defined for this site :rtype: list[Site]
### Response:
def get_subsites(self):
    """ Returns a list of subsites defined for this site
    :rtype: list[Site]
    """
    url = self.build_url(
        self._endpoints.get('get_subsites').format(id=self.object_id))
    response = self.con.get(url)
    if not response:
        return []
    data = response.json()
    # Everything received from cloud must be passed as self._cloud_data_key
    return [self.__class__(parent=self, **{self._cloud_data_key: site})
            for site in data.get('value', [])]
def extract_extension(path):
    """
    Reads a file path and returns a (filename, extension) tuple; the
    extension is None if the path contains no extension.

    :Parameters:
        path : str
            A filesystem path
    """
    filename = os.path.basename(path)
    parts = filename.split(".")
    if len(parts) == 1:
        return filename, None
    else:
        return ".".join(parts[:-1]), parts[-1]
Reads a file path and returns a (filename, extension) tuple; the extension is None if the path contains no extension. :Parameters: path : str A filesystem path
Below is the instruction that describes the task:
### Input:
Reads a file path and returns a (filename, extension) tuple; the extension is None if the path contains no extension. :Parameters: path : str A filesystem path
### Response:
def extract_extension(path):
    """
    Reads a file path and returns a (filename, extension) tuple; the
    extension is None if the path contains no extension.

    :Parameters:
        path : str
            A filesystem path
    """
    filename = os.path.basename(path)
    parts = filename.split(".")
    if len(parts) == 1:
        return filename, None
    else:
        return ".".join(parts[:-1]), parts[-1]
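Two hypothetical calls showing the (stem, extension) pair it returns:

assert extract_extension("/tmp/archive.tar.gz") == ("archive.tar", "gz")
assert extract_extension("/tmp/README") == ("README", None)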
def update_current_time(loop): """Cache the current time, since it is needed at the end of every keep-alive request to update the request timeout time :param loop: :return: """ global current_time current_time = time() loop.call_later(1, partial(update_current_time, loop))
Cache the current time, since it is needed at the end of every keep-alive request to update the request timeout time :param loop: :return:
Below is the instruction that describes the task:
### Input:
Cache the current time, since it is needed at the end of every keep-alive request to update the request timeout time :param loop: :return:
### Response:
def update_current_time(loop):
    """Cache the current time, since it is needed at the end of every
    keep-alive request to update the request timeout time

    :param loop:
    :return:
    """
    global current_time
    current_time = time()
    loop.call_later(1, partial(update_current_time, loop))
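A minimal sketch of driving this self-rescheduling callback from an asyncio loop; the demo wiring (new loop, stop after three seconds) is an assumption, not part of the original:

import asyncio
loop = asyncio.new_event_loop()
loop.call_soon(update_current_time, loop)  # kick off the once-per-second cache refresh
loop.call_later(3, loop.stop)              # stop this demo after ~3 seconds
loop.run_forever()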
def from_files(path_dir, dos_spin=1): """ get a BoltztrapAnalyzer object from a set of files Args: path_dir: directory where the boltztrap files are dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down Returns: a BoltztrapAnalyzer object """ run_type, warning, efermi, gap, doping_levels = \ BoltztrapAnalyzer.parse_outputtrans(path_dir) vol = BoltztrapAnalyzer.parse_struct(path_dir) intrans = BoltztrapAnalyzer.parse_intrans(path_dir) if run_type == "BOLTZ": dos, pdos = BoltztrapAnalyzer.parse_transdos( path_dir, efermi, dos_spin=dos_spin, trim_dos=False) mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \ seebeck_doping, cond_doping, kappa_doping, hall_doping, \ carrier_conc = BoltztrapAnalyzer. \ parse_cond_and_hall(path_dir, doping_levels) return BoltztrapAnalyzer( gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, seebeck_doping, cond_doping, kappa_doping, hall_doping, intrans, dos, pdos, carrier_conc, vol, warning) elif run_type == "DOS": trim = True if intrans["dos_type"] == "HISTO" else False dos, pdos = BoltztrapAnalyzer.parse_transdos( path_dir, efermi, dos_spin=dos_spin, trim_dos=trim) return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos, warning=warning, vol=vol) elif run_type == "BANDS": bz_kpoints = np.loadtxt( os.path.join(path_dir, "boltztrap_band.dat"))[:, -3:] bz_bands = np.loadtxt( os.path.join(path_dir, "boltztrap_band.dat"))[:, 1:-6] return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints, warning=warning, vol=vol) elif run_type == "FERMI": """ """ if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')): fs_data = read_cube_file( os.path.join(path_dir, 'boltztrap_BZ.cube')) elif os.path.exists(os.path.join(path_dir, 'fort.30')): fs_data = read_cube_file(os.path.join(path_dir, 'fort.30')) else: raise BoltztrapError("No data file found for fermi surface") return BoltztrapAnalyzer(fermi_surface_data=fs_data) else: raise ValueError("Run type: {} not recognized!".format(run_type))
get a BoltztrapAnalyzer object from a set of files Args: path_dir: directory where the boltztrap files are dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down Returns: a BoltztrapAnalyzer object
Below is the instruction that describes the task:
### Input:
get a BoltztrapAnalyzer object from a set of files Args: path_dir: directory where the boltztrap files are dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down Returns: a BoltztrapAnalyzer object
### Response:
def from_files(path_dir, dos_spin=1):
    """
    get a BoltztrapAnalyzer object from a set of files

    Args:
        path_dir: directory where the boltztrap files are
        dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down

    Returns:
        a BoltztrapAnalyzer object
    """
    run_type, warning, efermi, gap, doping_levels = \
        BoltztrapAnalyzer.parse_outputtrans(path_dir)
    vol = BoltztrapAnalyzer.parse_struct(path_dir)
    intrans = BoltztrapAnalyzer.parse_intrans(path_dir)
    if run_type == "BOLTZ":
        dos, pdos = BoltztrapAnalyzer.parse_transdos(
            path_dir, efermi, dos_spin=dos_spin, trim_dos=False)
        mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \
            seebeck_doping, cond_doping, kappa_doping, hall_doping, \
            carrier_conc = BoltztrapAnalyzer. \
            parse_cond_and_hall(path_dir, doping_levels)
        return BoltztrapAnalyzer(
            gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
            mu_doping, seebeck_doping, cond_doping, kappa_doping,
            hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)
    elif run_type == "DOS":
        trim = True if intrans["dos_type"] == "HISTO" else False
        dos, pdos = BoltztrapAnalyzer.parse_transdos(
            path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)
        return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,
                                 warning=warning, vol=vol)
    elif run_type == "BANDS":
        bz_kpoints = np.loadtxt(
            os.path.join(path_dir, "boltztrap_band.dat"))[:, -3:]
        bz_bands = np.loadtxt(
            os.path.join(path_dir, "boltztrap_band.dat"))[:, 1:-6]
        return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,
                                 warning=warning, vol=vol)
    elif run_type == "FERMI":
        """ """
        if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):
            fs_data = read_cube_file(
                os.path.join(path_dir, 'boltztrap_BZ.cube'))
        elif os.path.exists(os.path.join(path_dir, 'fort.30')):
            fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))
        else:
            raise BoltztrapError("No data file found for fermi surface")
        return BoltztrapAnalyzer(fermi_surface_data=fs_data)
    else:
        raise ValueError("Run type: {} not recognized!".format(run_type))
def in_range(x: int, minimum: int, maximum: int) -> bool:
    """ Return True if x is >= minimum and <= maximum. """
    return minimum <= x <= maximum
Return True if x is >= minimum and <= maximum.
Below is the instruction that describes the task:
### Input:
Return True if x is >= minimum and <= maximum.
### Response:
def in_range(x: int, minimum: int, maximum: int) -> bool:
    """ Return True if x is >= minimum and <= maximum. """
    return minimum <= x <= maximum
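Both bounds are inclusive, e.g.:

assert in_range(5, 1, 5) is True   # the upper bound counts as in range
assert in_range(0, 1, 5) is False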
def _log_likelihood_transit(theta, params, model, t, flux, err_flux,
                            priorbounds):
    '''
    Given a batman TransitModel and its proposed parameters (theta), update
    the batman params object with the proposed parameters and evaluate the
    gaussian likelihood.

    Note: the priorbounds are only needed to parse theta.
    '''
    u = []
    for ix, key in enumerate(sorted(priorbounds.keys())):
        if key == 'rp':
            params.rp = theta[ix]
        elif key == 't0':
            params.t0 = theta[ix]
        elif key == 'sma':
            params.a = theta[ix]
        elif key == 'incl':
            params.inc = theta[ix]
        elif key == 'period':
            params.per = theta[ix]
        elif key == 'ecc':
            params.ecc = theta[ix]
        elif key == 'omega':
            params.w = theta[ix]
        elif key == 'u_linear':
            u.append(theta[ix])
        elif key == 'u_quadratic':
            u.append(theta[ix])
            params.u = u
    lc = model.light_curve(params)
    residuals = flux - lc
    log_likelihood = -0.5*(
        np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
    )
    return log_likelihood
Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta.
Below is the instruction that describes the task:
### Input:
Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta.
### Response:
def _log_likelihood_transit(theta, params, model, t, flux, err_flux,
                            priorbounds):
    '''
    Given a batman TransitModel and its proposed parameters (theta), update
    the batman params object with the proposed parameters and evaluate the
    gaussian likelihood.

    Note: the priorbounds are only needed to parse theta.
    '''
    u = []
    for ix, key in enumerate(sorted(priorbounds.keys())):
        if key == 'rp':
            params.rp = theta[ix]
        elif key == 't0':
            params.t0 = theta[ix]
        elif key == 'sma':
            params.a = theta[ix]
        elif key == 'incl':
            params.inc = theta[ix]
        elif key == 'period':
            params.per = theta[ix]
        elif key == 'ecc':
            params.ecc = theta[ix]
        elif key == 'omega':
            params.w = theta[ix]
        elif key == 'u_linear':
            u.append(theta[ix])
        elif key == 'u_quadratic':
            u.append(theta[ix])
            params.u = u
    lc = model.light_curve(params)
    residuals = flux - lc
    log_likelihood = -0.5*(
        np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
    )
    return log_likelihood
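The return value is the standard Gaussian log-likelihood, log L = -1/2 * sum_i [ (r_i / sigma_i)^2 + ln(2*pi*sigma_i^2) ]; a small numpy check with made-up residuals:

import numpy as np
residuals = np.array([0.01, -0.02, 0.005])
err_flux = np.array([0.01, 0.01, 0.01])
log_like = -0.5 * np.sum((residuals / err_flux) ** 2
                         + np.log(2 * np.pi * err_flux ** 2))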
def itersubdirs(self, pattern=None, abspath=False): """ Generator for all subdirs (except excluded). :type pattern: str :param pattern: Unix style (glob like/gitignore like) pattern """ if pattern is not None: globster = Globster([pattern]) for root, dirs, files in self.walk(): for d in dirs: if pattern is None or (pattern is not None and globster.match(d)): if abspath: yield os.path.join(root, d) else: yield self.relpath(os.path.join(root, d))
Generator for all subdirs (except excluded). :type pattern: str :param pattern: Unix style (glob like/gitignore like) pattern
Below is the instruction that describes the task:
### Input:
Generator for all subdirs (except excluded). :type pattern: str :param pattern: Unix style (glob like/gitignore like) pattern
### Response:
def itersubdirs(self, pattern=None, abspath=False):
    """ Generator for all subdirs (except excluded).

    :type pattern: str
    :param pattern: Unix style (glob like/gitignore like) pattern
    """
    if pattern is not None:
        globster = Globster([pattern])
    for root, dirs, files in self.walk():
        for d in dirs:
            if pattern is None or (pattern is not None and globster.match(d)):
                if abspath:
                    yield os.path.join(root, d)
                else:
                    yield self.relpath(os.path.join(root, d))
def curtailment(self): """ Parameters ---------- curtailment_ts : :pandas:`pandas.Series<series>` See class definition for details. Returns ------- :pandas:`pandas.Series<series>` If self._curtailment is set it returns that. Otherwise, if curtailment in :class:`~.grid.network.TimeSeries` for the corresponding technology type (and if given, weather cell ID) is set this is returned. """ if self._curtailment is not None: return self._curtailment elif isinstance(self.grid.network.timeseries._curtailment, pd.DataFrame): if isinstance(self.grid.network.timeseries.curtailment. columns, pd.MultiIndex): if self.weather_cell_id: try: return self.grid.network.timeseries.curtailment[ self.type, self.weather_cell_id] except KeyError: logger.exception("No curtailment time series for type " "{} and weather cell ID {} " "given.".format(self.type, self.weather_cell_id)) raise else: logger.exception("No weather cell ID provided for " "fluctuating generator {}.".format( repr(self))) raise KeyError else: try: return self.grid.network.timeseries.curtailment[self.type] except KeyError: logger.exception("No curtailment time series for type " "{} given.".format(self.type)) raise else: return None
Parameters ---------- curtailment_ts : :pandas:`pandas.Series<series>` See class definition for details. Returns ------- :pandas:`pandas.Series<series>` If self._curtailment is set it returns that. Otherwise, if curtailment in :class:`~.grid.network.TimeSeries` for the corresponding technology type (and if given, weather cell ID) is set this is returned.
Below is the instruction that describes the task:
### Input:
Parameters ---------- curtailment_ts : :pandas:`pandas.Series<series>` See class definition for details. Returns ------- :pandas:`pandas.Series<series>` If self._curtailment is set it returns that. Otherwise, if curtailment in :class:`~.grid.network.TimeSeries` for the corresponding technology type (and if given, weather cell ID) is set this is returned.
### Response:
def curtailment(self):
    """
    Parameters
    ----------
    curtailment_ts : :pandas:`pandas.Series<series>`
        See class definition for details.

    Returns
    -------
    :pandas:`pandas.Series<series>`
        If self._curtailment is set it returns that. Otherwise, if
        curtailment in :class:`~.grid.network.TimeSeries` for the
        corresponding technology type (and if given, weather cell ID)
        is set this is returned.
    """
    if self._curtailment is not None:
        return self._curtailment
    elif isinstance(self.grid.network.timeseries._curtailment,
                    pd.DataFrame):
        if isinstance(self.grid.network.timeseries.curtailment.
                      columns, pd.MultiIndex):
            if self.weather_cell_id:
                try:
                    return self.grid.network.timeseries.curtailment[
                        self.type, self.weather_cell_id]
                except KeyError:
                    logger.exception("No curtailment time series for type "
                                     "{} and weather cell ID {} "
                                     "given.".format(self.type,
                                                     self.weather_cell_id))
                    raise
            else:
                logger.exception("No weather cell ID provided for "
                                 "fluctuating generator {}.".format(
                                     repr(self)))
                raise KeyError
        else:
            try:
                return self.grid.network.timeseries.curtailment[self.type]
            except KeyError:
                logger.exception("No curtailment time series for type "
                                 "{} given.".format(self.type))
                raise
    else:
        return None
def getVerificators(self):
    """Returns the user ids of the users that verified this analysis
    """
    verifiers = list()
    actions = ["verify", "multi_verify"]
    for event in wf.getReviewHistory(self):
        if event['action'] in actions:
            verifiers.append(event['actor'])
    # sort in place; a bare sorted() call would discard its result
    verifiers.sort(reverse=True)
    return verifiers
Returns the user ids of the users that verified this analysis
Below is the instruction that describes the task:
### Input:
Returns the user ids of the users that verified this analysis
### Response:
def getVerificators(self):
    """Returns the user ids of the users that verified this analysis
    """
    verifiers = list()
    actions = ["verify", "multi_verify"]
    for event in wf.getReviewHistory(self):
        if event['action'] in actions:
            verifiers.append(event['actor'])
    # sort in place; a bare sorted() call would discard its result
    verifiers.sort(reverse=True)
    return verifiers
def with_debug(self, os_path=None, skip_sub_command=False, disk_closed_callback=None): """ A context manager yielding a debug-output-suitable file-like object based on the optional os_path and optionally skipping any configured sub-command. :param os_path: Optional path to base the file-like object on. :param skip_sub_command: Set True to skip any configured sub-command filter. :param disk_closed_callback: If the backing of the file-like object is an actual file that will be closed, disk_closed_callback (if set) will be called with the on-disk path just after closing it. """ sub_command = None if skip_sub_command else self.debug_sub_command out, path = self._get_out_and_path( self.debug, self.debug_root, sub_command, os_path) try: if hasattr(out, 'stdin'): yield out.stdin else: yield out finally: if hasattr(out, 'stdin'): self._close(out.stdin) self._wait(out, path) self._close(out) if disk_closed_callback and path: disk_closed_callback(path)
A context manager yielding a debug-output-suitable file-like object based on the optional os_path and optionally skipping any configured sub-command. :param os_path: Optional path to base the file-like object on. :param skip_sub_command: Set True to skip any configured sub-command filter. :param disk_closed_callback: If the backing of the file-like object is an actual file that will be closed, disk_closed_callback (if set) will be called with the on-disk path just after closing it.
Below is the instruction that describes the task:
### Input:
A context manager yielding a debug-output-suitable file-like object based on the optional os_path and optionally skipping any configured sub-command. :param os_path: Optional path to base the file-like object on. :param skip_sub_command: Set True to skip any configured sub-command filter. :param disk_closed_callback: If the backing of the file-like object is an actual file that will be closed, disk_closed_callback (if set) will be called with the on-disk path just after closing it.
### Response:
def with_debug(self, os_path=None, skip_sub_command=False,
               disk_closed_callback=None):
    """
    A context manager yielding a debug-output-suitable file-like
    object based on the optional os_path and optionally skipping any
    configured sub-command.

    :param os_path: Optional path to base the file-like object on.
    :param skip_sub_command: Set True to skip any configured
        sub-command filter.
    :param disk_closed_callback: If the backing of the file-like
        object is an actual file that will be closed,
        disk_closed_callback (if set) will be called with the
        on-disk path just after closing it.
    """
    sub_command = None if skip_sub_command else self.debug_sub_command
    out, path = self._get_out_and_path(
        self.debug, self.debug_root, sub_command, os_path)
    try:
        if hasattr(out, 'stdin'):
            yield out.stdin
        else:
            yield out
    finally:
        if hasattr(out, 'stdin'):
            self._close(out.stdin)
            self._wait(out, path)
        self._close(out)
        if disk_closed_callback and path:
            disk_closed_callback(path)
def get_upstream_fork_point(self):
    """Get the most recent ancestor of HEAD that occurs on an upstream
    branch.

    First looks at the current branch's tracking branch, if applicable. If
    that doesn't work, looks at every other branch to find the most recent
    ancestor of HEAD that occurs on a tracking branch.

    Returns:
        git.Commit object or None
    """
    possible_relatives = []
    try:
        if not self.repo:
            return None
        try:
            active_branch = self.repo.active_branch
        except (TypeError, ValueError):
            logger.debug("git is in a detached head state")
            return None  # detached head
        else:
            tracking_branch = active_branch.tracking_branch()
            if tracking_branch:
                possible_relatives.append(tracking_branch.commit)

        if not possible_relatives:
            for branch in self.repo.branches:
                tracking_branch = branch.tracking_branch()
                if tracking_branch is not None:
                    possible_relatives.append(tracking_branch.commit)

        head = self.repo.head
        most_recent_ancestor = None
        for possible_relative in possible_relatives:
            # at most one:
            for ancestor in self.repo.merge_base(head, possible_relative):
                if most_recent_ancestor is None:
                    most_recent_ancestor = ancestor
                elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
                    most_recent_ancestor = ancestor

        return most_recent_ancestor
    except exc.GitCommandError as e:
        logger.debug("git remote upstream fork point could not be found")
        # exceptions have no .message attribute on Python 3; use str(e)
        logger.debug(str(e))
        return None
Get the most recent ancestor of HEAD that occurs on an upstream branch. First looks at the current branch's tracking branch, if applicable. If that doesn't work, looks at every other branch to find the most recent ancestor of HEAD that occurs on a tracking branch. Returns: git.Commit object or None
Below is the instruction that describes the task:
### Input:
Get the most recent ancestor of HEAD that occurs on an upstream branch. First looks at the current branch's tracking branch, if applicable. If that doesn't work, looks at every other branch to find the most recent ancestor of HEAD that occurs on a tracking branch. Returns: git.Commit object or None
### Response:
def get_upstream_fork_point(self):
    """Get the most recent ancestor of HEAD that occurs on an upstream
    branch.

    First looks at the current branch's tracking branch, if applicable. If
    that doesn't work, looks at every other branch to find the most recent
    ancestor of HEAD that occurs on a tracking branch.

    Returns:
        git.Commit object or None
    """
    possible_relatives = []
    try:
        if not self.repo:
            return None
        try:
            active_branch = self.repo.active_branch
        except (TypeError, ValueError):
            logger.debug("git is in a detached head state")
            return None  # detached head
        else:
            tracking_branch = active_branch.tracking_branch()
            if tracking_branch:
                possible_relatives.append(tracking_branch.commit)

        if not possible_relatives:
            for branch in self.repo.branches:
                tracking_branch = branch.tracking_branch()
                if tracking_branch is not None:
                    possible_relatives.append(tracking_branch.commit)

        head = self.repo.head
        most_recent_ancestor = None
        for possible_relative in possible_relatives:
            # at most one:
            for ancestor in self.repo.merge_base(head, possible_relative):
                if most_recent_ancestor is None:
                    most_recent_ancestor = ancestor
                elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
                    most_recent_ancestor = ancestor

        return most_recent_ancestor
    except exc.GitCommandError as e:
        logger.debug("git remote upstream fork point could not be found")
        # exceptions have no .message attribute on Python 3; use str(e)
        logger.debug(str(e))
        return None
def validate(self, model, checks=[]): """Use a defined schema to validate the medium table format.""" custom = [ check_partial(reaction_id_check, frozenset(r.id for r in model.reactions)) ] super(Medium, self).validate(model=model, checks=checks + custom)
Use a defined schema to validate the medium table format.
Below is the instruction that describes the task:
### Input:
Use a defined schema to validate the medium table format.
### Response:
def validate(self, model, checks=[]):
    """Use a defined schema to validate the medium table format."""
    custom = [
        check_partial(reaction_id_check,
                      frozenset(r.id for r in model.reactions))
    ]
    super(Medium, self).validate(model=model, checks=checks + custom)
def feature_detector(self, tokens, index, history): """Implementing a slightly modified feature detector. @param tokens: The tokens from the sentence to tag. @param index: The current token index to tag. @param history: The previous tagged tokens. """ word = tokens[index] if index == 0: # At the beginning of the sentence prevword = prevprevword = None prevtag = prevprevtag = None #word = word.lower() # Lowercase at the beginning of sentence elif index == 1: prevword = tokens[index-1] # Note: no lowercase prevprevword = None prevtag = history[index-1] prevprevtag = None else: prevword = tokens[index-1] prevprevword = tokens[index-2] prevtag = history[index-1] prevprevtag = history[index-2] if re.match('[0-9]+([\.,][0-9]*)?|[0-9]*[\.,][0-9]+$', word): # Included "," as decimal point shape = 'number' elif re.compile('\W+$', re.UNICODE).match(word): # Included unicode flag shape = 'punct' elif re.match('([A-ZÄÖÜ]+[a-zäöüß]*-?)+$', word): # Included dash for dashed words and umlauts shape = 'upcase' elif re.match('[a-zäöüß]+', word): # Included umlauts shape = 'downcase' elif re.compile("\w+", re.UNICODE).match(word): # Included unicode flag shape = 'mixedcase' else: shape = 'other' features = { 'prevtag': prevtag, 'prevprevtag': prevprevtag, 'word': word, 'word.lower': word.lower(), 'suffix3': word.lower()[-3:], #'suffix2': word.lower()[-2:], #'suffix1': word.lower()[-1:], 'preffix1': word[:1], # included 'prevprevword': prevprevword, 'prevword': prevword, 'prevtag+word': '%s+%s' % (prevtag, word), 'prevprevtag+word': '%s+%s' % (prevprevtag, word), 'prevword+word': '%s+%s' % (prevword, word), 'shape': shape } return features
Implementing a slightly modified feature detector. @param tokens: The tokens from the sentence to tag. @param index: The current token index to tag. @param history: The previous tagged tokens.
Below is the instruction that describes the task:
### Input:
Implementing a slightly modified feature detector. @param tokens: The tokens from the sentence to tag. @param index: The current token index to tag. @param history: The previous tagged tokens.
### Response:
def feature_detector(self, tokens, index, history):
    """Implementing a slightly modified feature detector.

    @param tokens: The tokens from the sentence to tag.
    @param index: The current token index to tag.
    @param history: The previous tagged tokens.
    """
    word = tokens[index]
    if index == 0:
        # At the beginning of the sentence
        prevword = prevprevword = None
        prevtag = prevprevtag = None
        #word = word.lower() # Lowercase at the beginning of sentence
    elif index == 1:
        prevword = tokens[index-1] # Note: no lowercase
        prevprevword = None
        prevtag = history[index-1]
        prevprevtag = None
    else:
        prevword = tokens[index-1]
        prevprevword = tokens[index-2]
        prevtag = history[index-1]
        prevprevtag = history[index-2]

    if re.match('[0-9]+([\.,][0-9]*)?|[0-9]*[\.,][0-9]+$', word):
        # Included "," as decimal point
        shape = 'number'
    elif re.compile('\W+$', re.UNICODE).match(word):
        # Included unicode flag
        shape = 'punct'
    elif re.match('([A-ZÄÖÜ]+[a-zäöüß]*-?)+$', word):
        # Included dash for dashed words and umlauts
        shape = 'upcase'
    elif re.match('[a-zäöüß]+', word):
        # Included umlauts
        shape = 'downcase'
    elif re.compile("\w+", re.UNICODE).match(word):
        # Included unicode flag
        shape = 'mixedcase'
    else:
        shape = 'other'

    features = {
        'prevtag': prevtag,
        'prevprevtag': prevprevtag,
        'word': word,
        'word.lower': word.lower(),
        'suffix3': word.lower()[-3:],
        #'suffix2': word.lower()[-2:],
        #'suffix1': word.lower()[-1:],
        'preffix1': word[:1], # included
        'prevprevword': prevprevword,
        'prevword': prevword,
        'prevtag+word': '%s+%s' % (prevtag, word),
        'prevprevtag+word': '%s+%s' % (prevprevtag, word),
        'prevword+word': '%s+%s' % (prevword, word),
        'shape': shape
    }
    return features
def unlink(client, name, include, exclude, yes): """Remove matching files from a dataset.""" dataset = client.load_dataset(name=name) records = _filter( client, names=[dataset.name], include=include, exclude=exclude ) if not yes and records: prompt_text = ( 'You are about to remove ' 'following from "{0}" dataset.\n'.format(dataset.name) + '\n'.join([str(record.full_path) for record in records]) + '\nDo you wish to continue?' ) click.confirm(WARNING + prompt_text, abort=True) if records: for item in records: dataset.unlink_file(item.path) dataset.to_yaml() click.secho('OK', fg='green')
Remove matching files from a dataset.
Below is the instruction that describes the task:
### Input:
Remove matching files from a dataset.
### Response:
def unlink(client, name, include, exclude, yes):
    """Remove matching files from a dataset."""
    dataset = client.load_dataset(name=name)
    records = _filter(
        client, names=[dataset.name], include=include, exclude=exclude
    )
    if not yes and records:
        prompt_text = (
            'You are about to remove '
            'following from "{0}" dataset.\n'.format(dataset.name) +
            '\n'.join([str(record.full_path) for record in records]) +
            '\nDo you wish to continue?'
        )
        click.confirm(WARNING + prompt_text, abort=True)
    if records:
        for item in records:
            dataset.unlink_file(item.path)
        dataset.to_yaml()
    click.secho('OK', fg='green')
def min_eta_for_em_bright(bh_spin_z, ns_g_mass, mNS_pts, sBH_pts, eta_mins):
    """
    Function that uses the end product of generate_em_constraint_data to
    sweep over a set of NS-BH binaries and determine the minimum symmetric
    mass ratio required by each binary to yield a remnant disk mass that
    exceeds a certain threshold. Each binary passed to this function
    consists of a NS mass and a BH spin parameter component along the
    orbital angular momentum. Unlike find_em_constraint_data_point, which
    solves the problem at a given point in the parameter space and is more
    generic, this function interpolates the results produced by
    generate_em_constraint_data at the desired locations:
    generate_em_constraint_data must be run once prior to calling
    min_eta_for_em_bright.

    Parameters
    -----------
    bh_spin_z: array
        desired values of the BH dimensionless spin parameter for the spin
        projection along the orbital angular momentum
    ns_g_mass: array
        desired values of the NS gravitational mass (in solar masses)
    mNS_pts: array
        NS mass values (in solar masses) from the output of
        generate_em_constraint_data
    sBH_pts: array
        BH dimensionless spin parameter values along the orbital angular
        momentum from the output of generate_em_constraint_data
    eta_mins: array
        minimum symmetric mass ratio values to exceed a given remnant disk
        mass threshold from the output of generate_em_constraint_data

    Returns
    ----------
    eta_min: array
        the minimum symmetric mass ratio required by each binary in the
        input to yield a remnant disk mass that exceeds a certain threshold
    """
    f = scipy.interpolate.RectBivariateSpline(mNS_pts, sBH_pts, eta_mins,
                                              kx=1, ky=1)
    # If bh_spin_z is a numpy array (assuming ns_g_mass has the same size)
    if isinstance(bh_spin_z, np.ndarray):
        eta_min = np.empty(len(bh_spin_z))
        for i in range(len(bh_spin_z)):
            eta_min[i] = f(ns_g_mass[i], bh_spin_z[i])
    # Else (assuming ns_g_mass and bh_spin_z are single numbers)
    else:
        eta_min = f(ns_g_mass, bh_spin_z)
    return eta_min
Function that uses the end product of generate_em_constraint_data to sweep over a set of NS-BH binaries and determine the minimum symmetric mass ratio required by each binary to yield a remnant disk mass that exceeds a certain threshold. Each binary passed to this function consists of a NS mass and a BH spin parameter component along the orbital angular momentum. Unlike find_em_constraint_data_point, which solves the problem at a given point in the parameter space and is more generic, this function interpolates the results produced by generate_em_constraint_data at the desired locations: generate_em_constraint_data must be run once prior to calling min_eta_for_em_bright. Parameters ----------- bh_spin_z: array desired values of the BH dimensionless spin parameter for the spin projection along the orbital angular momentum ns_g_mass: array desired values of the NS gravitational mass (in solar masses) mNS_pts: array NS mass values (in solar masses) from the output of generate_em_constraint_data sBH_pts: array BH dimensionless spin parameter values along the orbital angular momentum from the output of generate_em_constraint_data eta_mins: array minimum symmetric mass ratio values to exceed a given remnant disk mass threshold from the output of generate_em_constraint_data Returns ---------- eta_min: array the minimum symmetric mass ratio required by each binary in the input to yield a remnant disk mass that exceeds a certain threshold
Below is the instruction that describes the task:
### Input:
Function that uses the end product of generate_em_constraint_data to sweep over a set of NS-BH binaries and determine the minimum symmetric mass ratio required by each binary to yield a remnant disk mass that exceeds a certain threshold. Each binary passed to this function consists of a NS mass and a BH spin parameter component along the orbital angular momentum. Unlike find_em_constraint_data_point, which solves the problem at a given point in the parameter space and is more generic, this function interpolates the results produced by generate_em_constraint_data at the desired locations: generate_em_constraint_data must be run once prior to calling min_eta_for_em_bright. Parameters ----------- bh_spin_z: array desired values of the BH dimensionless spin parameter for the spin projection along the orbital angular momentum ns_g_mass: array desired values of the NS gravitational mass (in solar masses) mNS_pts: array NS mass values (in solar masses) from the output of generate_em_constraint_data sBH_pts: array BH dimensionless spin parameter values along the orbital angular momentum from the output of generate_em_constraint_data eta_mins: array minimum symmetric mass ratio values to exceed a given remnant disk mass threshold from the output of generate_em_constraint_data Returns ---------- eta_min: array the minimum symmetric mass ratio required by each binary in the input to yield a remnant disk mass that exceeds a certain threshold
### Response:
def min_eta_for_em_bright(bh_spin_z, ns_g_mass, mNS_pts, sBH_pts, eta_mins):
    """
    Function that uses the end product of generate_em_constraint_data to
    sweep over a set of NS-BH binaries and determine the minimum symmetric
    mass ratio required by each binary to yield a remnant disk mass that
    exceeds a certain threshold. Each binary passed to this function
    consists of a NS mass and a BH spin parameter component along the
    orbital angular momentum. Unlike find_em_constraint_data_point, which
    solves the problem at a given point in the parameter space and is more
    generic, this function interpolates the results produced by
    generate_em_constraint_data at the desired locations:
    generate_em_constraint_data must be run once prior to calling
    min_eta_for_em_bright.

    Parameters
    -----------
    bh_spin_z: array
        desired values of the BH dimensionless spin parameter for the spin
        projection along the orbital angular momentum
    ns_g_mass: array
        desired values of the NS gravitational mass (in solar masses)
    mNS_pts: array
        NS mass values (in solar masses) from the output of
        generate_em_constraint_data
    sBH_pts: array
        BH dimensionless spin parameter values along the orbital angular
        momentum from the output of generate_em_constraint_data
    eta_mins: array
        minimum symmetric mass ratio values to exceed a given remnant disk
        mass threshold from the output of generate_em_constraint_data

    Returns
    ----------
    eta_min: array
        the minimum symmetric mass ratio required by each binary in the
        input to yield a remnant disk mass that exceeds a certain threshold
    """
    f = scipy.interpolate.RectBivariateSpline(mNS_pts, sBH_pts, eta_mins,
                                              kx=1, ky=1)
    # If bh_spin_z is a numpy array (assuming ns_g_mass has the same size)
    if isinstance(bh_spin_z, np.ndarray):
        eta_min = np.empty(len(bh_spin_z))
        for i in range(len(bh_spin_z)):
            eta_min[i] = f(ns_g_mass[i], bh_spin_z[i])
    # Else (assuming ns_g_mass and bh_spin_z are single numbers)
    else:
        eta_min = f(ns_g_mass, bh_spin_z)
    return eta_min
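A self-contained sketch of the bilinear spline lookup used above, on an invented toy grid (all grid values are assumptions for illustration):

import numpy as np
import scipy.interpolate
mNS_pts = np.array([1.0, 1.4, 1.8])   # NS masses (solar masses)
sBH_pts = np.array([-0.5, 0.0, 0.5])  # BH spin projections
eta_mins = np.full((3, 3), 0.1)       # placeholder grid of minimum etas
f = scipy.interpolate.RectBivariateSpline(mNS_pts, sBH_pts, eta_mins, kx=1, ky=1)
eta_min = f(1.2, 0.1)                 # interpolate at (m_NS=1.2, chi_z=0.1)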
def _verifyDiscoverySingle(self, endpoint, to_match): """Verify that the given endpoint matches the information extracted from the OpenID assertion, and raise an exception if there is a mismatch. @type endpoint: openid.consumer.discover.OpenIDServiceEndpoint @type to_match: openid.consumer.discover.OpenIDServiceEndpoint @rtype: NoneType @raises ProtocolError: when the endpoint does not match the discovered information. """ # Every type URI that's in the to_match endpoint has to be # present in the discovered endpoint. for type_uri in to_match.type_uris: if not endpoint.usesExtension(type_uri): raise TypeURIMismatch(type_uri, endpoint) # Fragments do not influence discovery, so we can't compare a # claimed identifier with a fragment to discovered information. defragged_claimed_id, _ = urldefrag(to_match.claimed_id) if defragged_claimed_id != endpoint.claimed_id: raise ProtocolError( 'Claimed ID does not match (different subjects!), ' 'Expected %s, got %s' % (defragged_claimed_id, endpoint.claimed_id)) if to_match.getLocalID() != endpoint.getLocalID(): raise ProtocolError('local_id mismatch. Expected %s, got %s' % (to_match.getLocalID(), endpoint.getLocalID())) # If the server URL is None, this must be an OpenID 1 # response, because op_endpoint is a required parameter in # OpenID 2. In that case, we don't actually care what the # discovered server_url is, because signature checking or # check_auth should take care of that check for us. if to_match.server_url is None: assert to_match.preferredNamespace() == OPENID1_NS, ( """The code calling this must ensure that OpenID 2 responses have a non-none `openid.op_endpoint' and that it is set as the `server_url' attribute of the `to_match' endpoint.""") elif to_match.server_url != endpoint.server_url: raise ProtocolError('OP Endpoint mismatch. Expected %s, got %s' % (to_match.server_url, endpoint.server_url))
Verify that the given endpoint matches the information extracted from the OpenID assertion, and raise an exception if there is a mismatch. @type endpoint: openid.consumer.discover.OpenIDServiceEndpoint @type to_match: openid.consumer.discover.OpenIDServiceEndpoint @rtype: NoneType @raises ProtocolError: when the endpoint does not match the discovered information.
Below is the instruction that describes the task:
### Input:
Verify that the given endpoint matches the information extracted from the OpenID assertion, and raise an exception if there is a mismatch. @type endpoint: openid.consumer.discover.OpenIDServiceEndpoint @type to_match: openid.consumer.discover.OpenIDServiceEndpoint @rtype: NoneType @raises ProtocolError: when the endpoint does not match the discovered information.
### Response:
def _verifyDiscoverySingle(self, endpoint, to_match):
    """Verify that the given endpoint matches the information
    extracted from the OpenID assertion, and raise an exception if
    there is a mismatch.

    @type endpoint: openid.consumer.discover.OpenIDServiceEndpoint
    @type to_match: openid.consumer.discover.OpenIDServiceEndpoint

    @rtype: NoneType

    @raises ProtocolError: when the endpoint does not match the
        discovered information.
    """
    # Every type URI that's in the to_match endpoint has to be
    # present in the discovered endpoint.
    for type_uri in to_match.type_uris:
        if not endpoint.usesExtension(type_uri):
            raise TypeURIMismatch(type_uri, endpoint)

    # Fragments do not influence discovery, so we can't compare a
    # claimed identifier with a fragment to discovered information.
    defragged_claimed_id, _ = urldefrag(to_match.claimed_id)
    if defragged_claimed_id != endpoint.claimed_id:
        raise ProtocolError(
            'Claimed ID does not match (different subjects!), '
            'Expected %s, got %s' %
            (defragged_claimed_id, endpoint.claimed_id))

    if to_match.getLocalID() != endpoint.getLocalID():
        raise ProtocolError('local_id mismatch. Expected %s, got %s' %
                            (to_match.getLocalID(), endpoint.getLocalID()))

    # If the server URL is None, this must be an OpenID 1
    # response, because op_endpoint is a required parameter in
    # OpenID 2. In that case, we don't actually care what the
    # discovered server_url is, because signature checking or
    # check_auth should take care of that check for us.
    if to_match.server_url is None:
        assert to_match.preferredNamespace() == OPENID1_NS, (
            """The code calling this must ensure that OpenID 2
            responses have a non-none `openid.op_endpoint' and
            that it is set as the `server_url' attribute of the
            `to_match' endpoint.""")
    elif to_match.server_url != endpoint.server_url:
        raise ProtocolError('OP Endpoint mismatch. Expected %s, got %s' %
                            (to_match.server_url, endpoint.server_url))
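The urldefrag call simply splits the fragment off before comparing; for example (the Python 3 import is shown, the original may import it from urlparse on Python 2):

from urllib.parse import urldefrag
claimed, frag = urldefrag("https://example.com/id#profile")
assert (claimed, frag) == ("https://example.com/id", "profile")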
def flash_defl_begin(self, size, compsize, offset): """ Start downloading compressed data to Flash (performs an erase) Returns number of blocks (size self.FLASH_WRITE_SIZE) to write. """ num_blocks = (compsize + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE erase_blocks = (size + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE t = time.time() if self.IS_STUB: write_size = size # stub expects number of bytes here, manages erasing internally timeout = DEFAULT_TIMEOUT else: write_size = erase_blocks * self.FLASH_WRITE_SIZE # ROM expects rounded up to erase block size timeout = timeout_per_mb(ERASE_REGION_TIMEOUT_PER_MB, write_size) # ROM performs the erase up front print("Compressed %d bytes to %d..." % (size, compsize)) self.check_command("enter compressed flash mode", self.ESP_FLASH_DEFL_BEGIN, struct.pack('<IIII', write_size, num_blocks, self.FLASH_WRITE_SIZE, offset), timeout=timeout) if size != 0 and not self.IS_STUB: # (stub erases as it writes, but ROM loaders erase on begin) print("Took %.2fs to erase flash block" % (time.time() - t)) return num_blocks
Start downloading compressed data to Flash (performs an erase) Returns number of blocks (size self.FLASH_WRITE_SIZE) to write.
Below is the instruction that describes the task:
### Input:
Start downloading compressed data to Flash (performs an erase) Returns number of blocks (size self.FLASH_WRITE_SIZE) to write.
### Response:
def flash_defl_begin(self, size, compsize, offset):
    """ Start downloading compressed data to Flash (performs an erase)

    Returns number of blocks (size self.FLASH_WRITE_SIZE) to write.
    """
    num_blocks = (compsize + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE
    erase_blocks = (size + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE

    t = time.time()
    if self.IS_STUB:
        write_size = size  # stub expects number of bytes here, manages erasing internally
        timeout = DEFAULT_TIMEOUT
    else:
        write_size = erase_blocks * self.FLASH_WRITE_SIZE  # ROM expects rounded up to erase block size
        timeout = timeout_per_mb(ERASE_REGION_TIMEOUT_PER_MB, write_size)  # ROM performs the erase up front
    print("Compressed %d bytes to %d..." % (size, compsize))
    self.check_command("enter compressed flash mode", self.ESP_FLASH_DEFL_BEGIN,
                       struct.pack('<IIII', write_size, num_blocks,
                                   self.FLASH_WRITE_SIZE, offset),
                       timeout=timeout)
    if size != 0 and not self.IS_STUB:
        # (stub erases as it writes, but ROM loaders erase on begin)
        print("Took %.2fs to erase flash block" % (time.time() - t))
    return num_blocks
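The block counts are plain ceiling divisions; a worked example with an assumed 1024-byte write size:

FLASH_WRITE_SIZE = 0x400              # 1024-byte blocks (illustrative value)
size, compsize = 300000, 180000
num_blocks = (compsize + FLASH_WRITE_SIZE - 1) // FLASH_WRITE_SIZE  # 176 compressed blocks to send
erase_blocks = (size + FLASH_WRITE_SIZE - 1) // FLASH_WRITE_SIZE    # 293 blocks to erase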
def list_projects(self): """Return a list of all followed projects.""" method = 'GET' url = '/projects?circle-token={token}'.format( token=self.client.api_token) json_data = self.client.request(method, url) return json_data
Return a list of all followed projects.
Below is the instruction that describes the task:
### Input:
Return a list of all followed projects.
### Response:
def list_projects(self):
    """Return a list of all followed projects."""
    method = 'GET'
    url = '/projects?circle-token={token}'.format(
        token=self.client.api_token)
    json_data = self.client.request(method, url)
    return json_data
def get_possible_app_ids(): """ Returns all possible app ids. """ try: req = requests.get( "https://clients3.google.com/cast/chromecast/device/baseconfig") data = json.loads(req.text[4:]) return [app['app_id'] for app in data['applications']] + \ data["enabled_app_ids"] except ValueError: # If json fails to parse return []
Returns all possible app ids.
Below is the instruction that describes the task:
### Input:
Returns all possible app ids.
### Response:
def get_possible_app_ids():
    """ Returns all possible app ids. """
    try:
        req = requests.get(
            "https://clients3.google.com/cast/chromecast/device/baseconfig")
        data = json.loads(req.text[4:])
        return [app['app_id'] for app in data['applications']] + \
            data["enabled_app_ids"]
    except ValueError:
        # If json fails to parse
        return []
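The req.text[4:] slice drops the 4-character anti-JSON-hijacking guard that the endpoint prepends; a hedged sketch of the idea (the exact guard string shown is an assumption):

import json
raw = ")]}'\n{\"applications\": [{\"app_id\": \"CC1AD845\"}], \"enabled_app_ids\": []}"
data = json.loads(raw[4:])  # strip the guard, then parse normally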
def start(self, blocking=False):
    """
    Start the interface

    :param blocking: Should the call block until stop() is called
        (default: False)
    :type blocking: bool
    :rtype: None
    :raises SensorStartException: Failed to start
    """
    try:
        self._init_listen_socket()
    except Exception:
        self.exception(u"Failed to init listen socket ({}:{})".format(
            self._listen_ip, self._listen_port
        ))
        self._shutdown_listen_socket()
        raise SensorStartException("Listen socket init failed")

    self.info(u"Listening on {}:{}".format(
        self._listen_ip, self._listen_port
    ))

    super(Sensor, self).start(False)

    try:
        a_thread = threading.Thread(
            target=self._thread_wrapper,
            args=(self._receiving,)
        )
        a_thread.daemon = True
        a_thread.start()
    except Exception:
        self.exception("Failed to run receive loop")
        raise SensorStartException("Packet loop failed")

    super(Sensor, self).start(blocking)
Start the interface :param blocking: Should the call block until stop() is called (default: False) :type blocking: bool :rtype: None :raises SensorStartException: Failed to start
Below is the instruction that describes the task:
### Input:
Start the interface :param blocking: Should the call block until stop() is called (default: False) :type blocking: bool :rtype: None :raises SensorStartException: Failed to start
### Response:
def start(self, blocking=False):
    """
    Start the interface

    :param blocking: Should the call block until stop() is called
        (default: False)
    :type blocking: bool
    :rtype: None
    :raises SensorStartException: Failed to start
    """
    try:
        self._init_listen_socket()
    except Exception:
        self.exception(u"Failed to init listen socket ({}:{})".format(
            self._listen_ip, self._listen_port
        ))
        self._shutdown_listen_socket()
        raise SensorStartException("Listen socket init failed")

    self.info(u"Listening on {}:{}".format(
        self._listen_ip, self._listen_port
    ))

    super(Sensor, self).start(False)

    try:
        a_thread = threading.Thread(
            target=self._thread_wrapper,
            args=(self._receiving,)
        )
        a_thread.daemon = True
        a_thread.start()
    except Exception:
        self.exception("Failed to run receive loop")
        raise SensorStartException("Packet loop failed")

    super(Sensor, self).start(blocking)
def interp_bin(self, egy_bins, dtheta, scale_fn=None): """Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``. Parameters ---------- egy_bins : array_like Energy bin edges in MeV. dtheta : array_like Array of angular separations in degrees. scale_fn : callable Function that evaluates the PSF scaling function. Argument is energy in MeV. """ npts = 4 egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts)) egy = np.exp(utils.edge_to_center(np.log(egy_bins))) log_energies = np.log10(egy) vals = self.interp(egy[None, :], dtheta[:, None], scale_fn=scale_fn) wts = np.exp(self._wts_fn((log_energies,))) wts = wts.reshape((1,) + wts.shape) vals = np.sum( (vals * wts).reshape((vals.shape[0], int(vals.shape[1] / npts), npts)), axis=2) vals /= np.sum(wts.reshape(wts.shape[0], int(wts.shape[1] / npts), npts), axis=2) return vals
Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``. Parameters ---------- egy_bins : array_like Energy bin edges in MeV. dtheta : array_like Array of angular separations in degrees. scale_fn : callable Function that evaluates the PSF scaling function. Argument is energy in MeV.
Below is the instruction that describes the task:
### Input:
Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``. Parameters ---------- egy_bins : array_like Energy bin edges in MeV. dtheta : array_like Array of angular separations in degrees. scale_fn : callable Function that evaluates the PSF scaling function. Argument is energy in MeV.
### Response:
def interp_bin(self, egy_bins, dtheta, scale_fn=None):
    """Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``.

    Parameters
    ----------
    egy_bins : array_like
        Energy bin edges in MeV.

    dtheta : array_like
        Array of angular separations in degrees.

    scale_fn : callable
        Function that evaluates the PSF scaling function.
        Argument is energy in MeV.
    """
    npts = 4
    egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
    egy = np.exp(utils.edge_to_center(np.log(egy_bins)))
    log_energies = np.log10(egy)
    vals = self.interp(egy[None, :], dtheta[:, None], scale_fn=scale_fn)
    wts = np.exp(self._wts_fn((log_energies,)))
    wts = wts.reshape((1,) + wts.shape)
    vals = np.sum(
        (vals * wts).reshape((vals.shape[0],
                              int(vals.shape[1] / npts), npts)), axis=2)
    vals /= np.sum(wts.reshape(wts.shape[0],
                               int(wts.shape[1] / npts), npts), axis=2)
    return vals
def _adjust_crop_box(box, crop): """ Given a fit box and a crop box, adjust one to the other """ if crop and box: # Both boxes are the same size; just line them up. return (box[0] + crop[0], box[1] + crop[1], box[2] + crop[0], box[3] + crop[1]) if crop: # We don't have a fit box, so just convert the crop box return (crop[0], crop[1], crop[0] + crop[2], crop[1] + crop[3]) # We don't have a crop box, so return the fit box (even if it's None) return box
Given a fit box and a crop box, adjust one to the other
Below is the instruction that describes the task:
### Input:
Given a fit box and a crop box, adjust one to the other
### Response:
def _adjust_crop_box(box, crop):
    """ Given a fit box and a crop box, adjust one to the other """
    if crop and box:
        # Both boxes are the same size; just line them up.
        return (box[0] + crop[0], box[1] + crop[1],
                box[2] + crop[0], box[3] + crop[1])
    if crop:
        # We don't have a fit box, so just convert the crop box
        return (crop[0], crop[1], crop[0] + crop[2], crop[1] + crop[3])
    # We don't have a crop box, so return the fit box (even if it's None)
    return box
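A couple of hypothetical calls, remembering that the fit box is (left, top, right, bottom) while the crop box is (left, top, width, height):

assert _adjust_crop_box(None, (10, 20, 100, 50)) == (10, 20, 110, 70)           # crop converted to corner coordinates
assert _adjust_crop_box((0, 0, 30, 30), (10, 20, 100, 50)) == (10, 20, 40, 50)  # fit box shifted by the crop origin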
def target_range(self): """Get the range on the target strand :return: target range :rtype: GenomicRange """ if not self.is_aligned(): return None if self._target_range: return self._target_range # check cache global _sam_cigar_target_add tlen = sum([x[0] for x in self.cigar_array if _sam_cigar_target_add.match(x[1])]) self._target_range = GenomicRange(self.entries.rname,self.entries.pos,self.entries.pos+tlen-1) return self._target_range
Get the range on the target strand :return: target range :rtype: GenomicRange
Below is the instruction that describes the task:
### Input:
Get the range on the target strand :return: target range :rtype: GenomicRange
### Response:
def target_range(self):
    """Get the range on the target strand

    :return: target range
    :rtype: GenomicRange
    """
    if not self.is_aligned(): return None
    if self._target_range: return self._target_range # check cache
    global _sam_cigar_target_add
    tlen = sum([x[0] for x in self.cigar_array if _sam_cigar_target_add.match(x[1])])
    self._target_range = GenomicRange(self.entries.rname,self.entries.pos,self.entries.pos+tlen-1)
    return self._target_range
def match_route(self, url): # type: (str) -> MatchResult """Match the url against known routes. This method takes a concrete route "/foo/bar", and matches it against a set of routes. These routes can use param substitution corresponding to API gateway patterns. For example:: match_route('/foo/bar') -> '/foo/{name}' """ # Otherwise we need to check for param substitution parsed_url = urlparse(url) parsed_qs = parse_qs(parsed_url.query, keep_blank_values=True) query_params = {k: v[-1] for k, v in parsed_qs.items()} path = parsed_url.path # API Gateway removes the trailing slash if the route is not the root # path. We do the same here so our route matching works the same way. if path != '/' and path.endswith('/'): path = path[:-1] parts = path.split('/') captured = {} for route_url in self.route_urls: url_parts = route_url.split('/') if len(parts) == len(url_parts): for i, j in zip(parts, url_parts): if j.startswith('{') and j.endswith('}'): captured[j[1:-1]] = i continue if i != j: break else: return MatchResult(route_url, captured, query_params) raise ValueError("No matching route found for: %s" % url)
Match the url against known routes. This method takes a concrete route "/foo/bar", and matches it against a set of routes. These routes can use param substitution corresponding to API gateway patterns. For example:: match_route('/foo/bar') -> '/foo/{name}'
Below is the instruction that describes the task:
### Input:
Match the url against known routes. This method takes a concrete route "/foo/bar", and matches it against a set of routes. These routes can use param substitution corresponding to API gateway patterns. For example:: match_route('/foo/bar') -> '/foo/{name}'
### Response:
def match_route(self, url):
    # type: (str) -> MatchResult
    """Match the url against known routes.

    This method takes a concrete route "/foo/bar", and
    matches it against a set of routes.  These routes can
    use param substitution corresponding to API gateway patterns.
    For example::

        match_route('/foo/bar') -> '/foo/{name}'
    """
    # Otherwise we need to check for param substitution
    parsed_url = urlparse(url)
    parsed_qs = parse_qs(parsed_url.query, keep_blank_values=True)
    query_params = {k: v[-1] for k, v in parsed_qs.items()}
    path = parsed_url.path
    # API Gateway removes the trailing slash if the route is not the root
    # path. We do the same here so our route matching works the same way.
    if path != '/' and path.endswith('/'):
        path = path[:-1]
    parts = path.split('/')
    captured = {}
    for route_url in self.route_urls:
        url_parts = route_url.split('/')
        if len(parts) == len(url_parts):
            for i, j in zip(parts, url_parts):
                if j.startswith('{') and j.endswith('}'):
                    captured[j[1:-1]] = i
                    continue
                if i != j:
                    break
            else:
                return MatchResult(route_url, captured, query_params)
    raise ValueError("No matching route found for: %s" % url)
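A standalone sketch of the same parameter-substitution matching, outside the class (the function and route names here are invented):

def match_pattern(path, pattern):
    parts, pat_parts = path.split('/'), pattern.split('/')
    if len(parts) != len(pat_parts):
        return None
    captured = {}
    for actual, expected in zip(parts, pat_parts):
        if expected.startswith('{') and expected.endswith('}'):
            captured[expected[1:-1]] = actual  # capture the path parameter
        elif actual != expected:
            return None
    return captured

assert match_pattern('/users/42', '/users/{id}') == {'id': '42'}
assert match_pattern('/users/42', '/posts/{id}') is None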
def open_api_json_view(request):
    """
    :param request: the incoming request
    :return: the generated JSON representation of the Swagger spec
    """
    doc = cornice_swagger.CorniceSwagger(
        cornice.service.get_services(), pyramid_registry=request.registry)
    kwargs = request.registry.settings['cornice_swagger.spec_kwargs']
    my_spec = doc.generate(**kwargs)
    return my_spec
:param request: :return: Generates JSON representation of Swagger spec
Below is the instruction that describes the task: ### Input: :param request: :return: Generates JSON representation of Swagger spec ### Response: def open_api_json_view(request): """ :param request: :return: Generates JSON representation of Swagger spec """ doc = cornice_swagger.CorniceSwagger( cornice.service.get_services(), pyramid_registry=request.registry) kwargs = request.registry.settings['cornice_swagger.spec_kwargs'] my_spec = doc.generate(**kwargs) return my_spec
def mapCellsToPoints(self): """ Transform cell data (i.e., data specified per cell) into point data (i.e., data specified at cell points). The method of transformation is based on averaging the data values of all cells using a particular point. """ c2p = vtk.vtkCellDataToPointData() c2p.SetInputData(self.polydata(False)) c2p.Update() return self.updateMesh(c2p.GetOutput())
Transform cell data (i.e., data specified per cell) into point data (i.e., data specified at cell points). The method of transformation is based on averaging the data values of all cells using a particular point.
Below is the instruction that describes the task: ### Input: Transform cell data (i.e., data specified per cell) into point data (i.e., data specified at cell points). The method of transformation is based on averaging the data values of all cells using a particular point. ### Response: def mapCellsToPoints(self): """ Transform cell data (i.e., data specified per cell) into point data (i.e., data specified at cell points). The method of transformation is based on averaging the data values of all cells using a particular point. """ c2p = vtk.vtkCellDataToPointData() c2p.SetInputData(self.polydata(False)) c2p.Update() return self.updateMesh(c2p.GetOutput())
def on_train_begin(self, **kwargs): "Call watch method to log model topology, gradients & weights" # Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback" super().on_train_begin() # Ensure we don't call "watch" multiple times if not WandbCallback.watch_called: WandbCallback.watch_called = True # Logs model topology and optionally gradients and weights wandb.watch(self.learn.model, log=self.log)
Call watch method to log model topology, gradients & weights
Below is the instruction that describes the task: ### Input: Call watch method to log model topology, gradients & weights ### Response: def on_train_begin(self, **kwargs): "Call watch method to log model topology, gradients & weights" # Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback" super().on_train_begin() # Ensure we don't call "watch" multiple times if not WandbCallback.watch_called: WandbCallback.watch_called = True # Logs model topology and optionally gradients and weights wandb.watch(self.learn.model, log=self.log)
def find_min_required(path): """Inspect terraform files and find minimum version.""" found_min_required = '' for filename in glob.glob(os.path.join(path, '*.tf')): with open(filename, 'r') as stream: tf_config = hcl.load(stream) if tf_config.get('terraform', {}).get('required_version'): found_min_required = tf_config.get('terraform', {}).get('required_version') break if found_min_required: if re.match(r'^!=.+', found_min_required): LOGGER.error('Min required Terraform version is a negation (%s) ' '- unable to determine required version', found_min_required) sys.exit(1) else: found_min_required = re.search(r'[0-9]*\.[0-9]*(?:\.[0-9]*)?', found_min_required).group(0) LOGGER.debug("Detected minimum terraform version is %s", found_min_required) return found_min_required LOGGER.error('Terraform version specified as min-required, but unable to ' 'find a specified version requirement in this module\'s tf ' 'files') sys.exit(1)
Inspect terraform files and find minimum version.
Below is the the instruction that describes the task: ### Input: Inspect terraform files and find minimum version. ### Response: def find_min_required(path): """Inspect terraform files and find minimum version.""" found_min_required = '' for filename in glob.glob(os.path.join(path, '*.tf')): with open(filename, 'r') as stream: tf_config = hcl.load(stream) if tf_config.get('terraform', {}).get('required_version'): found_min_required = tf_config.get('terraform', {}).get('required_version') break if found_min_required: if re.match(r'^!=.+', found_min_required): LOGGER.error('Min required Terraform version is a negation (%s) ' '- unable to determine required version', found_min_required) sys.exit(1) else: found_min_required = re.search(r'[0-9]*\.[0-9]*(?:\.[0-9]*)?', found_min_required).group(0) LOGGER.debug("Detected minimum terraform version is %s", found_min_required) return found_min_required LOGGER.error('Terraform version specified as min-required, but unable to ' 'find a specified version requirement in this module\'s tf ' 'files') sys.exit(1)
def _MultiWritePathInfos(self, path_infos, connection=None): """Writes a collection of path info records for specified clients.""" query = "" path_info_count = 0 path_info_values = [] parent_path_info_count = 0 parent_path_info_values = [] has_stat_entries = False has_hash_entries = False for client_id, client_path_infos in iteritems(path_infos): for path_info in client_path_infos: path = mysql_utils.ComponentsToPath(path_info.components) path_info_values.append(db_utils.ClientIDToInt(client_id)) path_info_values.append(int(path_info.path_type)) path_info_values.append(path_info.GetPathID().AsBytes()) path_info_values.append(path) path_info_values.append(bool(path_info.directory)) path_info_values.append(len(path_info.components)) if path_info.HasField("stat_entry"): path_info_values.append(path_info.stat_entry.SerializeToString()) has_stat_entries = True else: path_info_values.append(None) if path_info.HasField("hash_entry"): path_info_values.append(path_info.hash_entry.SerializeToString()) path_info_values.append(path_info.hash_entry.sha256.AsBytes()) has_hash_entries = True else: path_info_values.append(None) path_info_values.append(None) path_info_count += 1 # TODO(hanuszczak): Implement a trie in order to avoid inserting # duplicated records. for parent_path_info in path_info.GetAncestors(): path = mysql_utils.ComponentsToPath(parent_path_info.components) parent_path_info_values.append(db_utils.ClientIDToInt(client_id)) parent_path_info_values.append(int(parent_path_info.path_type)) parent_path_info_values.append(parent_path_info.GetPathID().AsBytes()) parent_path_info_values.append(path) parent_path_info_values.append(len(parent_path_info.components)) parent_path_info_count += 1 query += """ CREATE TEMPORARY TABLE client_path_infos( client_id BIGINT UNSIGNED NOT NULL, path_type INT UNSIGNED NOT NULL, path_id BINARY(32) NOT NULL, path TEXT NOT NULL, directory BOOLEAN NOT NULL, depth INT NOT NULL, stat_entry MEDIUMBLOB NULL, hash_entry MEDIUMBLOB NULL, sha256 BINARY(32) NULL, timestamp TIMESTAMP(6) NOT NULL DEFAULT now(6) );""" if path_info_count > 0: query += """ INSERT INTO client_path_infos(client_id, path_type, path_id, path, directory, depth, stat_entry, hash_entry, sha256) VALUES {}; """.format(mysql_utils.Placeholders(num=9, values=path_info_count)) query += """ INSERT INTO client_paths(client_id, path_type, path_id, path, directory, depth) SELECT client_id, path_type, path_id, path, directory, depth FROM client_path_infos ON DUPLICATE KEY UPDATE client_paths.directory = client_paths.directory OR VALUES(client_paths.directory), client_paths.timestamp = now(6); """ if parent_path_info_count > 0: placeholders = ["(%s, %s, %s, %s, TRUE, %s)"] * parent_path_info_count query += """ INSERT INTO client_paths(client_id, path_type, path_id, path, directory, depth) VALUES {} ON DUPLICATE KEY UPDATE directory = TRUE, timestamp = now(); """.format(", ".join(placeholders)) if has_stat_entries: query += """ INSERT INTO client_path_stat_entries(client_id, path_type, path_id, stat_entry, timestamp) SELECT client_id, path_type, path_id, stat_entry, timestamp FROM client_path_infos WHERE stat_entry IS NOT NULL; """ query += """ UPDATE client_paths, client_path_infos SET client_paths.last_stat_entry_timestamp = client_path_infos.timestamp WHERE client_paths.client_id = client_path_infos.client_id AND client_paths.path_type = client_path_infos.path_type AND client_paths.path_id = client_path_infos.path_id AND client_path_infos.stat_entry IS NOT NULL; """ if has_hash_entries: query += """ INSERT 
INTO client_path_hash_entries(client_id, path_type, path_id, hash_entry, sha256, timestamp) SELECT client_id, path_type, path_id, hash_entry, sha256, timestamp FROM client_path_infos WHERE hash_entry IS NOT NULL; """ query += """ UPDATE client_paths, client_path_infos SET client_paths.last_hash_entry_timestamp = client_path_infos.timestamp WHERE client_paths.client_id = client_path_infos.client_id AND client_paths.path_type = client_path_infos.path_type AND client_paths.path_id = client_path_infos.path_id AND client_path_infos.hash_entry IS NOT NULL; """ try: with contextlib.closing(connection.cursor()) as cursor: cursor.execute(query, path_info_values + parent_path_info_values) finally: # Drop the temporary table in a separate cursor. This ensures that # even if the previous cursor.execute fails mid-way leaving the # temporary table created (as table creation can't be rolled back), the # table would still be correctly dropped. # # This is important since connections are reused in the MySQL connection # pool. with contextlib.closing(connection.cursor()) as cursor: cursor.execute("DROP TEMPORARY TABLE IF EXISTS client_path_infos")
Writes a collection of path info records for specified clients.
Below is the the instruction that describes the task: ### Input: Writes a collection of path info records for specified clients. ### Response: def _MultiWritePathInfos(self, path_infos, connection=None): """Writes a collection of path info records for specified clients.""" query = "" path_info_count = 0 path_info_values = [] parent_path_info_count = 0 parent_path_info_values = [] has_stat_entries = False has_hash_entries = False for client_id, client_path_infos in iteritems(path_infos): for path_info in client_path_infos: path = mysql_utils.ComponentsToPath(path_info.components) path_info_values.append(db_utils.ClientIDToInt(client_id)) path_info_values.append(int(path_info.path_type)) path_info_values.append(path_info.GetPathID().AsBytes()) path_info_values.append(path) path_info_values.append(bool(path_info.directory)) path_info_values.append(len(path_info.components)) if path_info.HasField("stat_entry"): path_info_values.append(path_info.stat_entry.SerializeToString()) has_stat_entries = True else: path_info_values.append(None) if path_info.HasField("hash_entry"): path_info_values.append(path_info.hash_entry.SerializeToString()) path_info_values.append(path_info.hash_entry.sha256.AsBytes()) has_hash_entries = True else: path_info_values.append(None) path_info_values.append(None) path_info_count += 1 # TODO(hanuszczak): Implement a trie in order to avoid inserting # duplicated records. for parent_path_info in path_info.GetAncestors(): path = mysql_utils.ComponentsToPath(parent_path_info.components) parent_path_info_values.append(db_utils.ClientIDToInt(client_id)) parent_path_info_values.append(int(parent_path_info.path_type)) parent_path_info_values.append(parent_path_info.GetPathID().AsBytes()) parent_path_info_values.append(path) parent_path_info_values.append(len(parent_path_info.components)) parent_path_info_count += 1 query += """ CREATE TEMPORARY TABLE client_path_infos( client_id BIGINT UNSIGNED NOT NULL, path_type INT UNSIGNED NOT NULL, path_id BINARY(32) NOT NULL, path TEXT NOT NULL, directory BOOLEAN NOT NULL, depth INT NOT NULL, stat_entry MEDIUMBLOB NULL, hash_entry MEDIUMBLOB NULL, sha256 BINARY(32) NULL, timestamp TIMESTAMP(6) NOT NULL DEFAULT now(6) );""" if path_info_count > 0: query += """ INSERT INTO client_path_infos(client_id, path_type, path_id, path, directory, depth, stat_entry, hash_entry, sha256) VALUES {}; """.format(mysql_utils.Placeholders(num=9, values=path_info_count)) query += """ INSERT INTO client_paths(client_id, path_type, path_id, path, directory, depth) SELECT client_id, path_type, path_id, path, directory, depth FROM client_path_infos ON DUPLICATE KEY UPDATE client_paths.directory = client_paths.directory OR VALUES(client_paths.directory), client_paths.timestamp = now(6); """ if parent_path_info_count > 0: placeholders = ["(%s, %s, %s, %s, TRUE, %s)"] * parent_path_info_count query += """ INSERT INTO client_paths(client_id, path_type, path_id, path, directory, depth) VALUES {} ON DUPLICATE KEY UPDATE directory = TRUE, timestamp = now(); """.format(", ".join(placeholders)) if has_stat_entries: query += """ INSERT INTO client_path_stat_entries(client_id, path_type, path_id, stat_entry, timestamp) SELECT client_id, path_type, path_id, stat_entry, timestamp FROM client_path_infos WHERE stat_entry IS NOT NULL; """ query += """ UPDATE client_paths, client_path_infos SET client_paths.last_stat_entry_timestamp = client_path_infos.timestamp WHERE client_paths.client_id = client_path_infos.client_id AND client_paths.path_type = client_path_infos.path_type AND 
client_paths.path_id = client_path_infos.path_id AND client_path_infos.stat_entry IS NOT NULL; """ if has_hash_entries: query += """ INSERT INTO client_path_hash_entries(client_id, path_type, path_id, hash_entry, sha256, timestamp) SELECT client_id, path_type, path_id, hash_entry, sha256, timestamp FROM client_path_infos WHERE hash_entry IS NOT NULL; """ query += """ UPDATE client_paths, client_path_infos SET client_paths.last_hash_entry_timestamp = client_path_infos.timestamp WHERE client_paths.client_id = client_path_infos.client_id AND client_paths.path_type = client_path_infos.path_type AND client_paths.path_id = client_path_infos.path_id AND client_path_infos.hash_entry IS NOT NULL; """ try: with contextlib.closing(connection.cursor()) as cursor: cursor.execute(query, path_info_values + parent_path_info_values) finally: # Drop the temporary table in a separate cursor. This ensures that # even if the previous cursor.execute fails mid-way leaving the # temporary table created (as table creation can't be rolled back), the # table would still be correctly dropped. # # This is important since connections are reused in the MySQL connection # pool. with contextlib.closing(connection.cursor()) as cursor: cursor.execute("DROP TEMPORARY TABLE IF EXISTS client_path_infos")
def emit_end(MEMORY=None): """ This special ending autoinitializes required inits (mainly alloc.asm) and changes the MEMORY initial address if it is ORG XXXX to ORG XXXX + heap size """ output = [] output.extend(AT_END) if REQUIRES.intersection(MEMINITS) or '__MEM_INIT' in INITS: output.append(OPTIONS.heap_start_label.value + ':') output.append('; Defines DATA END\n' + 'ZXBASIC_USER_DATA_END EQU ZXBASIC_MEM_HEAP + ZXBASIC_HEAP_SIZE') else: output.append('; Defines DATA END --> HEAP size is 0\n' + 'ZXBASIC_USER_DATA_END EQU ZXBASIC_MEM_HEAP') output.append('; Defines USER DATA Length in bytes\n' + 'ZXBASIC_USER_DATA_LEN EQU ZXBASIC_USER_DATA_END - ZXBASIC_USER_DATA') if OPTIONS.autorun.value: output.append('END %s' % START_LABEL) else: output.append('END') return output
This special ending autoinitializes required inits (mainly alloc.asm) and changes the MEMORY initial address if it is ORG XXXX to ORG XXXX + heap size
Below is the the instruction that describes the task: ### Input: This special ending autoinitializes required inits (mainly alloc.asm) and changes the MEMORY initial address if it is ORG XXXX to ORG XXXX + heap size ### Response: def emit_end(MEMORY=None): """ This special ending autoinitializes required inits (mainly alloc.asm) and changes the MEMORY initial address if it is ORG XXXX to ORG XXXX + heap size """ output = [] output.extend(AT_END) if REQUIRES.intersection(MEMINITS) or '__MEM_INIT' in INITS: output.append(OPTIONS.heap_start_label.value + ':') output.append('; Defines DATA END\n' + 'ZXBASIC_USER_DATA_END EQU ZXBASIC_MEM_HEAP + ZXBASIC_HEAP_SIZE') else: output.append('; Defines DATA END --> HEAP size is 0\n' + 'ZXBASIC_USER_DATA_END EQU ZXBASIC_MEM_HEAP') output.append('; Defines USER DATA Length in bytes\n' + 'ZXBASIC_USER_DATA_LEN EQU ZXBASIC_USER_DATA_END - ZXBASIC_USER_DATA') if OPTIONS.autorun.value: output.append('END %s' % START_LABEL) else: output.append('END') return output
def _select_filepath(self, filepath): """Make a choice between ``filepath`` and ``self.default_filepath``. Args ---- filepath: str the filepath to be compared with ``self.default_filepath`` Returns ------- str The selected filepath """ if filepath is None: return self.default_filepath else: if os.path.basename(filepath) == '': filepath = os.path.join(filepath, os.path.basename( self.default_filepath)) return filepath
Make a choice between ``filepath`` and ``self.default_filepath``. Args ---- filepath: str the filepath to be compared with ``self.default_filepath`` Returns ------- str The selected filepath
Below is the the instruction that describes the task: ### Input: Make a choice between ``filepath`` and ``self.default_filepath``. Args ---- filepath: str the filepath to be compared with ``self.default_filepath`` Returns ------- str The selected filepath ### Response: def _select_filepath(self, filepath): """Make a choice between ``filepath`` and ``self.default_filepath``. Args ---- filepath: str the filepath to be compared with ``self.default_filepath`` Returns ------- str The selected filepath """ if filepath is None: return self.default_filepath else: if os.path.basename(filepath) == '': filepath = os.path.join(filepath, os.path.basename( self.default_filepath)) return filepath
def send(self, request_id, payload): """ Send a request to Kafka Arguments:: request_id (int): can be any int (used only for debug logging...) payload: an encoded kafka packet (see KafkaProtocol) """ log.debug("About to send %d bytes to Kafka, request %d" % (len(payload), request_id)) # Make sure we have a connection if not self._sock: self.reinit() try: self._sock.sendall(payload) except socket.error: log.exception('Unable to send payload to Kafka') self._raise_connection_error()
Send a request to Kafka Arguments:: request_id (int): can be any int (used only for debug logging...) payload: an encoded kafka packet (see KafkaProtocol)
Below is the the instruction that describes the task: ### Input: Send a request to Kafka Arguments:: request_id (int): can be any int (used only for debug logging...) payload: an encoded kafka packet (see KafkaProtocol) ### Response: def send(self, request_id, payload): """ Send a request to Kafka Arguments:: request_id (int): can be any int (used only for debug logging...) payload: an encoded kafka packet (see KafkaProtocol) """ log.debug("About to send %d bytes to Kafka, request %d" % (len(payload), request_id)) # Make sure we have a connection if not self._sock: self.reinit() try: self._sock.sendall(payload) except socket.error: log.exception('Unable to send payload to Kafka') self._raise_connection_error()
def dead(self): """Whether the callback no longer exists. If the callback is maintained via a weak reference, and that weak reference has been collected, this will be true instead of false. """ if not self._weak: return False cb = self._callback() if cb is None: return True return False
Whether the callback no longer exists. If the callback is maintained via a weak reference, and that weak reference has been collected, this will be true instead of false.
Below is the instruction that describes the task: ### Input: Whether the callback no longer exists. If the callback is maintained via a weak reference, and that weak reference has been collected, this will be true instead of false. ### Response: def dead(self): """Whether the callback no longer exists. If the callback is maintained via a weak reference, and that weak reference has been collected, this will be true instead of false. """ if not self._weak: return False cb = self._callback() if cb is None: return True return False
def descendants(self, include_clip=True): """ Return a generator to iterate over all descendant layers. Example:: # Iterate over all layers for layer in psd.descendants(): print(layer) # Iterate over all layers in reverse order for layer in reversed(list(psd.descendants())): print(layer) :param include_clip: include clipping layers. """ for layer in self: yield layer if layer.is_group(): for child in layer.descendants(include_clip): yield child if include_clip and hasattr(layer, 'clip_layers'): for clip_layer in layer.clip_layers: yield clip_layer
Return a generator to iterate over all descendant layers. Example:: # Iterate over all layers for layer in psd.descendants(): print(layer) # Iterate over all layers in reverse order for layer in reversed(list(psd.descendants())): print(layer) :param include_clip: include clipping layers.
Below is the the instruction that describes the task: ### Input: Return a generator to iterate over all descendant layers. Example:: # Iterate over all layers for layer in psd.descendants(): print(layer) # Iterate over all layers in reverse order for layer in reversed(list(psd.descendants())): print(layer) :param include_clip: include clipping layers. ### Response: def descendants(self, include_clip=True): """ Return a generator to iterate over all descendant layers. Example:: # Iterate over all layers for layer in psd.descendants(): print(layer) # Iterate over all layers in reverse order for layer in reversed(list(psd.descendants())): print(layer) :param include_clip: include clipping layers. """ for layer in self: yield layer if layer.is_group(): for child in layer.descendants(include_clip): yield child if include_clip and hasattr(layer, 'clip_layers'): for clip_layer in layer.clip_layers: yield clip_layer
def getSkeletalBoneDataCompressed(self, action, eMotionRange, pvCompressedData, unCompressedSize): """ Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller. """ fn = self.function_table.getSkeletalBoneDataCompressed punRequiredCompressedSize = c_uint32() result = fn(action, eMotionRange, pvCompressedData, unCompressedSize, byref(punRequiredCompressedSize)) return result, punRequiredCompressedSize.value
Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller.
Below is the the instruction that describes the task: ### Input: Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller. ### Response: def getSkeletalBoneDataCompressed(self, action, eMotionRange, pvCompressedData, unCompressedSize): """ Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller. """ fn = self.function_table.getSkeletalBoneDataCompressed punRequiredCompressedSize = c_uint32() result = fn(action, eMotionRange, pvCompressedData, unCompressedSize, byref(punRequiredCompressedSize)) return result, punRequiredCompressedSize.value
def generate_dh_parameters(bit_size): """ Generates DH parameters for use with Diffie-Hellman key exchange. Returns a structure in the format of DHParameter defined in PKCS#3, which is also used by the OpenSSL dhparam tool. THIS CAN BE VERY TIME CONSUMING! :param bit_size: The integer bit size of the parameters to generate. Must be between 512 and 4096, and divisible by 64. Recommended secure value as of early 2016 is 2048, with an absolute minimum of 1024. :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: An asn1crypto.algos.DHParameters object. Use oscrypto.asymmetric.dump_dh_parameters() to save to disk for usage with web servers. """ if not isinstance(bit_size, int_types): raise TypeError(pretty_message( ''' bit_size must be an integer, not %s ''', type_name(bit_size) )) if bit_size < 512: raise ValueError('bit_size must be greater than or equal to 512') if bit_size > 4096: raise ValueError('bit_size must be less than or equal to 4096') if bit_size % 64 != 0: raise ValueError('bit_size must be a multiple of 64') alg_handle = None # The following algorithm has elements taken from OpenSSL. In short, it # generates random numbers and then ensures that they are valid for the # hardcoded generator of 2, and then ensures the number is a "safe" prime # by ensuring p//2 is prime also. # OpenSSL allows use of generator 2 or 5, but we hardcode 2 since it is # the default, and what is used by Security.framework on OS X also. g = 2 try: byte_size = bit_size // 8 if _backend == 'win': alg_handle = open_alg_handle(BcryptConst.BCRYPT_RNG_ALGORITHM) buffer = buffer_from_bytes(byte_size) while True: if _backend == 'winlegacy': rb = os.urandom(byte_size) else: res = bcrypt.BCryptGenRandom(alg_handle, buffer, byte_size, 0) handle_error(res) rb = bytes_from_buffer(buffer) p = int_from_bytes(rb) # If a number is even, it can't be prime if p % 2 == 0: continue # Perform the generator checks outlined in OpenSSL's # dh_builtin_genparams() located in dh_gen.c if g == 2: if p % 24 != 11: continue elif g == 5: rem = p % 10 if rem != 3 and rem != 7: continue divisible = False for prime in _SMALL_PRIMES: if p % prime == 0: divisible = True break # If the number is not divisible by any of the small primes, then # move on to the full Miller-Rabin test. if not divisible and _is_prime(bit_size, p): q = p // 2 if _is_prime(bit_size, q): return algos.DHParameters({'p': p, 'g': g}) finally: if alg_handle: close_alg_handle(alg_handle)
Generates DH parameters for use with Diffie-Hellman key exchange. Returns a structure in the format of DHParameter defined in PKCS#3, which is also used by the OpenSSL dhparam tool. THIS CAN BE VERY TIME CONSUMING! :param bit_size: The integer bit size of the parameters to generate. Must be between 512 and 4096, and divisible by 64. Recommended secure value as of early 2016 is 2048, with an absolute minimum of 1024. :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: An asn1crypto.algos.DHParameters object. Use oscrypto.asymmetric.dump_dh_parameters() to save to disk for usage with web servers.
Below is the the instruction that describes the task: ### Input: Generates DH parameters for use with Diffie-Hellman key exchange. Returns a structure in the format of DHParameter defined in PKCS#3, which is also used by the OpenSSL dhparam tool. THIS CAN BE VERY TIME CONSUMING! :param bit_size: The integer bit size of the parameters to generate. Must be between 512 and 4096, and divisible by 64. Recommended secure value as of early 2016 is 2048, with an absolute minimum of 1024. :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: An asn1crypto.algos.DHParameters object. Use oscrypto.asymmetric.dump_dh_parameters() to save to disk for usage with web servers. ### Response: def generate_dh_parameters(bit_size): """ Generates DH parameters for use with Diffie-Hellman key exchange. Returns a structure in the format of DHParameter defined in PKCS#3, which is also used by the OpenSSL dhparam tool. THIS CAN BE VERY TIME CONSUMING! :param bit_size: The integer bit size of the parameters to generate. Must be between 512 and 4096, and divisible by 64. Recommended secure value as of early 2016 is 2048, with an absolute minimum of 1024. :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: An asn1crypto.algos.DHParameters object. Use oscrypto.asymmetric.dump_dh_parameters() to save to disk for usage with web servers. """ if not isinstance(bit_size, int_types): raise TypeError(pretty_message( ''' bit_size must be an integer, not %s ''', type_name(bit_size) )) if bit_size < 512: raise ValueError('bit_size must be greater than or equal to 512') if bit_size > 4096: raise ValueError('bit_size must be less than or equal to 4096') if bit_size % 64 != 0: raise ValueError('bit_size must be a multiple of 64') alg_handle = None # The following algorithm has elements taken from OpenSSL. In short, it # generates random numbers and then ensures that they are valid for the # hardcoded generator of 2, and then ensures the number is a "safe" prime # by ensuring p//2 is prime also. # OpenSSL allows use of generator 2 or 5, but we hardcode 2 since it is # the default, and what is used by Security.framework on OS X also. g = 2 try: byte_size = bit_size // 8 if _backend == 'win': alg_handle = open_alg_handle(BcryptConst.BCRYPT_RNG_ALGORITHM) buffer = buffer_from_bytes(byte_size) while True: if _backend == 'winlegacy': rb = os.urandom(byte_size) else: res = bcrypt.BCryptGenRandom(alg_handle, buffer, byte_size, 0) handle_error(res) rb = bytes_from_buffer(buffer) p = int_from_bytes(rb) # If a number is even, it can't be prime if p % 2 == 0: continue # Perform the generator checks outlined in OpenSSL's # dh_builtin_genparams() located in dh_gen.c if g == 2: if p % 24 != 11: continue elif g == 5: rem = p % 10 if rem != 3 and rem != 7: continue divisible = False for prime in _SMALL_PRIMES: if p % prime == 0: divisible = True break # If the number is not divisible by any of the small primes, then # move on to the full Miller-Rabin test. if not divisible and _is_prime(bit_size, p): q = p // 2 if _is_prime(bit_size, q): return algos.DHParameters({'p': p, 'g': g}) finally: if alg_handle: close_alg_handle(alg_handle)
def get_xy_1D(ds, stride=1, getval=False): """Return 1D arrays of x and y map coordinates for input GDAL Dataset """ gt = ds.GetGeoTransform() #stride = stride_m/gt[1] pX = np.arange(0, ds.RasterXSize, stride) pY = np.arange(0, ds.RasterYSize, stride) mX, dummy = pixelToMap(pX, pY[0], gt) dummy, mY = pixelToMap(pX[0], pY, gt) return mX, mY
Return 1D arrays of x and y map coordinates for input GDAL Dataset
Below is the instruction that describes the task: ### Input: Return 1D arrays of x and y map coordinates for input GDAL Dataset ### Response: def get_xy_1D(ds, stride=1, getval=False): """Return 1D arrays of x and y map coordinates for input GDAL Dataset """ gt = ds.GetGeoTransform() #stride = stride_m/gt[1] pX = np.arange(0, ds.RasterXSize, stride) pY = np.arange(0, ds.RasterYSize, stride) mX, dummy = pixelToMap(pX, pY[0], gt) dummy, mY = pixelToMap(pX[0], pY, gt) return mX, mY
def convert_to_nested_dict(dotted_dict): """Convert a dict with dotted path keys to corresponding nested dict.""" nested_dict = {} for k, v in iterate_flattened(dotted_dict): set_by_dotted_path(nested_dict, k, v) return nested_dict
Convert a dict with dotted path keys to corresponding nested dict.
Below is the instruction that describes the task: ### Input: Convert a dict with dotted path keys to corresponding nested dict. ### Response: def convert_to_nested_dict(dotted_dict): """Convert a dict with dotted path keys to corresponding nested dict.""" nested_dict = {} for k, v in iterate_flattened(dotted_dict): set_by_dotted_path(nested_dict, k, v) return nested_dict
def _spawn_kafka_connection_thread(self): """Spawns a kafka connection thread""" self.logger.debug("Spawn kafka connection thread") self.kafka_connected = False self._kafka_thread = Thread(target=self._setup_kafka) self._kafka_thread.setDaemon(True) self._kafka_thread.start()
Spawns a kafka connection thread
Below is the instruction that describes the task: ### Input: Spawns a kafka connection thread ### Response: def _spawn_kafka_connection_thread(self): """Spawns a kafka connection thread""" self.logger.debug("Spawn kafka connection thread") self.kafka_connected = False self._kafka_thread = Thread(target=self._setup_kafka) self._kafka_thread.setDaemon(True) self._kafka_thread.start()
def get_work_item_by_id(self, wi_id): ''' Retrieves a single work item based off of the supplied ID :param wi_id: The work item ID number :return: Workitem or None ''' work_items = self.get_work_items(id=wi_id) if work_items is not None: return work_items[0] return None
Retrieves a single work item based off of the supplied ID :param wi_id: The work item ID number :return: Workitem or None
Below is the instruction that describes the task: ### Input: Retrieves a single work item based off of the supplied ID :param wi_id: The work item ID number :return: Workitem or None ### Response: def get_work_item_by_id(self, wi_id): ''' Retrieves a single work item based off of the supplied ID :param wi_id: The work item ID number :return: Workitem or None ''' work_items = self.get_work_items(id=wi_id) if work_items is not None: return work_items[0] return None
def get_s3_region_from_endpoint(endpoint): """ Extracts and returns an AWS S3 region from an endpoint of form `s3-ap-southeast-1.amazonaws.com` :param endpoint: Endpoint region to be extracted. """ # Extract region by regex search. m = _EXTRACT_REGION_REGEX.search(endpoint) if m: # Regex matches, we have found a region. region = m.group(1) if region == 'external-1': # Handle special scenario for us-east-1 URL. return 'us-east-1' if region.startswith('dualstack'): # Handle special scenario for dualstack URL. return region.split('.')[1] return region # No regex matches return None. return None
Extracts and returns an AWS S3 region from an endpoint of form `s3-ap-southeast-1.amazonaws.com` :param endpoint: Endpoint region to be extracted.
Below is the the instruction that describes the task: ### Input: Extracts and returns an AWS S3 region from an endpoint of form `s3-ap-southeast-1.amazonaws.com` :param endpoint: Endpoint region to be extracted. ### Response: def get_s3_region_from_endpoint(endpoint): """ Extracts and returns an AWS S3 region from an endpoint of form `s3-ap-southeast-1.amazonaws.com` :param endpoint: Endpoint region to be extracted. """ # Extract region by regex search. m = _EXTRACT_REGION_REGEX.search(endpoint) if m: # Regex matches, we have found a region. region = m.group(1) if region == 'external-1': # Handle special scenario for us-east-1 URL. return 'us-east-1' if region.startswith('dualstack'): # Handle special scenario for dualstack URL. return region.split('.')[1] return region # No regex matches return None. return None
def verify(self): """Verify Magic Envelope document against public key.""" if not self.public_key: self.fetch_public_key() data = self.doc.find(".//{http://salmon-protocol.org/ns/magic-env}data").text sig = self.doc.find(".//{http://salmon-protocol.org/ns/magic-env}sig").text sig_contents = '.'.join([ data, b64encode(b"application/xml").decode("ascii"), b64encode(b"base64url").decode("ascii"), b64encode(b"RSA-SHA256").decode("ascii") ]) sig_hash = SHA256.new(sig_contents.encode("ascii")) cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key)) if not cipher.verify(sig_hash, urlsafe_b64decode(sig)): raise SignatureVerificationError("Signature cannot be verified using the given public key")
Verify Magic Envelope document against public key.
Below is the the instruction that describes the task: ### Input: Verify Magic Envelope document against public key. ### Response: def verify(self): """Verify Magic Envelope document against public key.""" if not self.public_key: self.fetch_public_key() data = self.doc.find(".//{http://salmon-protocol.org/ns/magic-env}data").text sig = self.doc.find(".//{http://salmon-protocol.org/ns/magic-env}sig").text sig_contents = '.'.join([ data, b64encode(b"application/xml").decode("ascii"), b64encode(b"base64url").decode("ascii"), b64encode(b"RSA-SHA256").decode("ascii") ]) sig_hash = SHA256.new(sig_contents.encode("ascii")) cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key)) if not cipher.verify(sig_hash, urlsafe_b64decode(sig)): raise SignatureVerificationError("Signature cannot be verified using the given public key")
def delete_resource_subscription(self, device_id, _resource_path, **kwargs): # noqa: E501 """Remove a subscription # noqa: E501 To remove an existing subscription from a resource path. **Example usage:** curl -X DELETE \\ https://api.us-east-1.mbedcloud.com/v2/subscriptions/{device-id}/{resourcePath} \\ -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.delete_resource_subscription(device_id, _resource_path, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique Device Management device ID for the endpoint. Note that the ID must be an exact match. You cannot use wildcards here. (required) :param str _resource_path: The URL of the resource. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.delete_resource_subscription_with_http_info(device_id, _resource_path, **kwargs) # noqa: E501 else: (data) = self.delete_resource_subscription_with_http_info(device_id, _resource_path, **kwargs) # noqa: E501 return data
Remove a subscription # noqa: E501 To remove an existing subscription from a resource path. **Example usage:** curl -X DELETE \\ https://api.us-east-1.mbedcloud.com/v2/subscriptions/{device-id}/{resourcePath} \\ -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.delete_resource_subscription(device_id, _resource_path, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique Device Management device ID for the endpoint. Note that the ID must be an exact match. You cannot use wildcards here. (required) :param str _resource_path: The URL of the resource. (required) :return: None If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Remove a subscription # noqa: E501 To remove an existing subscription from a resource path. **Example usage:** curl -X DELETE \\ https://api.us-east-1.mbedcloud.com/v2/subscriptions/{device-id}/{resourcePath} \\ -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.delete_resource_subscription(device_id, _resource_path, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique Device Management device ID for the endpoint. Note that the ID must be an exact match. You cannot use wildcards here. (required) :param str _resource_path: The URL of the resource. (required) :return: None If the method is called asynchronously, returns the request thread. ### Response: def delete_resource_subscription(self, device_id, _resource_path, **kwargs): # noqa: E501 """Remove a subscription # noqa: E501 To remove an existing subscription from a resource path. **Example usage:** curl -X DELETE \\ https://api.us-east-1.mbedcloud.com/v2/subscriptions/{device-id}/{resourcePath} \\ -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.delete_resource_subscription(device_id, _resource_path, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique Device Management device ID for the endpoint. Note that the ID must be an exact match. You cannot use wildcards here. (required) :param str _resource_path: The URL of the resource. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.delete_resource_subscription_with_http_info(device_id, _resource_path, **kwargs) # noqa: E501 else: (data) = self.delete_resource_subscription_with_http_info(device_id, _resource_path, **kwargs) # noqa: E501 return data
def withValues(cls, *values): """Creates a subclass with discrete values constraint. """ class X(cls): subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint( *values) X.__name__ = cls.__name__ return X
Creates a subclass with discrete values constraint.
Below is the instruction that describes the task: ### Input: Creates a subclass with discrete values constraint. ### Response: def withValues(cls, *values): """Creates a subclass with discrete values constraint. """ class X(cls): subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint( *values) X.__name__ = cls.__name__ return X
def specific_notes(hazard, exposure): """Return notes which are specific for a given hazard and exposure. :param hazard: The hazard definition. :type hazard: safe.definition.hazard :param exposure: The exposure definition. :type exposure: safe.definition.exposure :return: List of notes specific. :rtype: list """ for item in ITEMS: if item['hazard'] == hazard and item['exposure'] == exposure: return item.get('notes', []) return []
Return notes which are specific for a given hazard and exposure. :param hazard: The hazard definition. :type hazard: safe.definition.hazard :param exposure: The exposure definition. :type exposure: safe.definition.exposure :return: List of notes specific. :rtype: list
Below is the instruction that describes the task: ### Input: Return notes which are specific for a given hazard and exposure. :param hazard: The hazard definition. :type hazard: safe.definition.hazard :param exposure: The exposure definition. :type exposure: safe.definition.exposure :return: List of notes specific. :rtype: list ### Response: def specific_notes(hazard, exposure): """Return notes which are specific for a given hazard and exposure. :param hazard: The hazard definition. :type hazard: safe.definition.hazard :param exposure: The exposure definition. :type exposure: safe.definition.exposure :return: List of notes specific. :rtype: list """ for item in ITEMS: if item['hazard'] == hazard and item['exposure'] == exposure: return item.get('notes', []) return []
def run_norm(net, df=None, norm_type='zscore', axis='row', keep_orig=False): ''' A dataframe (more accurately a dictionary of dataframes, e.g. mat, mat_up...) can be passed to run_norm and a normalization will be run ( e.g. zscore) on either the rows or columns ''' # df here is actually a dictionary of several dataframes, 'mat', 'mat_orig', # etc if df is None: df = net.dat_to_df() if norm_type == 'zscore': df = zscore_df(df, axis, keep_orig) if norm_type == 'qn': df = qn_df(df, axis, keep_orig) net.df_to_dat(df)
A dataframe (more accurately a dictionary of dataframes, e.g. mat, mat_up...) can be passed to run_norm and a normalization will be run ( e.g. zscore) on either the rows or columns
Below is the the instruction that describes the task: ### Input: A dataframe (more accurately a dictionary of dataframes, e.g. mat, mat_up...) can be passed to run_norm and a normalization will be run ( e.g. zscore) on either the rows or columns ### Response: def run_norm(net, df=None, norm_type='zscore', axis='row', keep_orig=False): ''' A dataframe (more accurately a dictionary of dataframes, e.g. mat, mat_up...) can be passed to run_norm and a normalization will be run ( e.g. zscore) on either the rows or columns ''' # df here is actually a dictionary of several dataframes, 'mat', 'mat_orig', # etc if df is None: df = net.dat_to_df() if norm_type == 'zscore': df = zscore_df(df, axis, keep_orig) if norm_type == 'qn': df = qn_df(df, axis, keep_orig) net.df_to_dat(df)
def upload_folder_run(upload_context): """ Function run by CreateFolderCommand to create the folder. Runs in a background process. :param upload_context: UploadContext: contains data service setup and folder details. """ data_service = upload_context.make_data_service() folder_name, parent_kind, parent_remote_id = upload_context.params result = data_service.create_folder(folder_name, parent_kind, parent_remote_id) return result.json()['id']
Function run by CreateFolderCommand to create the folder. Runs in a background process. :param upload_context: UploadContext: contains data service setup and folder details.
Below is the instruction that describes the task: ### Input: Function run by CreateFolderCommand to create the folder. Runs in a background process. :param upload_context: UploadContext: contains data service setup and folder details. ### Response: def upload_folder_run(upload_context): """ Function run by CreateFolderCommand to create the folder. Runs in a background process. :param upload_context: UploadContext: contains data service setup and folder details. """ data_service = upload_context.make_data_service() folder_name, parent_kind, parent_remote_id = upload_context.params result = data_service.create_folder(folder_name, parent_kind, parent_remote_id) return result.json()['id']
def workspace_from_dir(directory, recurse=True): """ Construct a workspace object from a directory name. If recurse=True, this function will search down the directory tree and return the first workspace it finds. If recurse=False, an exception will be raised if the given directory is not a workspace. Workspace identification requires a file called 'workspace.pkl' to be present in each workspace directory, which can unfortunately be a little fragile. """ directory = os.path.abspath(directory) pickle_path = os.path.join(directory, 'workspace.pkl') # Make sure the given directory contains a 'workspace' file. This file is # needed to instantiate the right kind of workspace. if not os.path.exists(pickle_path): if recurse: parent_dir = os.path.dirname(directory) # Keep looking for a workspace as long as we haven't hit the root # of the file system. If an exception is raised, that means no # workspace was found. Catch and re-raise the exception so that # the name of the directory reported in the exception is meaningful # to the user. try: return workspace_from_dir(parent_dir, parent_dir != '/') except WorkspaceNotFound: raise WorkspaceNotFound(directory) else: raise WorkspaceNotFound(directory) # Load the 'workspace' file and create a workspace. with open(pickle_path) as file: workspace_class = pickle.load(file) return workspace_class.from_directory(directory)
Construct a workspace object from a directory name. If recurse=True, this function will search down the directory tree and return the first workspace it finds. If recurse=False, an exception will be raised if the given directory is not a workspace. Workspace identification requires a file called 'workspace.pkl' to be present in each workspace directory, which can unfortunately be a little fragile.
Below is the the instruction that describes the task: ### Input: Construct a workspace object from a directory name. If recurse=True, this function will search down the directory tree and return the first workspace it finds. If recurse=False, an exception will be raised if the given directory is not a workspace. Workspace identification requires a file called 'workspace.pkl' to be present in each workspace directory, which can unfortunately be a little fragile. ### Response: def workspace_from_dir(directory, recurse=True): """ Construct a workspace object from a directory name. If recurse=True, this function will search down the directory tree and return the first workspace it finds. If recurse=False, an exception will be raised if the given directory is not a workspace. Workspace identification requires a file called 'workspace.pkl' to be present in each workspace directory, which can unfortunately be a little fragile. """ directory = os.path.abspath(directory) pickle_path = os.path.join(directory, 'workspace.pkl') # Make sure the given directory contains a 'workspace' file. This file is # needed to instantiate the right kind of workspace. if not os.path.exists(pickle_path): if recurse: parent_dir = os.path.dirname(directory) # Keep looking for a workspace as long as we haven't hit the root # of the file system. If an exception is raised, that means no # workspace was found. Catch and re-raise the exception so that # the name of the directory reported in the exception is meaningful # to the user. try: return workspace_from_dir(parent_dir, parent_dir != '/') except WorkspaceNotFound: raise WorkspaceNotFound(directory) else: raise WorkspaceNotFound(directory) # Load the 'workspace' file and create a workspace. with open(pickle_path) as file: workspace_class = pickle.load(file) return workspace_class.from_directory(directory)
def _on_open(self, sender, *args, **kwargs): """ Internal handler for opening the device. """ self.get_config() self.get_version() self.on_open()
Internal handler for opening the device.
Below is the instruction that describes the task: ### Input: Internal handler for opening the device. ### Response: def _on_open(self, sender, *args, **kwargs): """ Internal handler for opening the device. """ self.get_config() self.get_version() self.on_open()
def in_session(self): """Provide a session scope around a series of operations.""" session = self.get_session() try: yield session session.commit() except IntegrityError: session.rollback() raise DuplicateError("Duplicate unique value detected!") except (OperationalError, DisconnectionError): session.rollback() self.close() logger.warn("Database Connection Lost!") raise DatabaseConnectionError() except Exception: session.rollback() raise finally: session.close()
Provide a session scope around a series of operations.
Below is the instruction that describes the task: ### Input: Provide a session scope around a series of operations. ### Response: def in_session(self): """Provide a session scope around a series of operations.""" session = self.get_session() try: yield session session.commit() except IntegrityError: session.rollback() raise DuplicateError("Duplicate unique value detected!") except (OperationalError, DisconnectionError): session.rollback() self.close() logger.warn("Database Connection Lost!") raise DatabaseConnectionError() except Exception: session.rollback() raise finally: session.close()
def map_port(protocol, public_port, private_port, lifetime=3600, gateway_ip=None, retry=9, use_exception=True): """A function to map public_port to private_port of protocol. Returns the complete response on success. protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP public_port - the public port of the mapping requested private_port - the private port of the mapping requested lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification. gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr() retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification. use_exception - throw an exception if an error result is received from the gateway. Defaults to True. """ if protocol not in [NATPMP_PROTOCOL_UDP, NATPMP_PROTOCOL_TCP]: raise ValueError("Must be either NATPMP_PROTOCOL_UDP or " "NATPMP_PROTOCOL_TCP") if gateway_ip is None: gateway_ip = get_gateway_addr() response = None port_mapping_request = PortMapRequest(protocol, private_port, public_port, lifetime) port_mapping_response = \ send_request_with_retry(gateway_ip, port_mapping_request, response_data_class=PortMapResponse, retry=retry) if port_mapping_response.result != 0 and use_exception: raise NATPMPResultError(port_mapping_response.result, error_str(port_mapping_response.result), port_mapping_response) return port_mapping_response
A function to map public_port to private_port of protocol. Returns the complete response on success. protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP public_port - the public port of the mapping requested private_port - the private port of the mapping requested lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification. gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr() retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification. use_exception - throw an exception if an error result is received from the gateway. Defaults to True.
Below is the the instruction that describes the task: ### Input: A function to map public_port to private_port of protocol. Returns the complete response on success. protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP public_port - the public port of the mapping requested private_port - the private port of the mapping requested lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification. gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr() retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification. use_exception - throw an exception if an error result is received from the gateway. Defaults to True. ### Response: def map_port(protocol, public_port, private_port, lifetime=3600, gateway_ip=None, retry=9, use_exception=True): """A function to map public_port to private_port of protocol. Returns the complete response on success. protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP public_port - the public port of the mapping requested private_port - the private port of the mapping requested lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification. gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr() retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification. use_exception - throw an exception if an error result is received from the gateway. Defaults to True. """ if protocol not in [NATPMP_PROTOCOL_UDP, NATPMP_PROTOCOL_TCP]: raise ValueError("Must be either NATPMP_PROTOCOL_UDP or " "NATPMP_PROTOCOL_TCP") if gateway_ip is None: gateway_ip = get_gateway_addr() response = None port_mapping_request = PortMapRequest(protocol, private_port, public_port, lifetime) port_mapping_response = \ send_request_with_retry(gateway_ip, port_mapping_request, response_data_class=PortMapResponse, retry=retry) if port_mapping_response.result != 0 and use_exception: raise NATPMPResultError(port_mapping_response.result, error_str(port_mapping_response.result), port_mapping_response) return port_mapping_response
def encoding(encoding=True): """DEPRECATED: use pynvim.decode().""" if isinstance(encoding, str): encoding = True def dec(f): f._nvim_decode = encoding return f return dec
DEPRECATED: use pynvim.decode().
Below is the instruction that describes the task: ### Input: DEPRECATED: use pynvim.decode(). ### Response: def encoding(encoding=True): """DEPRECATED: use pynvim.decode().""" if isinstance(encoding, str): encoding = True def dec(f): f._nvim_decode = encoding return f return dec
def do_request(self, json_obj): """Perform one HTTP request to Zabbix API""" self.debug('Request: url="%s" headers=%s', self._api_url, self._http_headers) self.debug('Request: body=%s', json_obj) self.r_query.append(json_obj) request = urllib2.Request(url=self._api_url, data=json_obj.encode('utf-8'), headers=self._http_headers) opener = urllib2.build_opener(self._http_handler) urllib2.install_opener(opener) try: response = opener.open(request, timeout=self.timeout) except Exception as e: raise ZabbixAPIException('HTTP connection problem: %s' % e) self.debug('Response: code=%s', response.code) # NOTE: Getting a 412 response code means the headers are not in the list of allowed headers. if response.code != 200: raise ZabbixAPIException('HTTP error %s: %s' % (response.status, response.reason)) reads = response.read() if len(reads) == 0: raise ZabbixAPIException('Received zero answer') try: jobj = json.loads(reads.decode('utf-8')) except ValueError as e: self.log(ERROR, 'Unable to decode. returned string: %s', reads) raise ZabbixAPIException('Unable to decode response: %s' % e) self.debug('Response: body=%s', jobj) self.id += 1 if 'error' in jobj: # zabbix API error error = jobj['error'] if isinstance(error, dict): raise ZabbixAPIError(**error) try: return jobj['result'] except KeyError: raise ZabbixAPIException('Missing result in API response')
Perform one HTTP request to Zabbix API
Below is the the instruction that describes the task: ### Input: Perform one HTTP request to Zabbix API ### Response: def do_request(self, json_obj): """Perform one HTTP request to Zabbix API""" self.debug('Request: url="%s" headers=%s', self._api_url, self._http_headers) self.debug('Request: body=%s', json_obj) self.r_query.append(json_obj) request = urllib2.Request(url=self._api_url, data=json_obj.encode('utf-8'), headers=self._http_headers) opener = urllib2.build_opener(self._http_handler) urllib2.install_opener(opener) try: response = opener.open(request, timeout=self.timeout) except Exception as e: raise ZabbixAPIException('HTTP connection problem: %s' % e) self.debug('Response: code=%s', response.code) # NOTE: Getting a 412 response code means the headers are not in the list of allowed headers. if response.code != 200: raise ZabbixAPIException('HTTP error %s: %s' % (response.status, response.reason)) reads = response.read() if len(reads) == 0: raise ZabbixAPIException('Received zero answer') try: jobj = json.loads(reads.decode('utf-8')) except ValueError as e: self.log(ERROR, 'Unable to decode. returned string: %s', reads) raise ZabbixAPIException('Unable to decode response: %s' % e) self.debug('Response: body=%s', jobj) self.id += 1 if 'error' in jobj: # zabbix API error error = jobj['error'] if isinstance(error, dict): raise ZabbixAPIError(**error) try: return jobj['result'] except KeyError: raise ZabbixAPIException('Missing result in API response')
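A hypothetical call sketch for do_request. zapi stands for an already-constructed instance of the Zabbix API class that owns this method (an assumption); the payload is a plain JSON-RPC string because do_request encodes it itself.

import json

payload = json.dumps({
    "jsonrpc": "2.0",
    "method": "apiinfo.version",  # an unauthenticated Zabbix API method
    "params": {},
    "id": 1,
})
version = zapi.do_request(payload)  # returns the JSON-RPC 'result' field on success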
def gtom(adj, nr_steps): ''' The m-th step generalized topological overlap measure (GTOM) quantifies the extent to which a pair of nodes have similar m-th step neighbors. Mth-step neighbors are nodes that are reachable by a path of at most length m. This function computes the the M x M generalized topological overlap measure (GTOM) matrix for number of steps, numSteps. Parameters ---------- adj : NxN np.ndarray connection matrix nr_steps : int number of steps Returns ------- gt : NxN np.ndarray GTOM matrix Notes ----- When numSteps is equal to 1, GTOM is identical to the topological overlap measure (TOM) from reference [2]. In that case the 'gt' matrix records, for each pair of nodes, the fraction of neighbors the two nodes share in common, where "neighbors" are one step removed. As 'numSteps' is increased, neighbors that are furter out are considered. Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be converted from a similarity to a distance matrix by taking 1-gt. ''' bm = binarize(adj, copy=True) bm_aux = bm.copy() nr_nodes = len(adj) if nr_steps > nr_nodes: print("Warning: nr_steps exceeded nr_nodes. Setting nr_steps=nr_nodes") if nr_steps == 0: return bm else: for steps in range(2, nr_steps): for i in range(nr_nodes): # neighbors of node i ng_col, = np.where(bm_aux[i, :] == 1) # neighbors of neighbors of node i nng_row, nng_col = np.where(bm_aux[ng_col, :] == 1) new_ng = np.setdiff1d(nng_col, (i,)) # neighbors of neighbors of i become considered neighbors of i bm_aux[i, new_ng] = 1 bm_aux[new_ng, i] = 1 # numerator of GTOM formula numerator_mat = np.dot(bm_aux, bm_aux) + bm + np.eye(nr_nodes) # vector of node degrees bms = np.sum(bm_aux, axis=0) bms_r = np.tile(bms, (nr_nodes, 1)) denominator_mat = -bm + np.where(bms_r > bms_r.T, bms_r, bms_r.T) + 1 return numerator_mat / denominator_mat
The m-th step generalized topological overlap measure (GTOM) quantifies
the extent to which a pair of nodes have similar m-th step neighbors.
Mth-step neighbors are nodes that are reachable by a path of at most
length m.

This function computes the M x M generalized topological overlap
measure (GTOM) matrix for number of steps, numSteps.

Parameters
----------
adj : NxN np.ndarray
    connection matrix
nr_steps : int
    number of steps

Returns
-------
gt : NxN np.ndarray
    GTOM matrix

Notes
-----
When numSteps is equal to 1, GTOM is identical to the topological
overlap measure (TOM) from reference [2]. In that case the 'gt' matrix
records, for each pair of nodes, the fraction of neighbors the two
nodes share in common, where "neighbors" are one step removed. As
'numSteps' is increased, neighbors that are further out are considered.
Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be
converted from a similarity to a distance matrix by taking 1-gt.
Below is the the instruction that describes the task: ### Input: The m-th step generalized topological overlap measure (GTOM) quantifies the extent to which a pair of nodes have similar m-th step neighbors. Mth-step neighbors are nodes that are reachable by a path of at most length m. This function computes the the M x M generalized topological overlap measure (GTOM) matrix for number of steps, numSteps. Parameters ---------- adj : NxN np.ndarray connection matrix nr_steps : int number of steps Returns ------- gt : NxN np.ndarray GTOM matrix Notes ----- When numSteps is equal to 1, GTOM is identical to the topological overlap measure (TOM) from reference [2]. In that case the 'gt' matrix records, for each pair of nodes, the fraction of neighbors the two nodes share in common, where "neighbors" are one step removed. As 'numSteps' is increased, neighbors that are furter out are considered. Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be converted from a similarity to a distance matrix by taking 1-gt. ### Response: def gtom(adj, nr_steps): ''' The m-th step generalized topological overlap measure (GTOM) quantifies the extent to which a pair of nodes have similar m-th step neighbors. Mth-step neighbors are nodes that are reachable by a path of at most length m. This function computes the the M x M generalized topological overlap measure (GTOM) matrix for number of steps, numSteps. Parameters ---------- adj : NxN np.ndarray connection matrix nr_steps : int number of steps Returns ------- gt : NxN np.ndarray GTOM matrix Notes ----- When numSteps is equal to 1, GTOM is identical to the topological overlap measure (TOM) from reference [2]. In that case the 'gt' matrix records, for each pair of nodes, the fraction of neighbors the two nodes share in common, where "neighbors" are one step removed. As 'numSteps' is increased, neighbors that are furter out are considered. Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be converted from a similarity to a distance matrix by taking 1-gt. ''' bm = binarize(adj, copy=True) bm_aux = bm.copy() nr_nodes = len(adj) if nr_steps > nr_nodes: print("Warning: nr_steps exceeded nr_nodes. Setting nr_steps=nr_nodes") if nr_steps == 0: return bm else: for steps in range(2, nr_steps): for i in range(nr_nodes): # neighbors of node i ng_col, = np.where(bm_aux[i, :] == 1) # neighbors of neighbors of node i nng_row, nng_col = np.where(bm_aux[ng_col, :] == 1) new_ng = np.setdiff1d(nng_col, (i,)) # neighbors of neighbors of i become considered neighbors of i bm_aux[i, new_ng] = 1 bm_aux[new_ng, i] = 1 # numerator of GTOM formula numerator_mat = np.dot(bm_aux, bm_aux) + bm + np.eye(nr_nodes) # vector of node degrees bms = np.sum(bm_aux, axis=0) bms_r = np.tile(bms, (nr_nodes, 1)) denominator_mat = -bm + np.where(bms_r > bms_r.T, bms_r, bms_r.T) + 1 return numerator_mat / denominator_mat
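A small worked example for gtom on a 3-node path graph, assuming the function above (and its binarize/numpy dependencies, e.g. from bctpy) is importable. With nr_steps=1 the expansion loop body never runs and the result is the classic topological overlap matrix, as the docstring notes.

import numpy as np

# 3-node path graph: 0 - 1 - 2
adj = np.array([[0., 1., 0.],
                [1., 0., 1.],
                [0., 1., 0.]])

gt = gtom(adj, 1)   # nr_steps=1 reduces to the classic TOM
dist = 1 - gt       # similarity -> distance, as suggested in the docstring
print(gt.shape)     # (3, 3)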
def confirm_user(self, user): """ Confirms the specified user. Returns False if the user has already been confirmed, True otherwise. :param user: The user to confirm. """ if user.confirmed_at is not None: return False user.confirmed_at = self.security.datetime_factory() user.active = True self.user_manager.save(user) user_confirmed.send(app._get_current_object(), user=user) return True
Confirms the specified user. Returns False if the user has already been confirmed, True otherwise. :param user: The user to confirm.
Below is the the instruction that describes the task: ### Input: Confirms the specified user. Returns False if the user has already been confirmed, True otherwise. :param user: The user to confirm. ### Response: def confirm_user(self, user): """ Confirms the specified user. Returns False if the user has already been confirmed, True otherwise. :param user: The user to confirm. """ if user.confirmed_at is not None: return False user.confirmed_at = self.security.datetime_factory() user.active = True self.user_manager.save(user) user_confirmed.send(app._get_current_object(), user=user) return True
def set_power_mask(self, sid_mask, state): """Sets the power state of the smart power strip.""" packet = bytearray(16) packet[0x00] = 0x0d packet[0x02] = 0xa5 packet[0x03] = 0xa5 packet[0x04] = 0x5a packet[0x05] = 0x5a packet[0x06] = 0xb2 + ((sid_mask<<1) if state else sid_mask) packet[0x07] = 0xc0 packet[0x08] = 0x02 packet[0x0a] = 0x03 packet[0x0d] = sid_mask packet[0x0e] = sid_mask if state else 0 response = self.send_packet(0x6a, packet) err = response[0x22] | (response[0x23] << 8)
Sets the power state of the smart power strip.
Below is the the instruction that describes the task: ### Input: Sets the power state of the smart power strip. ### Response: def set_power_mask(self, sid_mask, state): """Sets the power state of the smart power strip.""" packet = bytearray(16) packet[0x00] = 0x0d packet[0x02] = 0xa5 packet[0x03] = 0xa5 packet[0x04] = 0x5a packet[0x05] = 0x5a packet[0x06] = 0xb2 + ((sid_mask<<1) if state else sid_mask) packet[0x07] = 0xc0 packet[0x08] = 0x02 packet[0x0a] = 0x03 packet[0x0d] = sid_mask packet[0x0e] = sid_mask if state else 0 response = self.send_packet(0x6a, packet) err = response[0x22] | (response[0x23] << 8)
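A hedged usage sketch for set_power_mask. sp stands for an already-connected smart power strip device object exposing the method above (an assumption), and the idea that bit 0 of sid_mask addresses the first socket is only inferred from the packet layout.

sp.set_power_mask(0x01, True)   # switch the first socket on
sp.set_power_mask(0x01, False)  # and off again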
def quadratic(Ks, dim, rhos, required=None): r''' Estimates \int p^2 based on kNN distances. In here because it's used in the l2 distance, above. Returns array of shape (num_Ks,). ''' # Estimated with alpha=1, beta=0: # B_{k,d,1,0} is the same as B_{k,d,0,1} in linear() # and the full estimator is # B / (n - 1) * mean(rho ^ -dim) N = rhos.shape[0] Ks = np.asarray(Ks) Bs = (Ks - 1) / np.pi ** (dim / 2) * gamma(dim / 2 + 1) # shape (num_Ks,) est = Bs / (N - 1) * np.mean(rhos ** (-dim), axis=0) return est
r''' Estimates \int p^2 based on kNN distances. In here because it's used in the l2 distance, above. Returns array of shape (num_Ks,).
Below is the the instruction that describes the task: ### Input: r''' Estimates \int p^2 based on kNN distances. In here because it's used in the l2 distance, above. Returns array of shape (num_Ks,). ### Response: def quadratic(Ks, dim, rhos, required=None): r''' Estimates \int p^2 based on kNN distances. In here because it's used in the l2 distance, above. Returns array of shape (num_Ks,). ''' # Estimated with alpha=1, beta=0: # B_{k,d,1,0} is the same as B_{k,d,0,1} in linear() # and the full estimator is # B / (n - 1) * mean(rho ^ -dim) N = rhos.shape[0] Ks = np.asarray(Ks) Bs = (Ks - 1) / np.pi ** (dim / 2) * gamma(dim / 2 + 1) # shape (num_Ks,) est = Bs / (N - 1) * np.mean(rhos ** (-dim), axis=0) return est
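A shape-oriented sketch of calling quadratic. The rhos array is random placeholder data standing in for k-NN distances (one column per entry in Ks), so the estimate itself is meaningless, but the expected shapes are visible.

import numpy as np

Ks = np.array([3, 5])
N = 1000
rng = np.random.default_rng(0)
rhos = rng.random((N, Ks.size)) + 0.1  # placeholder k-NN distances, shape (N, num_Ks)

est = quadratic(Ks, dim=2, rhos=rhos)
print(est.shape)  # (2,) -- one estimate per K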
def focusOutEvent(self, event): """Handle focus out event restoring the last valid selected path.""" # Calling asynchronously the 'add_current_text' to avoid crash # https://groups.google.com/group/spyderlib/browse_thread/thread/2257abf530e210bd if not self.is_valid(): lineedit = self.lineEdit() QTimer.singleShot(50, lambda: lineedit.setText(self.selected_text)) hide_status = getattr(self.lineEdit(), 'hide_status_icon', None) if hide_status: hide_status() QComboBox.focusOutEvent(self, event)
Handle focus out event restoring the last valid selected path.
Below is the the instruction that describes the task: ### Input: Handle focus out event restoring the last valid selected path. ### Response: def focusOutEvent(self, event): """Handle focus out event restoring the last valid selected path.""" # Calling asynchronously the 'add_current_text' to avoid crash # https://groups.google.com/group/spyderlib/browse_thread/thread/2257abf530e210bd if not self.is_valid(): lineedit = self.lineEdit() QTimer.singleShot(50, lambda: lineedit.setText(self.selected_text)) hide_status = getattr(self.lineEdit(), 'hide_status_icon', None) if hide_status: hide_status() QComboBox.focusOutEvent(self, event)
def get_time_evolution(self): """Get the function to append the time evolution of this term. Returns: function(circuit: Circuit, t: float): Add gates for time evolution to `circuit` with time `t` """ term = self.simplify() coeff = term.coeff if coeff.imag: raise ValueError("Not a real coefficient.") ops = term.ops def append_to_circuit(circuit, t): if not ops: return for op in ops: n = op.n if op.op == "X": circuit.h[n] elif op.op == "Y": circuit.rx(-half_pi)[n] for i in range(1, len(ops)): circuit.cx[ops[i-1].n, ops[i].n] circuit.rz(-2 * coeff * t)[ops[-1].n] for i in range(len(ops)-1, 0, -1): circuit.cx[ops[i-1].n, ops[i].n] for op in ops: n = op.n if op.op == "X": circuit.h[n] elif op.op == "Y": circuit.rx(half_pi)[n] return append_to_circuit
Get the function to append the time evolution of this term. Returns: function(circuit: Circuit, t: float): Add gates for time evolution to `circuit` with time `t`
Below is the the instruction that describes the task: ### Input: Get the function to append the time evolution of this term. Returns: function(circuit: Circuit, t: float): Add gates for time evolution to `circuit` with time `t` ### Response: def get_time_evolution(self): """Get the function to append the time evolution of this term. Returns: function(circuit: Circuit, t: float): Add gates for time evolution to `circuit` with time `t` """ term = self.simplify() coeff = term.coeff if coeff.imag: raise ValueError("Not a real coefficient.") ops = term.ops def append_to_circuit(circuit, t): if not ops: return for op in ops: n = op.n if op.op == "X": circuit.h[n] elif op.op == "Y": circuit.rx(-half_pi)[n] for i in range(1, len(ops)): circuit.cx[ops[i-1].n, ops[i].n] circuit.rz(-2 * coeff * t)[ops[-1].n] for i in range(len(ops)-1, 0, -1): circuit.cx[ops[i-1].n, ops[i].n] for op in ops: n = op.n if op.op == "X": circuit.h[n] elif op.op == "Y": circuit.rx(half_pi)[n] return append_to_circuit
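A hedged sketch of using the returned closure. term is assumed to be a Pauli-term object exposing the method above, and Circuit is assumed to be the matching circuit class from the same library (the gate names used inside the method imply a blueqat-style Circuit).

c = Circuit()                      # circuit class assumed to match the gates used above
evolve = term.get_time_evolution()
evolve(c, 0.5)                     # appends the time-evolution gates for this term with t=0.5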
def _get_all_resourcescenarios(network_id, user_id): """ Get all the resource scenarios in a network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id """ rs_qry = db.DBSession.query( Dataset.type, Dataset.unit_id, Dataset.name, Dataset.hash, Dataset.cr_date, Dataset.created_by, Dataset.hidden, Dataset.value, ResourceScenario.dataset_id, ResourceScenario.scenario_id, ResourceScenario.resource_attr_id, ResourceScenario.source, ResourceAttr.attr_id, ).outerjoin(DatasetOwner, and_(DatasetOwner.dataset_id==Dataset.id, DatasetOwner.user_id==user_id)).filter( or_(Dataset.hidden=='N', Dataset.created_by==user_id, DatasetOwner.user_id != None), ResourceAttr.id == ResourceScenario.resource_attr_id, Scenario.id==ResourceScenario.scenario_id, Scenario.network_id==network_id, Dataset.id==ResourceScenario.dataset_id) x = time.time() logging.info("Getting all resource scenarios") all_rs = db.DBSession.execute(rs_qry.statement).fetchall() log.info("%s resource scenarios retrieved in %s", len(all_rs), time.time()-x) logging.info("resource scenarios retrieved. Processing results...") x = time.time() rs_dict = dict() for rs in all_rs: rs_obj = JSONObject(rs) rs_attr = JSONObject({'attr_id':rs.attr_id}) value = rs.value rs_dataset = JSONDataset({ 'id':rs.dataset_id, 'type' : rs.type, 'unit_id' : rs.unit_id, 'name' : rs.name, 'hash' : rs.hash, 'cr_date':rs.cr_date, 'created_by':rs.created_by, 'hidden':rs.hidden, 'value':value, 'metadata':{}, }) rs_obj.resourceattr = rs_attr rs_obj.value = rs_dataset rs_obj.dataset = rs_dataset scenario_rs = rs_dict.get(rs.scenario_id, []) scenario_rs.append(rs_obj) rs_dict[rs.scenario_id] = scenario_rs logging.info("resource scenarios processed in %s", time.time()-x) return rs_dict
Get all the resource scenarios in a network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id
Below is the the instruction that describes the task: ### Input: Get all the resource scenarios in a network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id ### Response: def _get_all_resourcescenarios(network_id, user_id): """ Get all the resource scenarios in a network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id """ rs_qry = db.DBSession.query( Dataset.type, Dataset.unit_id, Dataset.name, Dataset.hash, Dataset.cr_date, Dataset.created_by, Dataset.hidden, Dataset.value, ResourceScenario.dataset_id, ResourceScenario.scenario_id, ResourceScenario.resource_attr_id, ResourceScenario.source, ResourceAttr.attr_id, ).outerjoin(DatasetOwner, and_(DatasetOwner.dataset_id==Dataset.id, DatasetOwner.user_id==user_id)).filter( or_(Dataset.hidden=='N', Dataset.created_by==user_id, DatasetOwner.user_id != None), ResourceAttr.id == ResourceScenario.resource_attr_id, Scenario.id==ResourceScenario.scenario_id, Scenario.network_id==network_id, Dataset.id==ResourceScenario.dataset_id) x = time.time() logging.info("Getting all resource scenarios") all_rs = db.DBSession.execute(rs_qry.statement).fetchall() log.info("%s resource scenarios retrieved in %s", len(all_rs), time.time()-x) logging.info("resource scenarios retrieved. Processing results...") x = time.time() rs_dict = dict() for rs in all_rs: rs_obj = JSONObject(rs) rs_attr = JSONObject({'attr_id':rs.attr_id}) value = rs.value rs_dataset = JSONDataset({ 'id':rs.dataset_id, 'type' : rs.type, 'unit_id' : rs.unit_id, 'name' : rs.name, 'hash' : rs.hash, 'cr_date':rs.cr_date, 'created_by':rs.created_by, 'hidden':rs.hidden, 'value':value, 'metadata':{}, }) rs_obj.resourceattr = rs_attr rs_obj.value = rs_dataset rs_obj.dataset = rs_dataset scenario_rs = rs_dict.get(rs.scenario_id, []) scenario_rs.append(rs_obj) rs_dict[rs.scenario_id] = scenario_rs logging.info("resource scenarios processed in %s", time.time()-x) return rs_dict
def post(self, request, *args, **kwargs): """Handler for HTTP POST requests.""" context = self.get_context_data(**kwargs) workflow = context[self.context_object_name] try: # Check for the VALIDATE_STEP* headers, if they are present # and valid integers, return validation results as JSON, # otherwise proceed normally. validate_step_start = int(self.request.META.get( 'HTTP_X_HORIZON_VALIDATE_STEP_START', '')) validate_step_end = int(self.request.META.get( 'HTTP_X_HORIZON_VALIDATE_STEP_END', '')) except ValueError: # No VALIDATE_STEP* headers, or invalid values. Just proceed # with normal workflow handling for POSTs. pass else: # There are valid VALIDATE_STEP* headers, so only do validation # for the specified steps and return results. data = self.validate_steps(request, workflow, validate_step_start, validate_step_end) return http.HttpResponse(json.dumps(data), content_type="application/json") if not workflow.is_valid(): return self.render_to_response(context) try: success = workflow.finalize() except forms.ValidationError: return self.render_to_response(context) except Exception: success = False exceptions.handle(request) if success: msg = workflow.format_status_message(workflow.success_message) messages.success(request, msg) else: msg = workflow.format_status_message(workflow.failure_message) messages.error(request, msg) if "HTTP_X_HORIZON_ADD_TO_FIELD" in self.request.META: field_id = self.request.META["HTTP_X_HORIZON_ADD_TO_FIELD"] response = http.HttpResponse() if workflow.object: data = [self.get_object_id(workflow.object), self.get_object_display(workflow.object)] response.content = json.dumps(data) response["X-Horizon-Add-To-Field"] = field_id return response next_url = self.request.POST.get(workflow.redirect_param_name) return shortcuts.redirect(next_url or workflow.get_success_url())
Handler for HTTP POST requests.
Below is the the instruction that describes the task: ### Input: Handler for HTTP POST requests. ### Response: def post(self, request, *args, **kwargs): """Handler for HTTP POST requests.""" context = self.get_context_data(**kwargs) workflow = context[self.context_object_name] try: # Check for the VALIDATE_STEP* headers, if they are present # and valid integers, return validation results as JSON, # otherwise proceed normally. validate_step_start = int(self.request.META.get( 'HTTP_X_HORIZON_VALIDATE_STEP_START', '')) validate_step_end = int(self.request.META.get( 'HTTP_X_HORIZON_VALIDATE_STEP_END', '')) except ValueError: # No VALIDATE_STEP* headers, or invalid values. Just proceed # with normal workflow handling for POSTs. pass else: # There are valid VALIDATE_STEP* headers, so only do validation # for the specified steps and return results. data = self.validate_steps(request, workflow, validate_step_start, validate_step_end) return http.HttpResponse(json.dumps(data), content_type="application/json") if not workflow.is_valid(): return self.render_to_response(context) try: success = workflow.finalize() except forms.ValidationError: return self.render_to_response(context) except Exception: success = False exceptions.handle(request) if success: msg = workflow.format_status_message(workflow.success_message) messages.success(request, msg) else: msg = workflow.format_status_message(workflow.failure_message) messages.error(request, msg) if "HTTP_X_HORIZON_ADD_TO_FIELD" in self.request.META: field_id = self.request.META["HTTP_X_HORIZON_ADD_TO_FIELD"] response = http.HttpResponse() if workflow.object: data = [self.get_object_id(workflow.object), self.get_object_display(workflow.object)] response.content = json.dumps(data) response["X-Horizon-Add-To-Field"] = field_id return response next_url = self.request.POST.get(workflow.redirect_param_name) return shortcuts.redirect(next_url or workflow.get_success_url())
def copy_action_callback(self, *event): """Callback method for copy action""" if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None: sm_selection, sm_selected_model_list = self.get_state_machine_selection() # only list specific elements are copied by widget if sm_selection is not None: sm_selection.set(sm_selected_model_list) global_clipboard.copy(sm_selection) return True
Callback method for copy action
Below is the the instruction that describes the task: ### Input: Callback method for copy action ### Response: def copy_action_callback(self, *event): """Callback method for copy action""" if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None: sm_selection, sm_selected_model_list = self.get_state_machine_selection() # only list specific elements are copied by widget if sm_selection is not None: sm_selection.set(sm_selected_model_list) global_clipboard.copy(sm_selection) return True
def get_items_by_ids(self, item_ids, item_type=None): """Given a list of item ids, return all the Item objects Args: item_ids (obj): List of item IDs to query item_type (str): (optional) Item type to filter results with Returns: List of `Item` objects for given item IDs and given item type """ urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids] result = self._run_async(urls=urls) items = [Item(r) for r in result if r] if item_type: return [item for item in items if item.item_type == item_type] else: return items
Given a list of item ids, return all the Item objects Args: item_ids (obj): List of item IDs to query item_type (str): (optional) Item type to filter results with Returns: List of `Item` objects for given item IDs and given item type
Below is the the instruction that describes the task: ### Input: Given a list of item ids, return all the Item objects Args: item_ids (obj): List of item IDs to query item_type (str): (optional) Item type to filter results with Returns: List of `Item` objects for given item IDs and given item type ### Response: def get_items_by_ids(self, item_ids, item_type=None): """Given a list of item ids, return all the Item objects Args: item_ids (obj): List of item IDs to query item_type (str): (optional) Item type to filter results with Returns: List of `Item` objects for given item IDs and given item type """ urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids] result = self._run_async(urls=urls) items = [Item(r) for r in result if r] if item_type: return [item for item in items if item.item_type == item_type] else: return items
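Hypothetical usage, assuming client is an instance of the (Hacker News style) API wrapper class that owns the method above and that the listed item IDs exist.

stories = client.get_items_by_ids([8863, 121003], item_type="story")
for item in stories:
    print(item.item_type)  # every remaining item has item_type == "story"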
def begin_scan(self, callback=None, interval=DEF_SCAN_INTERVAL, window=DEF_SCAN_WINDOW): """Begins a BLE scan and returns immediately. Using this method you can begin a BLE scan and leave the dongle in scanning mode in the background. It will remain in scanning mode until you call the :meth:`end_scan` method or the :meth:`reset` method. Args: callback (callbable): a callback that will be called for each new device discovered by the scanning process. Will be passed a single argument, a :class:`ScanResult` object. May be None if not needed. interval (int): BLE scan interval, in units of 625us window (int): BLE scan window, in units of 625us Returns: True on success, False otherwise. """ # TODO validate params and current state logger.debug('configuring scan parameters') self.api.ble_cmd_gap_set_scan_parameters(interval, window, 1) self._set_state(self._STATE_CONFIGURE_SCAN) self.api.ble_cmd_gap_discover(1) # any discoverable devices self._wait_for_state(self._STATE_CONFIGURE_SCAN) # TODO check state logger.debug('starting async scan for devices') self.scan_targets = None self.scan_callback = callback self._set_state(self._STATE_SCANNING) return True
Begins a BLE scan and returns immediately.

Using this method you can begin a BLE scan and leave the dongle in
scanning mode in the background. It will remain in scanning mode
until you call the :meth:`end_scan` method or the :meth:`reset`
method.

Args:
    callback (callable): a callback that will be called for each
        new device discovered by the scanning process. Will be
        passed a single argument, a :class:`ScanResult` object.
        May be None if not needed.
    interval (int): BLE scan interval, in units of 625us
    window (int): BLE scan window, in units of 625us

Returns:
    True on success, False otherwise.
Below is the the instruction that describes the task: ### Input: Begins a BLE scan and returns immediately. Using this method you can begin a BLE scan and leave the dongle in scanning mode in the background. It will remain in scanning mode until you call the :meth:`end_scan` method or the :meth:`reset` method. Args: callback (callbable): a callback that will be called for each new device discovered by the scanning process. Will be passed a single argument, a :class:`ScanResult` object. May be None if not needed. interval (int): BLE scan interval, in units of 625us window (int): BLE scan window, in units of 625us Returns: True on success, False otherwise. ### Response: def begin_scan(self, callback=None, interval=DEF_SCAN_INTERVAL, window=DEF_SCAN_WINDOW): """Begins a BLE scan and returns immediately. Using this method you can begin a BLE scan and leave the dongle in scanning mode in the background. It will remain in scanning mode until you call the :meth:`end_scan` method or the :meth:`reset` method. Args: callback (callbable): a callback that will be called for each new device discovered by the scanning process. Will be passed a single argument, a :class:`ScanResult` object. May be None if not needed. interval (int): BLE scan interval, in units of 625us window (int): BLE scan window, in units of 625us Returns: True on success, False otherwise. """ # TODO validate params and current state logger.debug('configuring scan parameters') self.api.ble_cmd_gap_set_scan_parameters(interval, window, 1) self._set_state(self._STATE_CONFIGURE_SCAN) self.api.ble_cmd_gap_discover(1) # any discoverable devices self._wait_for_state(self._STATE_CONFIGURE_SCAN) # TODO check state logger.debug('starting async scan for devices') self.scan_targets = None self.scan_callback = callback self._set_state(self._STATE_SCANNING) return True
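A usage sketch for the background scan. ble stands for an already-initialised instance of the BLE driver class above (an assumption); end_scan is used only because the docstring names it as the way to stop scanning.

import time

def on_result(result):
    print(result)  # a ScanResult per discovered device, per the docstring

if ble.begin_scan(callback=on_result):
    time.sleep(10)  # let discoveries arrive in the background
    ble.end_scan()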
def djfrontend_jquery_smoothscroll(version=None): """ Returns the jQuery Smooth Scroll plugin file according to version number. TEMPLATE_DEBUG returns full file, otherwise returns minified file. """ if version is None: version = getattr(settings, 'DJFRONTEND_JQUERY_SMOOTHSCROLL', DJFRONTEND_JQUERY_SMOOTHSCROLL_DEFAULT) if getattr(settings, 'TEMPLATE_DEBUG', False): template = '<script src="{static}djfrontend/js/jquery/jquery.smooth-scroll/{v}/jquery.smooth-scroll.js"></script>' else: template = ( '<script src="//cdnjs.cloudflare.com/ajax/libs/jquery-smooth-scroll/{v}/jquery.smooth-scroll.min.js"></script>' '<script>window.jQuery.fn.smoothScroll || document.write(\'<script src="{static}djfrontend/js/jquery/jquery.smooth-scroll/{v}/jquery.smooth-scroll.min.js"><\/script>\')</script>') return format_html(template, static=_static_url, v=version)
Returns the jQuery Smooth Scroll plugin file according to version number. TEMPLATE_DEBUG returns full file, otherwise returns minified file.
Below is the the instruction that describes the task: ### Input: Returns the jQuery Smooth Scroll plugin file according to version number. TEMPLATE_DEBUG returns full file, otherwise returns minified file. ### Response: def djfrontend_jquery_smoothscroll(version=None): """ Returns the jQuery Smooth Scroll plugin file according to version number. TEMPLATE_DEBUG returns full file, otherwise returns minified file. """ if version is None: version = getattr(settings, 'DJFRONTEND_JQUERY_SMOOTHSCROLL', DJFRONTEND_JQUERY_SMOOTHSCROLL_DEFAULT) if getattr(settings, 'TEMPLATE_DEBUG', False): template = '<script src="{static}djfrontend/js/jquery/jquery.smooth-scroll/{v}/jquery.smooth-scroll.js"></script>' else: template = ( '<script src="//cdnjs.cloudflare.com/ajax/libs/jquery-smooth-scroll/{v}/jquery.smooth-scroll.min.js"></script>' '<script>window.jQuery.fn.smoothScroll || document.write(\'<script src="{static}djfrontend/js/jquery/jquery.smooth-scroll/{v}/jquery.smooth-scroll.min.js"><\/script>\')</script>') return format_html(template, static=_static_url, v=version)
def ConvertToWireFormat(self, value): """Encode the nested protobuf into wire format.""" data = value.SerializeToString() return (self.encoded_tag, VarintEncode(len(data)), data)
Encode the nested protobuf into wire format.
Below is the the instruction that describes the task: ### Input: Encode the nested protobuf into wire format. ### Response: def ConvertToWireFormat(self, value): """Encode the nested protobuf into wire format.""" data = value.SerializeToString() return (self.encoded_tag, VarintEncode(len(data)), data)
def rewind_body(body, body_pos): """ Attempt to rewind body to a certain position. Primarily used for request redirects and retries. :param body: File-like object that supports seek. :param int pos: Position to seek to in file. """ body_seek = getattr(body, 'seek', None) if body_seek is not None and isinstance(body_pos, integer_types): try: body_seek(body_pos) except (IOError, OSError): raise UnrewindableBodyError("An error occurred when rewinding request " "body for redirect/retry.") elif body_pos is _FAILEDTELL: raise UnrewindableBodyError("Unable to record file position for rewinding " "request body during a redirect/retry.") else: raise ValueError("body_pos must be of type integer, " "instead it was %s." % type(body_pos))
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.

:param body: File-like object that supports seek.
:param int body_pos: Position to seek to in file.
Below is the the instruction that describes the task: ### Input: Attempt to rewind body to a certain position. Primarily used for request redirects and retries. :param body: File-like object that supports seek. :param int pos: Position to seek to in file. ### Response: def rewind_body(body, body_pos): """ Attempt to rewind body to a certain position. Primarily used for request redirects and retries. :param body: File-like object that supports seek. :param int pos: Position to seek to in file. """ body_seek = getattr(body, 'seek', None) if body_seek is not None and isinstance(body_pos, integer_types): try: body_seek(body_pos) except (IOError, OSError): raise UnrewindableBodyError("An error occurred when rewinding request " "body for redirect/retry.") elif body_pos is _FAILEDTELL: raise UnrewindableBodyError("Unable to record file position for rewinding " "request body during a redirect/retry.") else: raise ValueError("body_pos must be of type integer, " "instead it was %s." % type(body_pos))
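A minimal sketch of the intended call pattern, mirroring how the snippet's own branches are written: record the body position before the first send, then rewind before a retry. _FAILEDTELL is the sentinel the function already checks for and is assumed to be importable from the same module.

body = open("payload.bin", "rb")
try:
    body_pos = body.tell()
except (IOError, OSError):
    body_pos = _FAILEDTELL  # sentinel defined alongside rewind_body (assumed importable)

# ... the first attempt fails and the request is retried ...
rewind_body(body, body_pos)  # seeks back to body_pos, or raises UnrewindableBodyError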
def help_center_incremental_articles_list(self, start_time=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles" api_path = "/api/v2/help_center/incremental/articles.json" api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if start_time: api_query.update({ "start_time": start_time, }) return self.call(api_path, query=api_query, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles
Below is the the instruction that describes the task: ### Input: https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles ### Response: def help_center_incremental_articles_list(self, start_time=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles" api_path = "/api/v2/help_center/incremental/articles.json" api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if start_time: api_query.update({ "start_time": start_time, }) return self.call(api_path, query=api_query, **kwargs)
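Hypothetical usage, assuming zd is a configured instance of the Zendesk API client class that owns this method.

import time

since = int(time.time()) - 86400  # articles changed in the last 24 hours
articles = zd.help_center_incremental_articles_list(start_time=since)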
def format(self, record) -> str: """ :type record: aiologger.loggers.json.LogRecord """ msg = dict(self.formatter_fields_for_record(record)) if record.flatten and isinstance(record.msg, dict): msg.update(record.msg) else: msg[MSG_FIELDNAME] = record.msg if record.extra: msg.update(record.extra) if record.exc_info: msg["exc_info"] = record.exc_info if record.exc_text: msg["exc_text"] = record.exc_text return self.serializer( msg, default=self._default_handler, **record.serializer_kwargs )
:type record: aiologger.loggers.json.LogRecord
Below is the the instruction that describes the task: ### Input: :type record: aiologger.loggers.json.LogRecord ### Response: def format(self, record) -> str: """ :type record: aiologger.loggers.json.LogRecord """ msg = dict(self.formatter_fields_for_record(record)) if record.flatten and isinstance(record.msg, dict): msg.update(record.msg) else: msg[MSG_FIELDNAME] = record.msg if record.extra: msg.update(record.extra) if record.exc_info: msg["exc_info"] = record.exc_info if record.exc_text: msg["exc_text"] = record.exc_text return self.serializer( msg, default=self._default_handler, **record.serializer_kwargs )
def get_request_params(self) -> List[ExtensionParameter]: """ Build request parameters. """ return _build_parameters( self.server_no_context_takeover, self.client_no_context_takeover, self.server_max_window_bits, self.client_max_window_bits, )
Build request parameters.
Below is the the instruction that describes the task: ### Input: Build request parameters. ### Response: def get_request_params(self) -> List[ExtensionParameter]: """ Build request parameters. """ return _build_parameters( self.server_no_context_takeover, self.client_no_context_takeover, self.server_max_window_bits, self.client_max_window_bits, )
def situation_parameters(self): """ Situation parameters defining detection logic for the context. This will return a list of SituationParameter indicating how the detection is made, i.e. regular expression, integer value, etc. :rtype: list(SituationParameter) """ for param in self.data.get('situation_parameters', []): cache = ElementCache(data=self.make_request(href=param)) yield type('SituationParameter', (SituationParameter,), { 'data': cache})(name=cache.name, type=cache.type, href=param)
Situation parameters defining detection logic for the context. This will return a list of SituationParameter indicating how the detection is made, i.e. regular expression, integer value, etc. :rtype: list(SituationParameter)
Below is the the instruction that describes the task: ### Input: Situation parameters defining detection logic for the context. This will return a list of SituationParameter indicating how the detection is made, i.e. regular expression, integer value, etc. :rtype: list(SituationParameter) ### Response: def situation_parameters(self): """ Situation parameters defining detection logic for the context. This will return a list of SituationParameter indicating how the detection is made, i.e. regular expression, integer value, etc. :rtype: list(SituationParameter) """ for param in self.data.get('situation_parameters', []): cache = ElementCache(data=self.make_request(href=param)) yield type('SituationParameter', (SituationParameter,), { 'data': cache})(name=cache.name, type=cache.type, href=param)
def within_footprint(img, wcs, x, y): """Determine whether input x, y fall in the science area of the image. Parameters ---------- img : ndarray ndarray of image where non-science areas are marked with value of NaN. wcs : `stwcs.wcsutil.HSTWCS` HSTWCS or WCS object with naxis terms defined. x, y : ndarray arrays of x, y positions for sources to be checked. Returns ------- x, y : ndarray New arrays which have been trimmed of all sources that fall outside the science areas of the image """ # start with limits of WCS shape if hasattr(wcs, 'naxis1'): naxis1 = wcs.naxis1 naxis2 = wcs.naxis2 elif hasattr(wcs, 'pixel_shape'): naxis1, naxis2 = wcs.pixel_shape else: naxis1 = wcs._naxis1 naxis2 = wcs._naxis2 maskx = np.bitwise_or(x < 0, x > naxis1) masky = np.bitwise_or(y < 0, y > naxis2) mask = ~np.bitwise_or(maskx, masky) x = x[mask] y = y[mask] # Now, confirm that these points fall within actual science area of WCS img_mask = create_image_footprint(img, wcs, border=1.0) inmask = np.where(img_mask[y.astype(np.int32), x.astype(np.int32)])[0] x = x[inmask] y = y[inmask] return x, y
Determine whether input x, y fall in the science area of the image. Parameters ---------- img : ndarray ndarray of image where non-science areas are marked with value of NaN. wcs : `stwcs.wcsutil.HSTWCS` HSTWCS or WCS object with naxis terms defined. x, y : ndarray arrays of x, y positions for sources to be checked. Returns ------- x, y : ndarray New arrays which have been trimmed of all sources that fall outside the science areas of the image
Below is the the instruction that describes the task: ### Input: Determine whether input x, y fall in the science area of the image. Parameters ---------- img : ndarray ndarray of image where non-science areas are marked with value of NaN. wcs : `stwcs.wcsutil.HSTWCS` HSTWCS or WCS object with naxis terms defined. x, y : ndarray arrays of x, y positions for sources to be checked. Returns ------- x, y : ndarray New arrays which have been trimmed of all sources that fall outside the science areas of the image ### Response: def within_footprint(img, wcs, x, y): """Determine whether input x, y fall in the science area of the image. Parameters ---------- img : ndarray ndarray of image where non-science areas are marked with value of NaN. wcs : `stwcs.wcsutil.HSTWCS` HSTWCS or WCS object with naxis terms defined. x, y : ndarray arrays of x, y positions for sources to be checked. Returns ------- x, y : ndarray New arrays which have been trimmed of all sources that fall outside the science areas of the image """ # start with limits of WCS shape if hasattr(wcs, 'naxis1'): naxis1 = wcs.naxis1 naxis2 = wcs.naxis2 elif hasattr(wcs, 'pixel_shape'): naxis1, naxis2 = wcs.pixel_shape else: naxis1 = wcs._naxis1 naxis2 = wcs._naxis2 maskx = np.bitwise_or(x < 0, x > naxis1) masky = np.bitwise_or(y < 0, y > naxis2) mask = ~np.bitwise_or(maskx, masky) x = x[mask] y = y[mask] # Now, confirm that these points fall within actual science area of WCS img_mask = create_image_footprint(img, wcs, border=1.0) inmask = np.where(img_mask[y.astype(np.int32), x.astype(np.int32)])[0] x = x[inmask] y = y[inmask] return x, y
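A hedged call sketch for within_footprint. img is assumed to be a science array with NaNs marking non-science pixels and wcs an HSTWCS/WCS object with its axis sizes defined, both prepared elsewhere, as the docstring requires.

import numpy as np

x = np.array([10.2, 500.7, -3.0])
y = np.array([20.5, 480.1, 15.0])
x_in, y_in = within_footprint(img, wcs, x, y)  # img and wcs prepared elsewhere (assumed)
print(len(x_in), len(y_in))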